summaryrefslogtreecommitdiffstats
path: root/media/libstagefright
diff options
context:
space:
mode:
Diffstat (limited to 'media/libstagefright')
-rw-r--r--media/libstagefright/AACExtractor.cpp4
-rw-r--r--media/libstagefright/AACWriter.cpp18
-rw-r--r--media/libstagefright/ACodec.cpp1455
-rw-r--r--media/libstagefright/AMRExtractor.cpp8
-rw-r--r--media/libstagefright/AMRWriter.cpp18
-rw-r--r--media/libstagefright/Android.mk24
-rw-r--r--media/libstagefright/AudioPlayer.cpp31
-rw-r--r--media/libstagefright/AudioSource.cpp26
-rw-r--r--media/libstagefright/AwesomePlayer.cpp186
-rw-r--r--media/libstagefright/CameraSource.cpp149
-rw-r--r--media/libstagefright/CameraSourceTimeLapse.cpp13
-rw-r--r--media/libstagefright/ClockEstimator.cpp177
-rw-r--r--media/libstagefright/CodecBase.cpp (renamed from media/libstagefright/include/chromium_http_stub.h)26
-rw-r--r--media/libstagefright/DataSource.cpp78
-rw-r--r--media/libstagefright/DataURISource.cpp109
-rw-r--r--media/libstagefright/ESDS.cpp2
-rw-r--r--media/libstagefright/FLACExtractor.cpp70
-rw-r--r--media/libstagefright/HTTPBase.cpp52
-rw-r--r--media/libstagefright/MP3Extractor.cpp3
-rw-r--r--media/libstagefright/MPEG2TSWriter.cpp7
-rw-r--r--media/libstagefright/MPEG4Extractor.cpp622
-rw-r--r--media/libstagefright/MPEG4Writer.cpp169
-rw-r--r--media/libstagefright/MediaAdapter.cpp4
-rw-r--r--media/libstagefright/MediaBuffer.cpp7
-rw-r--r--media/libstagefright/MediaBufferGroup.cpp8
-rw-r--r--media/libstagefright/MediaCodec.cpp885
-rw-r--r--media/libstagefright/MediaCodecList.cpp675
-rw-r--r--media/libstagefright/MediaCodecSource.cpp890
-rw-r--r--media/libstagefright/MediaDefs.cpp4
-rw-r--r--media/libstagefright/MediaMuxer.cpp29
-rw-r--r--media/libstagefright/MediaSource.cpp13
-rw-r--r--media/libstagefright/MetaData.cpp7
-rw-r--r--media/libstagefright/NuCachedSource2.cpp83
-rw-r--r--media/libstagefright/NuMediaExtractor.cpp8
-rw-r--r--media/libstagefright/OMXClient.cpp31
-rw-r--r--media/libstagefright/OMXCodec.cpp293
-rw-r--r--media/libstagefright/OggExtractor.cpp36
-rw-r--r--media/libstagefright/SampleIterator.cpp9
-rw-r--r--media/libstagefright/SampleTable.cpp198
-rw-r--r--media/libstagefright/SkipCutBuffer.cpp3
-rw-r--r--media/libstagefright/StagefrightMediaScanner.cpp15
-rw-r--r--media/libstagefright/StagefrightMetadataRetriever.cpp93
-rw-r--r--media/libstagefright/SurfaceMediaSource.cpp51
-rw-r--r--media/libstagefright/TimedEventQueue.cpp17
-rw-r--r--media/libstagefright/Utils.cpp203
-rw-r--r--media/libstagefright/VBRISeeker.cpp9
-rw-r--r--media/libstagefright/WAVExtractor.cpp10
-rw-r--r--media/libstagefright/avc_utils.cpp47
-rw-r--r--media/libstagefright/chromium_http/Android.mk39
-rw-r--r--media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp355
-rw-r--r--media/libstagefright/chromium_http/DataUriSource.cpp68
-rw-r--r--media/libstagefright/chromium_http/support.cpp659
-rw-r--r--media/libstagefright/chromium_http/support.h178
-rw-r--r--media/libstagefright/chromium_http_stub.cpp102
-rw-r--r--media/libstagefright/codecs/aacdec/Android.mk5
-rw-r--r--media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp372
-rw-r--r--media/libstagefright/codecs/aacdec/DrcPresModeWrap.h62
-rw-r--r--media/libstagefright/codecs/aacdec/SoftAAC2.cpp933
-rw-r--r--media/libstagefright/codecs/aacdec/SoftAAC2.h29
-rw-r--r--media/libstagefright/codecs/aacenc/Android.mk6
-rw-r--r--media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp83
-rw-r--r--media/libstagefright/codecs/aacenc/SoftAACEncoder2.h2
-rw-r--r--media/libstagefright/codecs/aacenc/basic_op/oper_32b.c4
-rw-r--r--media/libstagefright/codecs/aacenc/src/aacenc.c8
-rw-r--r--media/libstagefright/codecs/aacenc/src/adj_thr.c2
-rw-r--r--media/libstagefright/codecs/aacenc/src/bitenc.c3
-rw-r--r--media/libstagefright/codecs/aacenc/src/dyn_bits.c3
-rw-r--r--media/libstagefright/codecs/aacenc/src/psy_main.c6
-rw-r--r--media/libstagefright/codecs/aacenc/src/qc_main.c8
-rw-r--r--media/libstagefright/codecs/aacenc/src/tns.c4
-rw-r--r--media/libstagefright/codecs/amrnb/common/Android.mk2
-rw-r--r--media/libstagefright/codecs/amrnb/dec/Android.mk4
-rw-r--r--media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp4
-rw-r--r--media/libstagefright/codecs/amrnb/enc/Android.mk4
-rw-r--r--media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp2
-rw-r--r--media/libstagefright/codecs/amrwb/Android.mk2
-rw-r--r--media/libstagefright/codecs/amrwbenc/Android.mk4
-rw-r--r--media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp2
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s1
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s1
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s1
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s1
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/autocorr.c4
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/convolve.c4
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/pitch_f4.c3
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/syn_filt.c4
-rw-r--r--media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c4
-rw-r--r--media/libstagefright/codecs/avc/common/Android.mk2
-rw-r--r--media/libstagefright/codecs/avc/enc/Android.mk4
-rw-r--r--media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp144
-rw-r--r--media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h16
-rw-r--r--media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp15
-rw-r--r--media/libstagefright/codecs/common/Android.mk2
-rw-r--r--media/libstagefright/codecs/common/Config.mk6
-rw-r--r--media/libstagefright/codecs/common/cmnMemory.c18
-rw-r--r--media/libstagefright/codecs/flac/enc/Android.mk2
-rw-r--r--media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp41
-rw-r--r--media/libstagefright/codecs/g711/dec/Android.mk2
-rw-r--r--media/libstagefright/codecs/g711/dec/SoftG711.cpp13
-rw-r--r--media/libstagefright/codecs/gsm/dec/Android.mk2
-rw-r--r--media/libstagefright/codecs/gsm/dec/SoftGSM.cpp6
-rw-r--r--media/libstagefright/codecs/hevcdec/Android.mk30
-rw-r--r--media/libstagefright/codecs/hevcdec/SoftHEVC.cpp765
-rw-r--r--media/libstagefright/codecs/hevcdec/SoftHEVC.h125
-rw-r--r--media/libstagefright/codecs/m4v_h263/dec/Android.mk4
-rw-r--r--media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp69
-rw-r--r--media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h2
-rw-r--r--media/libstagefright/codecs/m4v_h263/dec/src/get_pred_adv_b_add.cpp8
-rw-r--r--media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp2
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/Android.mk4
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp132
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h16
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/include/mp4enc_api.h2
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/src/dct.cpp12
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp4
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/src/motion_comp.cpp10
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/src/mp4def.h2
-rw-r--r--media/libstagefright/codecs/m4v_h263/enc/src/sad_inline.h2
-rw-r--r--media/libstagefright/codecs/mp3dec/Android.mk4
-rw-r--r--media/libstagefright/codecs/mp3dec/SoftMP3.cpp136
-rw-r--r--media/libstagefright/codecs/mp3dec/SoftMP3.h2
-rw-r--r--media/libstagefright/codecs/mp3dec/src/asm/pvmp3_dct_9_arm.s210
-rw-r--r--media/libstagefright/codecs/mp3dec/src/asm/pvmp3_mdct_18_arm.s369
-rw-r--r--media/libstagefright/codecs/mp3dec/src/asm/pvmp3_mdct_18_wm.asm366
-rw-r--r--media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_arm.s237
-rw-r--r--media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_gcc.s1
-rw-r--r--media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_wm.asm231
-rw-r--r--media/libstagefright/codecs/mp3dec/src/pvmp3_mpeg2_get_scale_data.cpp2
-rw-r--r--media/libstagefright/codecs/on2/dec/Android.mk2
-rw-r--r--media/libstagefright/codecs/on2/dec/SoftVPX.cpp86
-rw-r--r--media/libstagefright/codecs/on2/dec/SoftVPX.h6
-rw-r--r--media/libstagefright/codecs/on2/enc/Android.mk4
-rw-r--r--media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp481
-rw-r--r--media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h89
-rw-r--r--media/libstagefright/codecs/on2/h264dec/Android.mk5
-rw-r--r--media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp85
-rw-r--r--media/libstagefright/codecs/on2/h264dec/SoftAVC.h5
-rw-r--r--media/libstagefright/codecs/on2/h264dec/omxdl/arm11/api/omxtypes.h60
-rwxr-xr-xmedia/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/omxtypes.h60
-rw-r--r--media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S16
-rw-r--r--media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DequantTables_s.S8
-rw-r--r--media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/omxtypes.h60
-rw-r--r--media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c3
-rwxr-xr-xmedia/libstagefright/codecs/on2/h264dec/source/h264bsd_conceal.c2
-rwxr-xr-xmedia/libstagefright/codecs/on2/h264dec/source/h264bsd_intra_prediction.c10
-rwxr-xr-xmedia/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c5
-rwxr-xr-xmedia/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c3
-rwxr-xr-xmedia/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c2
-rwxr-xr-xmedia/libstagefright/codecs/on2/h264dec/source/h264bsd_util.h3
-rw-r--r--media/libstagefright/codecs/opus/Android.mk4
-rw-r--r--media/libstagefright/codecs/opus/dec/Android.mk19
-rw-r--r--media/libstagefright/codecs/opus/dec/SoftOpus.cpp540
-rw-r--r--media/libstagefright/codecs/opus/dec/SoftOpus.h94
-rw-r--r--media/libstagefright/codecs/raw/Android.mk2
-rw-r--r--media/libstagefright/codecs/raw/SoftRaw.cpp2
-rw-r--r--media/libstagefright/codecs/vorbis/dec/Android.mk2
-rw-r--r--media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp84
-rw-r--r--media/libstagefright/codecs/vorbis/dec/SoftVorbis.h2
-rw-r--r--media/libstagefright/colorconversion/SoftwareRenderer.cpp130
-rw-r--r--media/libstagefright/data/media_codecs_google_audio.xml91
-rw-r--r--media/libstagefright/data/media_codecs_google_telephony.xml25
-rw-r--r--media/libstagefright/data/media_codecs_google_video.xml104
-rw-r--r--media/libstagefright/foundation/ABitReader.cpp68
-rw-r--r--media/libstagefright/foundation/ABuffer.cpp31
-rw-r--r--media/libstagefright/foundation/AHierarchicalStateMachine.cpp2
-rw-r--r--media/libstagefright/foundation/ALooper.cpp8
-rw-r--r--media/libstagefright/foundation/ALooperRoster.cpp67
-rw-r--r--media/libstagefright/foundation/AMessage.cpp122
-rw-r--r--media/libstagefright/foundation/ANetworkSession.cpp6
-rw-r--r--media/libstagefright/foundation/AString.cpp88
-rw-r--r--media/libstagefright/foundation/Android.mk2
-rw-r--r--media/libstagefright/foundation/base64.cpp6
-rw-r--r--media/libstagefright/http/Android.mk28
-rw-r--r--media/libstagefright/http/HTTPHelper.cpp70
-rw-r--r--media/libstagefright/http/HTTPHelper.h (renamed from media/libstagefright/chromium_http/chromium_http_stub.cpp)25
-rw-r--r--media/libstagefright/http/MediaHTTP.cpp205
-rw-r--r--media/libstagefright/httplive/Android.mk2
-rw-r--r--media/libstagefright/httplive/LiveSession.cpp613
-rw-r--r--media/libstagefright/httplive/LiveSession.h47
-rw-r--r--media/libstagefright/httplive/M3UParser.cpp146
-rw-r--r--media/libstagefright/httplive/M3UParser.h7
-rw-r--r--media/libstagefright/httplive/PlaylistFetcher.cpp517
-rw-r--r--media/libstagefright/httplive/PlaylistFetcher.h37
-rw-r--r--media/libstagefright/id3/Android.mk4
-rw-r--r--media/libstagefright/id3/ID3.cpp93
-rw-r--r--media/libstagefright/id3/testid3.cpp4
-rw-r--r--media/libstagefright/include/AwesomePlayer.h6
-rw-r--r--media/libstagefright/include/ChromiumHTTPDataSource.h125
-rw-r--r--media/libstagefright/include/FragmentedMP4Parser.h274
-rw-r--r--media/libstagefright/include/HTTPBase.h11
-rw-r--r--media/libstagefright/include/MPEG4Extractor.h11
-rw-r--r--media/libstagefright/include/NuCachedSource2.h3
-rw-r--r--media/libstagefright/include/OMX.h8
-rw-r--r--media/libstagefright/include/OMXNodeInstance.h21
-rw-r--r--media/libstagefright/include/SDPLoader.h8
-rw-r--r--media/libstagefright/include/SampleIterator.h4
-rw-r--r--media/libstagefright/include/SampleTable.h13
-rw-r--r--media/libstagefright/include/SimpleSoftOMXComponent.h5
-rw-r--r--media/libstagefright/include/SoftVideoDecoderOMXComponent.h25
-rw-r--r--media/libstagefright/include/SoftVideoEncoderOMXComponent.h67
-rw-r--r--media/libstagefright/include/SoftwareRenderer.h10
-rw-r--r--media/libstagefright/include/StagefrightMetadataRetriever.h1
-rw-r--r--media/libstagefright/include/TimedEventQueue.h2
-rw-r--r--media/libstagefright/include/WVMExtractor.h3
-rw-r--r--media/libstagefright/matroska/Android.mk2
-rw-r--r--media/libstagefright/matroska/MatroskaExtractor.cpp151
-rw-r--r--media/libstagefright/matroska/MatroskaExtractor.h12
-rw-r--r--media/libstagefright/mp4/FragmentedMP4Parser.cpp1993
-rw-r--r--media/libstagefright/mp4/TrackFragment.cpp364
-rw-r--r--media/libstagefright/mp4/TrackFragment.h122
-rw-r--r--media/libstagefright/mpeg2ts/ATSParser.cpp44
-rw-r--r--media/libstagefright/mpeg2ts/ATSParser.h4
-rw-r--r--media/libstagefright/mpeg2ts/Android.mk2
-rw-r--r--media/libstagefright/mpeg2ts/AnotherPacketSource.cpp129
-rw-r--r--media/libstagefright/mpeg2ts/AnotherPacketSource.h14
-rw-r--r--media/libstagefright/mpeg2ts/ESQueue.cpp73
-rw-r--r--media/libstagefright/mpeg2ts/ESQueue.h1
-rw-r--r--media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp15
-rw-r--r--media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp2
-rw-r--r--media/libstagefright/omx/Android.mk6
-rw-r--r--media/libstagefright/omx/GraphicBufferSource.cpp203
-rw-r--r--media/libstagefright/omx/GraphicBufferSource.h41
-rw-r--r--media/libstagefright/omx/OMX.cpp29
-rw-r--r--media/libstagefright/omx/OMXMaster.cpp2
-rw-r--r--media/libstagefright/omx/OMXNodeInstance.cpp226
-rw-r--r--media/libstagefright/omx/SoftOMXComponent.cpp40
-rw-r--r--media/libstagefright/omx/SoftOMXPlugin.cpp4
-rw-r--r--media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp192
-rw-r--r--media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp311
-rw-r--r--media/libstagefright/omx/tests/Android.mk4
-rw-r--r--media/libstagefright/omx/tests/OMXHarness.cpp13
-rw-r--r--media/libstagefright/rtsp/AAVCAssembler.cpp10
-rw-r--r--media/libstagefright/rtsp/AMPEG2TSAssembler.cpp4
-rw-r--r--media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp20
-rw-r--r--media/libstagefright/rtsp/APacketSource.cpp2
-rw-r--r--media/libstagefright/rtsp/ARTPConnection.cpp2
-rw-r--r--media/libstagefright/rtsp/ARTPWriter.cpp4
-rw-r--r--media/libstagefright/rtsp/ARTSPConnection.cpp4
-rw-r--r--media/libstagefright/rtsp/ARawAudioAssembler.cpp4
-rw-r--r--media/libstagefright/rtsp/ASessionDescription.cpp5
-rw-r--r--media/libstagefright/rtsp/Android.mk8
-rw-r--r--media/libstagefright/rtsp/MyHandler.h57
-rw-r--r--media/libstagefright/rtsp/SDPLoader.cpp32
-rw-r--r--media/libstagefright/tests/Android.mk47
-rw-r--r--media/libstagefright/tests/DummyRecorder.cpp2
-rw-r--r--media/libstagefright/tests/SurfaceMediaSource_test.cpp23
-rw-r--r--media/libstagefright/tests/Utils_test.cpp101
-rw-r--r--media/libstagefright/timedtext/Android.mk3
-rw-r--r--media/libstagefright/timedtext/TimedTextDriver.cpp7
-rw-r--r--media/libstagefright/timedtext/TimedTextPlayer.cpp3
-rw-r--r--media/libstagefright/timedtext/TimedTextSource.h2
-rw-r--r--media/libstagefright/timedtext/test/Android.mk8
-rw-r--r--media/libstagefright/webm/Android.mk23
-rw-r--r--media/libstagefright/webm/EbmlUtil.cpp108
-rw-r--r--media/libstagefright/webm/EbmlUtil.h50
-rw-r--r--media/libstagefright/webm/LinkedBlockingQueue.h79
-rw-r--r--media/libstagefright/webm/WebmConstants.h133
-rw-r--r--media/libstagefright/webm/WebmElement.cpp367
-rw-r--r--media/libstagefright/webm/WebmElement.h127
-rw-r--r--media/libstagefright/webm/WebmFrame.cpp83
-rw-r--r--media/libstagefright/webm/WebmFrame.h46
-rw-r--r--media/libstagefright/webm/WebmFrameThread.cpp399
-rw-r--r--media/libstagefright/webm/WebmFrameThread.h160
-rw-r--r--media/libstagefright/webm/WebmWriter.cpp551
-rw-r--r--media/libstagefright/webm/WebmWriter.h130
-rw-r--r--media/libstagefright/wifi-display/rtp/RTPSender.cpp5
-rw-r--r--media/libstagefright/wifi-display/source/Converter.cpp49
-rw-r--r--media/libstagefright/wifi-display/source/MediaPuller.cpp2
-rw-r--r--media/libstagefright/wifi-display/source/PlaybackSession.cpp8
-rw-r--r--media/libstagefright/wifi-display/source/PlaybackSession.h3
-rw-r--r--media/libstagefright/wifi-display/source/RepeaterSource.cpp3
-rw-r--r--media/libstagefright/wifi-display/source/TSPacketizer.cpp2
-rw-r--r--media/libstagefright/wifi-display/source/WifiDisplaySource.cpp8
-rw-r--r--media/libstagefright/yuv/Android.mk2
-rw-r--r--media/libstagefright/yuv/YUVImage.cpp12
275 files changed, 15268 insertions, 9191 deletions
diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp
index 4d1072f..196f6ee 100644
--- a/media/libstagefright/AACExtractor.cpp
+++ b/media/libstagefright/AACExtractor.cpp
@@ -219,7 +219,7 @@ sp<MediaSource> AACExtractor::getTrack(size_t index) {
return new AACSource(mDataSource, mMeta, mOffsetVector, mFrameDurationUs);
}
-sp<MetaData> AACExtractor::getTrackMetaData(size_t index, uint32_t flags) {
+sp<MetaData> AACExtractor::getTrackMetaData(size_t index, uint32_t /* flags */) {
if (mInitCheck != OK || index != 0) {
return NULL;
}
@@ -252,7 +252,7 @@ AACSource::~AACSource() {
}
}
-status_t AACSource::start(MetaData *params) {
+status_t AACSource::start(MetaData * /* params */) {
CHECK(!mStarted);
if (mOffsetVector.empty()) {
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index a6f7cfb..353920e 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -14,6 +14,12 @@
* limitations under the License.
*/
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "AACWriter"
#include <utils/Log.h>
@@ -27,10 +33,6 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/mediarecorder.h>
-#include <sys/prctl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
namespace android {
@@ -111,7 +113,7 @@ status_t AACWriter::addSource(const sp<MediaSource> &source) {
return OK;
}
-status_t AACWriter::start(MetaData *params) {
+status_t AACWriter::start(MetaData * /* params */) {
if (mInitCheck != OK) {
return mInitCheck;
}
@@ -171,7 +173,7 @@ status_t AACWriter::reset() {
void *dummy;
pthread_join(mThread, &dummy);
- status_t err = (status_t) dummy;
+ status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
{
status_t status = mSource->stop();
if (err == OK &&
@@ -200,7 +202,7 @@ bool AACWriter::exceedsFileDurationLimit() {
// static
void *AACWriter::ThreadWrapper(void *me) {
- return (void *) static_cast<AACWriter *>(me)->threadFunc();
+ return (void *)(uintptr_t)static_cast<AACWriter *>(me)->threadFunc();
}
/*
@@ -348,7 +350,7 @@ status_t AACWriter::threadFunc() {
mResumed = false;
}
timestampUs -= previousPausedDurationUs;
- ALOGV("time stamp: %lld, previous paused duration: %lld",
+ ALOGV("time stamp: %" PRId64 ", previous paused duration: %" PRId64,
timestampUs, previousPausedDurationUs);
if (timestampUs > maxTimestampUs) {
maxTimestampUs = timestampUs;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index a8c95c3..2f2f9cf 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -17,6 +17,13 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ACodec"
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
+#include <inttypes.h>
+#include <utils/Trace.h>
+
#include <media/stagefright/ACodec.h>
#include <binder/MemoryDealer.h>
@@ -25,6 +32,7 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/BufferProducerWrapper.h>
#include <media/stagefright/MediaCodecList.h>
@@ -36,6 +44,7 @@
#include <media/hardware/HardwareAPI.h>
#include <OMX_AudioExt.h>
+#include <OMX_VideoExt.h>
#include <OMX_Component.h>
#include <OMX_IndexExt.h>
@@ -43,6 +52,48 @@
namespace android {
+// OMX errors are directly mapped into status_t range if
+// there is no corresponding MediaError status code.
+// Use the statusFromOMXError(int32_t omxError) function.
+//
+// Currently this is a direct map.
+// See frameworks/native/include/media/openmax/OMX_Core.h
+//
+// Vendor OMX errors from 0x90000000 - 0x9000FFFF
+// Extension OMX errors from 0x8F000000 - 0x90000000
+// Standard OMX errors from 0x80001000 - 0x80001024 (0x80001024 current)
+//
+
+// returns true if err is a recognized OMX error code.
+// as OMX error is OMX_S32, this is an int32_t type
+static inline bool isOMXError(int32_t err) {
+ return (ERROR_CODEC_MIN <= err && err <= ERROR_CODEC_MAX);
+}
+
+// converts an OMX error to a status_t
+static inline status_t statusFromOMXError(int32_t omxError) {
+ switch (omxError) {
+ case OMX_ErrorInvalidComponentName:
+ case OMX_ErrorComponentNotFound:
+ return NAME_NOT_FOUND; // can trigger illegal argument error for provided names.
+ default:
+ return isOMXError(omxError) ? omxError : 0; // no translation required
+ }
+}
+
+// checks and converts status_t to a non-side-effect status_t
+static inline status_t makeNoSideEffectStatus(status_t err) {
+ switch (err) {
+ // the following errors have side effects and may come
+ // from other code modules. Remap for safety reasons.
+ case INVALID_OPERATION:
+ case DEAD_OBJECT:
+ return UNKNOWN_ERROR;
+ default:
+ return err;
+ }
+}
+
template<class T>
static void InitOMXParams(T *params) {
params->nSize = sizeof(T);
@@ -64,7 +115,7 @@ struct CodecObserver : public BnOMXObserver {
sp<AMessage> msg = mNotify->dup();
msg->setInt32("type", omx_msg.type);
- msg->setPointer("node", omx_msg.node);
+ msg->setInt32("node", omx_msg.node);
switch (omx_msg.type) {
case omx_message::EVENT:
@@ -77,13 +128,13 @@ struct CodecObserver : public BnOMXObserver {
case omx_message::EMPTY_BUFFER_DONE:
{
- msg->setPointer("buffer", omx_msg.u.buffer_data.buffer);
+ msg->setInt32("buffer", omx_msg.u.buffer_data.buffer);
break;
}
case omx_message::FILL_BUFFER_DONE:
{
- msg->setPointer(
+ msg->setInt32(
"buffer", omx_msg.u.extended_buffer_data.buffer);
msg->setInt32(
"range_offset",
@@ -97,12 +148,6 @@ struct CodecObserver : public BnOMXObserver {
msg->setInt64(
"timestamp",
omx_msg.u.extended_buffer_data.timestamp);
- msg->setPointer(
- "platform_private",
- omx_msg.u.extended_buffer_data.platform_private);
- msg->setPointer(
- "data_ptr",
- omx_msg.u.extended_buffer_data.data_ptr);
break;
}
@@ -157,9 +202,7 @@ private:
IOMX::buffer_id bufferID,
size_t rangeOffset, size_t rangeLength,
OMX_U32 flags,
- int64_t timeUs,
- void *platformPrivate,
- void *dataPtr);
+ int64_t timeUs);
void getMoreInputDataIfPossible();
@@ -360,21 +403,26 @@ private:
ACodec::ACodec()
: mQuirks(0),
- mNode(NULL),
+ mNode(0),
mSentFormat(false),
mIsEncoder(false),
mUseMetadataOnEncoderOutput(false),
mShutdownInProgress(false),
- mIsConfiguredForAdaptivePlayback(false),
+ mExplicitShutdown(false),
mEncoderDelay(0),
mEncoderPadding(0),
+ mRotationDegrees(0),
mChannelMaskPresent(false),
mChannelMask(0),
mDequeueCounter(0),
mStoreMetaDataInOutputBuffers(false),
mMetaDataBuffersToSubmit(0),
mRepeatFrameDelayUs(-1ll),
- mMaxPtsGapUs(-1l) {
+ mMaxPtsGapUs(-1ll),
+ mTimePerFrameUs(-1ll),
+ mTimePerCaptureUs(-1ll),
+ mCreateInputBuffersSuspended(false),
+ mTunneled(false) {
mUninitializedState = new UninitializedState(this);
mLoadedState = new LoadedState(this);
mLoadedToIdleState = new LoadedToIdleState(this);
@@ -450,6 +498,10 @@ void ACodec::initiateShutdown(bool keepComponentAllocated) {
sp<AMessage> msg = new AMessage(kWhatShutdown, id());
msg->setInt32("keepComponentAllocated", keepComponentAllocated);
msg->post();
+ if (!keepComponentAllocated) {
+ // ensure shutdown completes in 3 seconds
+ (new AMessage(kWhatReleaseCodecInstance, id()))->post(3000000);
+ }
}
void ACodec::signalRequestIDRFrame() {
@@ -490,7 +542,7 @@ status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err == OK) {
- ALOGV("[%s] Allocating %lu buffers of size %lu on %s port",
+ ALOGV("[%s] Allocating %u buffers of size %u on %s port",
mComponentName.c_str(),
def.nBufferCountActual, def.nBufferSize,
portIndex == kPortIndexInput ? "input" : "output");
@@ -544,7 +596,7 @@ status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
}
sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", ACodec::kWhatBuffersAllocated);
+ notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
notify->setInt32("portIndex", portIndex);
@@ -588,6 +640,27 @@ status_t ACodec::configureOutputBuffersFromNativeWindow(
return err;
}
+ if (mRotationDegrees != 0) {
+ uint32_t transform = 0;
+ switch (mRotationDegrees) {
+ case 0: transform = 0; break;
+ case 90: transform = HAL_TRANSFORM_ROT_90; break;
+ case 180: transform = HAL_TRANSFORM_ROT_180; break;
+ case 270: transform = HAL_TRANSFORM_ROT_270; break;
+ default: transform = 0; break;
+ }
+
+ if (transform > 0) {
+ err = native_window_set_buffers_transform(
+ mNativeWindow.get(), transform);
+ if (err != 0) {
+ ALOGE("native_window_set_buffers_transform failed: %s (%d)",
+ strerror(-err), -err);
+ return err;
+ }
+ }
+ }
+
// Set up the native window.
OMX_U32 usage = 0;
err = mOMX->getGraphicBufferUsage(mNode, kPortIndexOutput, &usage);
@@ -629,6 +702,21 @@ status_t ACodec::configureOutputBuffersFromNativeWindow(
return err;
}
+ // Exits here for tunneled video playback codecs -- i.e. skips native window
+ // buffer allocation step as this is managed by the tunneled OMX omponent
+ // itself and explicitly sets def.nBufferCountActual to 0.
+ if (mTunneled) {
+ ALOGV("Tunneled Playback: skipping native window buffer allocation.");
+ def.nBufferCountActual = 0;
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+ *minUndequeuedBuffers = 0;
+ *bufferCount = 0;
+ *bufferSize = 0;
+ return err;
+ }
+
*minUndequeuedBuffers = 0;
err = mNativeWindow->query(
mNativeWindow.get(), NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
@@ -640,18 +728,34 @@ status_t ACodec::configureOutputBuffersFromNativeWindow(
return err;
}
- // XXX: Is this the right logic to use? It's not clear to me what the OMX
- // buffer counts refer to - how do they account for the renderer holding on
- // to buffers?
- if (def.nBufferCountActual < def.nBufferCountMin + *minUndequeuedBuffers) {
- OMX_U32 newBufferCount = def.nBufferCountMin + *minUndequeuedBuffers;
+ // FIXME: assume that surface is controlled by app (native window
+ // returns the number for the case when surface is not controlled by app)
+ // FIXME2: This means that minUndeqeueudBufs can be 1 larger than reported
+ // For now, try to allocate 1 more buffer, but don't fail if unsuccessful
+
+ // Use conservative allocation while also trying to reduce starvation
+ //
+ // 1. allocate at least nBufferCountMin + minUndequeuedBuffers - that is the
+ // minimum needed for the consumer to be able to work
+ // 2. try to allocate two (2) additional buffers to reduce starvation from
+ // the consumer
+ // plus an extra buffer to account for incorrect minUndequeuedBufs
+ for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
+ OMX_U32 newBufferCount =
+ def.nBufferCountMin + *minUndequeuedBuffers + extraBuffers;
def.nBufferCountActual = newBufferCount;
err = mOMX->setParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
- if (err != OK) {
- ALOGE("[%s] setting nBufferCountActual to %lu failed: %d",
- mComponentName.c_str(), newBufferCount, err);
+ if (err == OK) {
+ *minUndequeuedBuffers += extraBuffers;
+ break;
+ }
+
+ ALOGW("[%s] setting nBufferCountActual to %u failed: %d",
+ mComponentName.c_str(), newBufferCount, err);
+ /* exit condition */
+ if (extraBuffers == 0) {
return err;
}
}
@@ -676,8 +780,9 @@ status_t ACodec::allocateOutputBuffersFromNativeWindow() {
&bufferCount, &bufferSize, &minUndequeuedBuffers);
if (err != 0)
return err;
+ mNumUndequeuedBuffers = minUndequeuedBuffers;
- ALOGV("[%s] Allocating %lu buffers from a native window of size %lu on "
+ ALOGV("[%s] Allocating %u buffers from a native window of size %u on "
"output port",
mComponentName.c_str(), bufferCount, bufferSize);
@@ -701,14 +806,14 @@ status_t ACodec::allocateOutputBuffersFromNativeWindow() {
err = mOMX->useGraphicBuffer(mNode, kPortIndexOutput, graphicBuffer,
&bufferId);
if (err != 0) {
- ALOGE("registering GraphicBuffer %lu with OMX IL component failed: "
+ ALOGE("registering GraphicBuffer %u with OMX IL component failed: "
"%d", i, err);
break;
}
mBuffers[kPortIndexOutput].editItemAt(i).mBufferID = bufferId;
- ALOGV("[%s] Registered graphic buffer with ID %p (pointer = %p)",
+ ALOGV("[%s] Registered graphic buffer with ID %u (pointer = %p)",
mComponentName.c_str(),
bufferId, graphicBuffer.get());
}
@@ -729,7 +834,10 @@ status_t ACodec::allocateOutputBuffersFromNativeWindow() {
for (OMX_U32 i = cancelStart; i < cancelEnd; i++) {
BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
- cancelBufferToNativeWindow(info);
+ status_t error = cancelBufferToNativeWindow(info);
+ if (err == 0) {
+ err = error;
+ }
}
return err;
@@ -741,8 +849,9 @@ status_t ACodec::allocateOutputMetaDataBuffers() {
&bufferCount, &bufferSize, &minUndequeuedBuffers);
if (err != 0)
return err;
+ mNumUndequeuedBuffers = minUndequeuedBuffers;
- ALOGV("[%s] Allocating %lu meta buffers on output port",
+ ALOGV("[%s] Allocating %u meta buffers on output port",
mComponentName.c_str(), bufferCount);
size_t totalSize = bufferCount * 8;
@@ -766,7 +875,7 @@ status_t ACodec::allocateOutputMetaDataBuffers() {
mBuffers[kPortIndexOutput].push(info);
- ALOGV("[%s] allocated meta buffer with ID %p (pointer = %p)",
+ ALOGV("[%s] allocated meta buffer with ID %u (pointer = %p)",
mComponentName.c_str(), info.mBufferID, mem->pointer());
}
@@ -783,7 +892,7 @@ status_t ACodec::submitOutputMetaDataBuffer() {
if (info == NULL)
return ERROR_IO;
- ALOGV("[%s] submitting output meta buffer ID %p for graphic buffer %p",
+ ALOGV("[%s] submitting output meta buffer ID %u for graphic buffer %p",
mComponentName.c_str(), info->mBufferID, info->mGraphicBuffer.get());
--mMetaDataBuffersToSubmit;
@@ -797,23 +906,31 @@ status_t ACodec::submitOutputMetaDataBuffer() {
status_t ACodec::cancelBufferToNativeWindow(BufferInfo *info) {
CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
- ALOGV("[%s] Calling cancelBuffer on buffer %p",
+ ALOGV("[%s] Calling cancelBuffer on buffer %u",
mComponentName.c_str(), info->mBufferID);
int err = mNativeWindow->cancelBuffer(
mNativeWindow.get(), info->mGraphicBuffer.get(), -1);
- CHECK_EQ(err, 0);
+ ALOGW_IF(err != 0, "[%s] can not return buffer %u to native window",
+ mComponentName.c_str(), info->mBufferID);
info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
- return OK;
+ return err;
}
ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
ANativeWindowBuffer *buf;
int fenceFd = -1;
CHECK(mNativeWindow.get() != NULL);
+
+ if (mTunneled) {
+ ALOGW("dequeueBufferFromNativeWindow() should not be called in tunnel"
+ " video playback mode mode!");
+ return NULL;
+ }
+
if (native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &buf) != 0) {
ALOGE("dequeueBuffer failed.");
return NULL;
@@ -907,7 +1024,7 @@ status_t ACodec::freeBuffer(OMX_U32 portIndex, size_t i) {
if (portIndex == kPortIndexOutput && mNativeWindow != NULL
&& info->mStatus == BufferInfo::OWNED_BY_US) {
- CHECK_EQ((status_t)OK, cancelBufferToNativeWindow(info));
+ cancelBufferToNativeWindow(info);
}
CHECK_EQ(mOMX->freeBuffer(
@@ -961,12 +1078,16 @@ status_t ACodec::setComponentRole(
"audio_decoder.aac", "audio_encoder.aac" },
{ MEDIA_MIMETYPE_AUDIO_VORBIS,
"audio_decoder.vorbis", "audio_encoder.vorbis" },
+ { MEDIA_MIMETYPE_AUDIO_OPUS,
+ "audio_decoder.opus", "audio_encoder.opus" },
{ MEDIA_MIMETYPE_AUDIO_G711_MLAW,
"audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
{ MEDIA_MIMETYPE_AUDIO_G711_ALAW,
"audio_decoder.g711alaw", "audio_encoder.g711alaw" },
{ MEDIA_MIMETYPE_VIDEO_AVC,
"video_decoder.avc", "video_encoder.avc" },
+ { MEDIA_MIMETYPE_VIDEO_HEVC,
+ "video_decoder.hevc", "video_encoder.hevc" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4,
"video_decoder.mpeg4", "video_encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_H263,
@@ -1036,6 +1157,9 @@ status_t ACodec::configureCodec(
encoder = false;
}
+ sp<AMessage> inputFormat = new AMessage();
+ sp<AMessage> outputFormat = new AMessage();
+
mIsEncoder = encoder;
status_t err = setComponentRole(encoder /* isEncoder */, mime);
@@ -1118,74 +1242,124 @@ status_t ACodec::configureCodec(
}
if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
- mMaxPtsGapUs = -1l;
+ mMaxPtsGapUs = -1ll;
+ }
+
+ if (!msg->findInt64("time-lapse", &mTimePerCaptureUs)) {
+ mTimePerCaptureUs = -1ll;
+ }
+
+ if (!msg->findInt32(
+ "create-input-buffers-suspended",
+ (int32_t*)&mCreateInputBuffersSuspended)) {
+ mCreateInputBuffersSuspended = false;
}
}
- // Always try to enable dynamic output buffers on native surface
sp<RefBase> obj;
int32_t haveNativeWindow = msg->findObject("native-window", &obj) &&
- obj != NULL;
+ obj != NULL;
mStoreMetaDataInOutputBuffers = false;
- mIsConfiguredForAdaptivePlayback = false;
+ if (video && !encoder) {
+ inputFormat->setInt32("adaptive-playback", false);
+ }
if (!encoder && video && haveNativeWindow) {
- err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, OMX_TRUE);
- if (err != OK) {
- ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
- mComponentName.c_str(), err);
-
- // if adaptive playback has been requested, try JB fallback
- // NOTE: THIS FALLBACK MECHANISM WILL BE REMOVED DUE TO ITS
- // LARGE MEMORY REQUIREMENT
-
- // we will not do adaptive playback on software accessed
- // surfaces as they never had to respond to changes in the
- // crop window, and we don't trust that they will be able to.
- int usageBits = 0;
- bool canDoAdaptivePlayback;
-
- sp<NativeWindowWrapper> windowWrapper(
- static_cast<NativeWindowWrapper *>(obj.get()));
- sp<ANativeWindow> nativeWindow = windowWrapper->getNativeWindow();
-
- if (nativeWindow->query(
- nativeWindow.get(),
- NATIVE_WINDOW_CONSUMER_USAGE_BITS,
- &usageBits) != OK) {
- canDoAdaptivePlayback = false;
- } else {
- canDoAdaptivePlayback =
- (usageBits &
- (GRALLOC_USAGE_SW_READ_MASK |
- GRALLOC_USAGE_SW_WRITE_MASK)) == 0;
+ sp<NativeWindowWrapper> windowWrapper(
+ static_cast<NativeWindowWrapper *>(obj.get()));
+ sp<ANativeWindow> nativeWindow = windowWrapper->getNativeWindow();
+
+ int32_t tunneled;
+ if (msg->findInt32("feature-tunneled-playback", &tunneled) &&
+ tunneled != 0) {
+ ALOGI("Configuring TUNNELED video playback.");
+ mTunneled = true;
+
+ int32_t audioHwSync = 0;
+ if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
+ ALOGW("No Audio HW Sync provided for video tunnel");
+ }
+ err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
+ if (err != OK) {
+ ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
+ audioHwSync, nativeWindow.get());
+ return err;
}
- int32_t maxWidth = 0, maxHeight = 0;
- if (canDoAdaptivePlayback &&
- msg->findInt32("max-width", &maxWidth) &&
- msg->findInt32("max-height", &maxHeight)) {
- ALOGV("[%s] prepareForAdaptivePlayback(%ldx%ld)",
- mComponentName.c_str(), maxWidth, maxHeight);
-
- err = mOMX->prepareForAdaptivePlayback(
- mNode, kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
- ALOGW_IF(err != OK,
- "[%s] prepareForAdaptivePlayback failed w/ err %d",
+ inputFormat->setInt32("adaptive-playback", true);
+ } else {
+ ALOGV("Configuring CPU controlled video playback.");
+ mTunneled = false;
+
+ // Always try to enable dynamic output buffers on native surface
+ err = mOMX->storeMetaDataInBuffers(
+ mNode, kPortIndexOutput, OMX_TRUE);
+ if (err != OK) {
+ ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
mComponentName.c_str(), err);
- mIsConfiguredForAdaptivePlayback = (err == OK);
+
+ // if adaptive playback has been requested, try JB fallback
+ // NOTE: THIS FALLBACK MECHANISM WILL BE REMOVED DUE TO ITS
+ // LARGE MEMORY REQUIREMENT
+
+ // we will not do adaptive playback on software accessed
+ // surfaces as they never had to respond to changes in the
+ // crop window, and we don't trust that they will be able to.
+ int usageBits = 0;
+ bool canDoAdaptivePlayback;
+
+ if (nativeWindow->query(
+ nativeWindow.get(),
+ NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+ &usageBits) != OK) {
+ canDoAdaptivePlayback = false;
+ } else {
+ canDoAdaptivePlayback =
+ (usageBits &
+ (GRALLOC_USAGE_SW_READ_MASK |
+ GRALLOC_USAGE_SW_WRITE_MASK)) == 0;
+ }
+
+ int32_t maxWidth = 0, maxHeight = 0;
+ if (canDoAdaptivePlayback &&
+ msg->findInt32("max-width", &maxWidth) &&
+ msg->findInt32("max-height", &maxHeight)) {
+ ALOGV("[%s] prepareForAdaptivePlayback(%dx%d)",
+ mComponentName.c_str(), maxWidth, maxHeight);
+
+ err = mOMX->prepareForAdaptivePlayback(
+ mNode, kPortIndexOutput, OMX_TRUE, maxWidth,
+ maxHeight);
+ ALOGW_IF(err != OK,
+ "[%s] prepareForAdaptivePlayback failed w/ err %d",
+ mComponentName.c_str(), err);
+
+ if (err == OK) {
+ inputFormat->setInt32("max-width", maxWidth);
+ inputFormat->setInt32("max-height", maxHeight);
+ inputFormat->setInt32("adaptive-playback", true);
+ }
+ }
+ // allow failure
+ err = OK;
+ } else {
+ ALOGV("[%s] storeMetaDataInBuffers succeeded",
+ mComponentName.c_str());
+ mStoreMetaDataInOutputBuffers = true;
+ inputFormat->setInt32("adaptive-playback", true);
+ }
+
+ int32_t push;
+ if (msg->findInt32("push-blank-buffers-on-shutdown", &push)
+ && push != 0) {
+ mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
}
- // allow failure
- err = OK;
- } else {
- ALOGV("[%s] storeMetaDataInBuffers succeeded", mComponentName.c_str());
- mStoreMetaDataInOutputBuffers = true;
- mIsConfiguredForAdaptivePlayback = true;
}
- int32_t push;
- if (msg->findInt32("push-blank-buffers-on-shutdown", &push)
- && push != 0) {
- mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
+ int32_t rotationDegrees;
+ if (msg->findInt32("rotation-degrees", &rotationDegrees)) {
+ mRotationDegrees = rotationDegrees;
+ } else {
+ mRotationDegrees = 0;
}
}
@@ -1193,13 +1367,7 @@ status_t ACodec::configureCodec(
if (encoder) {
err = setupVideoEncoder(mime, msg);
} else {
- int32_t width, height;
- if (!msg->findInt32("width", &width)
- || !msg->findInt32("height", &height)) {
- err = INVALID_OPERATION;
- } else {
- err = setupVideoDecoder(mime, width, height);
- }
+ err = setupVideoDecoder(mime, msg);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
int32_t numChannels, sampleRate;
@@ -1221,16 +1389,52 @@ status_t ACodec::configureCodec(
err = INVALID_OPERATION;
} else {
int32_t isADTS, aacProfile;
+ int32_t sbrMode;
+ int32_t maxOutputChannelCount;
+ int32_t pcmLimiterEnable;
+ drcParams_t drc;
if (!msg->findInt32("is-adts", &isADTS)) {
isADTS = 0;
}
if (!msg->findInt32("aac-profile", &aacProfile)) {
aacProfile = OMX_AUDIO_AACObjectNull;
}
+ if (!msg->findInt32("aac-sbr-mode", &sbrMode)) {
+ sbrMode = -1;
+ }
+
+ if (!msg->findInt32("aac-max-output-channel_count", &maxOutputChannelCount)) {
+ maxOutputChannelCount = -1;
+ }
+ if (!msg->findInt32("aac-pcm-limiter-enable", &pcmLimiterEnable)) {
+ // value is unknown
+ pcmLimiterEnable = -1;
+ }
+ if (!msg->findInt32("aac-encoded-target-level", &drc.encodedTargetLevel)) {
+ // value is unknown
+ drc.encodedTargetLevel = -1;
+ }
+ if (!msg->findInt32("aac-drc-cut-level", &drc.drcCut)) {
+ // value is unknown
+ drc.drcCut = -1;
+ }
+ if (!msg->findInt32("aac-drc-boost-level", &drc.drcBoost)) {
+ // value is unknown
+ drc.drcBoost = -1;
+ }
+ if (!msg->findInt32("aac-drc-heavy-compression", &drc.heavyCompression)) {
+ // value is unknown
+ drc.heavyCompression = -1;
+ }
+ if (!msg->findInt32("aac-target-ref-level", &drc.targetRefLevel)) {
+ // value is unknown
+ drc.targetRefLevel = -1;
+ }
err = setupAACCodec(
encoder, numChannels, sampleRate, bitRate, aacProfile,
- isADTS != 0);
+ isADTS != 0, sbrMode, maxOutputChannelCount, drc,
+ pcmLimiterEnable);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
err = setupAMRCodec(encoder, false /* isWAMR */, bitRate);
@@ -1257,8 +1461,10 @@ status_t ACodec::configureCodec(
} else {
if (encoder) {
if (!msg->findInt32(
+ "complexity", &compressionLevel) &&
+ !msg->findInt32(
"flac-compression-level", &compressionLevel)) {
- compressionLevel = 5;// default FLAC compression level
+ compressionLevel = 5; // default FLAC compression level
} else if (compressionLevel < 0) {
ALOGW("compression level %d outside [0..8] range, "
"using 0",
@@ -1319,6 +1525,11 @@ status_t ACodec::configureCodec(
err = setMinBufferSize(kPortIndexInput, 8192); // XXX
}
+ CHECK_EQ(getPortFormat(kPortIndexInput, inputFormat), (status_t)OK);
+ CHECK_EQ(getPortFormat(kPortIndexOutput, outputFormat), (status_t)OK);
+ mInputFormat = inputFormat;
+ mOutputFormat = outputFormat;
+
return err;
}
@@ -1387,7 +1598,9 @@ status_t ACodec::selectAudioPortFormat(
status_t ACodec::setupAACCodec(
bool encoder, int32_t numChannels, int32_t sampleRate,
- int32_t bitRate, int32_t aacProfile, bool isADTS) {
+ int32_t bitRate, int32_t aacProfile, bool isADTS, int32_t sbrMode,
+ int32_t maxOutputChannelCount, const drcParams_t& drc,
+ int32_t pcmLimiterEnable) {
if (encoder && isADTS) {
return -EINVAL;
}
@@ -1454,6 +1667,32 @@ status_t ACodec::setupAACCodec(
profile.nAACERtools = OMX_AUDIO_AACERNone;
profile.eAACProfile = (OMX_AUDIO_AACPROFILETYPE) aacProfile;
profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
+ switch (sbrMode) {
+ case 0:
+ // disable sbr
+ profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
+ profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
+ break;
+ case 1:
+ // enable single-rate sbr
+ profile.nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
+ profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
+ break;
+ case 2:
+ // enable dual-rate sbr
+ profile.nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
+ profile.nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
+ break;
+ case -1:
+ // enable both modes -> the codec will decide which mode should be used
+ profile.nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
+ profile.nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
+ break;
+ default:
+ // unsupported sbr mode
+ return BAD_VALUE;
+ }
+
err = mOMX->setParameter(
mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
@@ -1484,8 +1723,24 @@ status_t ACodec::setupAACCodec(
? OMX_AUDIO_AACStreamFormatMP4ADTS
: OMX_AUDIO_AACStreamFormatMP4FF;
- return mOMX->setParameter(
- mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+ OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation;
+ presentation.nMaxOutputChannels = maxOutputChannelCount;
+ presentation.nDrcCut = drc.drcCut;
+ presentation.nDrcBoost = drc.drcBoost;
+ presentation.nHeavyCompression = drc.heavyCompression;
+ presentation.nTargetReferenceLevel = drc.targetRefLevel;
+ presentation.nEncodedTargetLevel = drc.encodedTargetLevel;
+ presentation.nPCMLimiterEnable = pcmLimiterEnable;
+
+ status_t res = mOMX->setParameter(mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+ if (res == OK) {
+ // optional parameters, will not cause configuration failure
+ mOMX->setParameter(mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
+ &presentation, sizeof(presentation));
+ } else {
+ ALOGW("did not set AudioAndroidAacPresentation due to error %d when setting AudioAac", res);
+ }
+ return res;
}
status_t ACodec::setupAC3Codec(
@@ -1682,6 +1937,27 @@ status_t ACodec::setupRawAudioFormat(
mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
}
+status_t ACodec::configureTunneledVideoPlayback(
+ int32_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
+ native_handle_t* sidebandHandle;
+
+ status_t err = mOMX->configureVideoTunnelMode(
+ mNode, kPortIndexOutput, OMX_TRUE, audioHwSync, &sidebandHandle);
+ if (err != OK) {
+ ALOGE("configureVideoTunnelMode failed! (err %d).", err);
+ return err;
+ }
+
+ err = native_window_set_sideband_stream(nativeWindow.get(), sidebandHandle);
+ if (err != OK) {
+ ALOGE("native_window_set_sideband_stream(%p) failed! (err %d).",
+ sidebandHandle, err);
+ return err;
+ }
+
+ return OK;
+}
+
status_t ACodec::setVideoPortFormatType(
OMX_U32 portIndex,
OMX_VIDEO_CODINGTYPE compressionFormat,
@@ -1703,6 +1979,17 @@ status_t ACodec::setVideoPortFormatType(
return err;
}
+ // substitute back flexible color format to codec supported format
+ OMX_U32 flexibleEquivalent;
+ if (compressionFormat == OMX_VIDEO_CodingUnused &&
+ isFlexibleColorFormat(
+ mOMX, mNode, format.eColorFormat, &flexibleEquivalent) &&
+ colorFormat == flexibleEquivalent) {
+ ALOGI("[%s] using color format %#x in place of %#x",
+ mComponentName.c_str(), format.eColorFormat, colorFormat);
+ colorFormat = format.eColorFormat;
+ }
+
// The following assertion is violated by TI's video decoder.
// CHECK_EQ(format.nIndex, index);
@@ -1763,6 +2050,7 @@ static const struct VideoCodingMapEntry {
OMX_VIDEO_CODINGTYPE mVideoCodingType;
} kVideoCodingMapEntry[] = {
{ MEDIA_MIMETYPE_VIDEO_AVC, OMX_VIDEO_CodingAVC },
+ { MEDIA_MIMETYPE_VIDEO_HEVC, OMX_VIDEO_CodingHEVC },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, OMX_VIDEO_CodingMPEG4 },
{ MEDIA_MIMETYPE_VIDEO_H263, OMX_VIDEO_CodingH263 },
{ MEDIA_MIMETYPE_VIDEO_MPEG2, OMX_VIDEO_CodingMPEG2 },
@@ -1803,7 +2091,13 @@ static status_t GetMimeTypeForVideoCoding(
}
status_t ACodec::setupVideoDecoder(
- const char *mime, int32_t width, int32_t height) {
+ const char *mime, const sp<AMessage> &msg) {
+ int32_t width, height;
+ if (!msg->findInt32("width", &width)
+ || !msg->findInt32("height", &height)) {
+ return INVALID_OPERATION;
+ }
+
OMX_VIDEO_CODINGTYPE compressionFormat;
status_t err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
@@ -1818,7 +2112,20 @@ status_t ACodec::setupVideoDecoder(
return err;
}
- err = setSupportedOutputFormat();
+ int32_t tmp;
+ if (msg->findInt32("color-format", &tmp)) {
+ OMX_COLOR_FORMATTYPE colorFormat =
+ static_cast<OMX_COLOR_FORMATTYPE>(tmp);
+ err = setVideoPortFormatType(
+ kPortIndexOutput, OMX_VIDEO_CodingUnused, colorFormat);
+ if (err != OK) {
+ ALOGW("[%s] does not support color format %d",
+ mComponentName.c_str(), colorFormat);
+ err = setSupportedOutputFormat();
+ }
+ } else {
+ err = setSupportedOutputFormat();
+ }
if (err != OK) {
return err;
@@ -1909,6 +2216,7 @@ status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
return INVALID_OPERATION;
}
frameRate = (float)tmp;
+ mTimePerFrameUs = (int64_t) (1000000.0f / frameRate);
}
video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f);
@@ -1983,6 +2291,10 @@ status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
err = setupAVCEncoderParameters(msg);
break;
+ case OMX_VIDEO_CodingHEVC:
+ err = setupHEVCEncoderParameters(msg);
+ break;
+
case OMX_VIDEO_CodingVP8:
case OMX_VIDEO_CodingVP9:
err = setupVPXEncoderParameters(msg);
@@ -2041,7 +2353,6 @@ static OMX_U32 setPFramesSpacing(int32_t iFramesInterval, int32_t frameRate) {
return 0;
}
OMX_U32 ret = frameRate * iFramesInterval;
- CHECK(ret > 1);
return ret;
}
@@ -2211,6 +2522,58 @@ status_t ACodec::setupH263EncoderParameters(const sp<AMessage> &msg) {
return setupErrorCorrectionParameters();
}
+// static
+int /* OMX_VIDEO_AVCLEVELTYPE */ ACodec::getAVCLevelFor(
+ int width, int height, int rate, int bitrate,
+ OMX_VIDEO_AVCPROFILETYPE profile) {
+ // convert bitrate to main/baseline profile kbps equivalent
+ switch (profile) {
+ case OMX_VIDEO_AVCProfileHigh10:
+ bitrate = divUp(bitrate, 3000); break;
+ case OMX_VIDEO_AVCProfileHigh:
+ bitrate = divUp(bitrate, 1250); break;
+ default:
+ bitrate = divUp(bitrate, 1000); break;
+ }
+
+ // convert size and rate to MBs
+ width = divUp(width, 16);
+ height = divUp(height, 16);
+ int mbs = width * height;
+ rate *= mbs;
+ int maxDimension = max(width, height);
+
+ static const int limits[][5] = {
+ /* MBps MB dim bitrate level */
+ { 1485, 99, 28, 64, OMX_VIDEO_AVCLevel1 },
+ { 1485, 99, 28, 128, OMX_VIDEO_AVCLevel1b },
+ { 3000, 396, 56, 192, OMX_VIDEO_AVCLevel11 },
+ { 6000, 396, 56, 384, OMX_VIDEO_AVCLevel12 },
+ { 11880, 396, 56, 768, OMX_VIDEO_AVCLevel13 },
+ { 11880, 396, 56, 2000, OMX_VIDEO_AVCLevel2 },
+ { 19800, 792, 79, 4000, OMX_VIDEO_AVCLevel21 },
+ { 20250, 1620, 113, 4000, OMX_VIDEO_AVCLevel22 },
+ { 40500, 1620, 113, 10000, OMX_VIDEO_AVCLevel3 },
+ { 108000, 3600, 169, 14000, OMX_VIDEO_AVCLevel31 },
+ { 216000, 5120, 202, 20000, OMX_VIDEO_AVCLevel32 },
+ { 245760, 8192, 256, 20000, OMX_VIDEO_AVCLevel4 },
+ { 245760, 8192, 256, 50000, OMX_VIDEO_AVCLevel41 },
+ { 522240, 8704, 263, 50000, OMX_VIDEO_AVCLevel42 },
+ { 589824, 22080, 420, 135000, OMX_VIDEO_AVCLevel5 },
+ { 983040, 36864, 543, 240000, OMX_VIDEO_AVCLevel51 },
+ { 2073600, 36864, 543, 240000, OMX_VIDEO_AVCLevel52 },
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE(limits); i++) {
+ const int (&limit)[5] = limits[i];
+ if (rate <= limit[0] && mbs <= limit[1] && maxDimension <= limit[2]
+ && bitrate <= limit[3]) {
+ return limit[4];
+ }
+ }
+ return 0;
+}
+
status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) {
int32_t bitrate, iFrameInterval;
if (!msg->findInt32("bitrate", &bitrate)
@@ -2319,14 +2682,139 @@ status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) {
return configureBitrate(bitrate, bitrateMode);
}
+status_t ACodec::setupHEVCEncoderParameters(const sp<AMessage> &msg) {
+ int32_t bitrate, iFrameInterval;
+ if (!msg->findInt32("bitrate", &bitrate)
+ || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+ return INVALID_OPERATION;
+ }
+
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+
+ float frameRate;
+ if (!msg->findFloat("frame-rate", &frameRate)) {
+ int32_t tmp;
+ if (!msg->findInt32("frame-rate", &tmp)) {
+ return INVALID_OPERATION;
+ }
+ frameRate = (float)tmp;
+ }
+
+ OMX_VIDEO_PARAM_HEVCTYPE hevcType;
+ InitOMXParams(&hevcType);
+ hevcType.nPortIndex = kPortIndexOutput;
+
+ status_t err = OK;
+ err = mOMX->getParameter(
+ mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
+ if (err != OK) {
+ return err;
+ }
+
+ int32_t profile;
+ if (msg->findInt32("profile", &profile)) {
+ int32_t level;
+ if (!msg->findInt32("level", &level)) {
+ return INVALID_OPERATION;
+ }
+
+ err = verifySupportForProfileAndLevel(profile, level);
+ if (err != OK) {
+ return err;
+ }
+
+ hevcType.eProfile = static_cast<OMX_VIDEO_HEVCPROFILETYPE>(profile);
+ hevcType.eLevel = static_cast<OMX_VIDEO_HEVCLEVELTYPE>(level);
+ }
+
+ // TODO: Need OMX structure definition for setting iFrameInterval
+
+ err = mOMX->setParameter(
+ mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
+ if (err != OK) {
+ return err;
+ }
+
+ return configureBitrate(bitrate, bitrateMode);
+}
+
status_t ACodec::setupVPXEncoderParameters(const sp<AMessage> &msg) {
int32_t bitrate;
+ int32_t iFrameInterval = 0;
+ size_t tsLayers = 0;
+ OMX_VIDEO_ANDROID_VPXTEMPORALLAYERPATTERNTYPE pattern =
+ OMX_VIDEO_VPXTemporalLayerPatternNone;
+ static const uint32_t kVp8LayerRateAlloction
+ [OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS]
+ [OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS] = {
+ {100, 100, 100}, // 1 layer
+ { 60, 100, 100}, // 2 layers {60%, 40%}
+ { 40, 60, 100}, // 3 layers {40%, 20%, 40%}
+ };
if (!msg->findInt32("bitrate", &bitrate)) {
return INVALID_OPERATION;
}
+ msg->findInt32("i-frame-interval", &iFrameInterval);
OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+ float frameRate;
+ if (!msg->findFloat("frame-rate", &frameRate)) {
+ int32_t tmp;
+ if (!msg->findInt32("frame-rate", &tmp)) {
+ return INVALID_OPERATION;
+ }
+ frameRate = (float)tmp;
+ }
+
+ AString tsSchema;
+ if (msg->findString("ts-schema", &tsSchema)) {
+ if (tsSchema == "webrtc.vp8.1-layer") {
+ pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
+ tsLayers = 1;
+ } else if (tsSchema == "webrtc.vp8.2-layer") {
+ pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
+ tsLayers = 2;
+ } else if (tsSchema == "webrtc.vp8.3-layer") {
+ pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
+ tsLayers = 3;
+ } else {
+ ALOGW("Unsupported ts-schema [%s]", tsSchema.c_str());
+ }
+ }
+
+ OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
+ InitOMXParams(&vp8type);
+ vp8type.nPortIndex = kPortIndexOutput;
+ status_t err = mOMX->getParameter(
+ mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+ &vp8type, sizeof(vp8type));
+
+ if (err == OK) {
+ if (iFrameInterval > 0) {
+ vp8type.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate);
+ }
+ vp8type.eTemporalPattern = pattern;
+ vp8type.nTemporalLayerCount = tsLayers;
+ if (tsLayers > 0) {
+ for (size_t i = 0; i < OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS; i++) {
+ vp8type.nTemporalLayerBitrateRatio[i] =
+ kVp8LayerRateAlloction[tsLayers - 1][i];
+ }
+ }
+ if (bitrateMode == OMX_Video_ControlRateConstant) {
+ vp8type.nMinQuantizer = 2;
+ vp8type.nMaxQuantizer = 63;
+ }
+
+ err = mOMX->setParameter(
+ mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+ &vp8type, sizeof(vp8type));
+ if (err != OK) {
+ ALOGW("Extended VP8 parameters set failed: %d", err);
+ }
+ }
+
return configureBitrate(bitrate, bitrateMode);
}
@@ -2482,19 +2970,7 @@ void ACodec::waitUntilAllPossibleNativeWindowBuffersAreReturnedToUs() {
return;
}
- int minUndequeuedBufs = 0;
- status_t err = mNativeWindow->query(
- mNativeWindow.get(), NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
- &minUndequeuedBufs);
-
- if (err != OK) {
- ALOGE("[%s] NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS query failed: %s (%d)",
- mComponentName.c_str(), strerror(-err), -err);
-
- minUndequeuedBufs = 0;
- }
-
- while (countBuffersOwnedByNativeWindow() > (size_t)minUndequeuedBufs
+ while (countBuffersOwnedByNativeWindow() > mNumUndequeuedBuffers
&& dequeueBufferFromNativeWindow() != NULL) {
// these buffers will be submitted as regular buffers; account for this
if (mStoreMetaDataInOutputBuffers && mMetaDataBuffersToSubmit > 0) {
@@ -2510,7 +2986,7 @@ bool ACodec::allYourBuffersAreBelongToUs(
if (info->mStatus != BufferInfo::OWNED_BY_US
&& info->mStatus != BufferInfo::OWNED_BY_NATIVE_WINDOW) {
- ALOGV("[%s] Buffer %p on port %ld still has status %d",
+ ALOGV("[%s] Buffer %u on port %u still has status %d",
mComponentName.c_str(),
info->mBufferID, portIndex, info->mStatus);
return false;
@@ -2540,79 +3016,291 @@ void ACodec::processDeferredMessages() {
}
}
-void ACodec::sendFormatChange(const sp<AMessage> &reply) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatOutputFormatChanged);
+// static
+bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
+ MediaImage &image = params.sMediaImage;
+ memset(&image, 0, sizeof(image));
+
+ image.mType = MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
+ image.mNumPlanes = 0;
+
+ const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
+ image.mWidth = params.nFrameWidth;
+ image.mHeight = params.nFrameHeight;
+
+ // only supporting YUV420
+ if (fmt != OMX_COLOR_FormatYUV420Planar &&
+ fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
+ fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
+ fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar) {
+ ALOGW("do not know color format 0x%x = %d", fmt, fmt);
+ return false;
+ }
+
+ // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
+ if (params.nStride != 0 && params.nSliceHeight == 0) {
+ ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
+ params.nFrameHeight);
+ params.nSliceHeight = params.nFrameHeight;
+ }
+
+ // we need stride and slice-height to be non-zero
+ if (params.nStride == 0 || params.nSliceHeight == 0) {
+ ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
+ fmt, fmt, params.nStride, params.nSliceHeight);
+ return false;
+ }
+
+ // set-up YUV format
+ image.mType = MediaImage::MEDIA_IMAGE_TYPE_YUV;
+ image.mNumPlanes = 3;
+ image.mBitDepth = 8;
+ image.mPlane[image.Y].mOffset = 0;
+ image.mPlane[image.Y].mColInc = 1;
+ image.mPlane[image.Y].mRowInc = params.nStride;
+ image.mPlane[image.Y].mHorizSubsampling = 1;
+ image.mPlane[image.Y].mVertSubsampling = 1;
+
+ switch (fmt) {
+ case OMX_COLOR_FormatYUV420Planar: // used for YV12
+ case OMX_COLOR_FormatYUV420PackedPlanar:
+ image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+ image.mPlane[image.U].mColInc = 1;
+ image.mPlane[image.U].mRowInc = params.nStride / 2;
+ image.mPlane[image.U].mHorizSubsampling = 2;
+ image.mPlane[image.U].mVertSubsampling = 2;
+
+ image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
+ + (params.nStride * params.nSliceHeight / 4);
+ image.mPlane[image.V].mColInc = 1;
+ image.mPlane[image.V].mRowInc = params.nStride / 2;
+ image.mPlane[image.V].mHorizSubsampling = 2;
+ image.mPlane[image.V].mVertSubsampling = 2;
+ break;
+
+ case OMX_COLOR_FormatYUV420SemiPlanar:
+ // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
+ case OMX_COLOR_FormatYUV420PackedSemiPlanar:
+ // NV12
+ image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+ image.mPlane[image.U].mColInc = 2;
+ image.mPlane[image.U].mRowInc = params.nStride;
+ image.mPlane[image.U].mHorizSubsampling = 2;
+ image.mPlane[image.U].mVertSubsampling = 2;
+
+ image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
+ image.mPlane[image.V].mColInc = 2;
+ image.mPlane[image.V].mRowInc = params.nStride;
+ image.mPlane[image.V].mHorizSubsampling = 2;
+ image.mPlane[image.V].mVertSubsampling = 2;
+ break;
+
+ default:
+ TRESPASS();
+ }
+ return true;
+}
+
+// static
+bool ACodec::describeColorFormat(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ DescribeColorFormatParams &describeParams)
+{
+ OMX_INDEXTYPE describeColorFormatIndex;
+ if (omx->getExtensionIndex(
+ node, "OMX.google.android.index.describeColorFormat",
+ &describeColorFormatIndex) != OK ||
+ omx->getParameter(
+ node, describeColorFormatIndex,
+ &describeParams, sizeof(describeParams)) != OK) {
+ return describeDefaultColorFormat(describeParams);
+ }
+ return describeParams.sMediaImage.mType !=
+ MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
+}
+
+// static
+bool ACodec::isFlexibleColorFormat(
+ const sp<IOMX> &omx, IOMX::node_id node,
+ uint32_t colorFormat, OMX_U32 *flexibleEquivalent) {
+ DescribeColorFormatParams describeParams;
+ InitOMXParams(&describeParams);
+ describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
+ // reasonable dummy values
+ describeParams.nFrameWidth = 128;
+ describeParams.nFrameHeight = 128;
+ describeParams.nStride = 128;
+ describeParams.nSliceHeight = 128;
+
+ CHECK(flexibleEquivalent != NULL);
+
+ if (!describeColorFormat(omx, node, describeParams)) {
+ return false;
+ }
+
+ const MediaImage &img = describeParams.sMediaImage;
+ if (img.mType == MediaImage::MEDIA_IMAGE_TYPE_YUV) {
+ if (img.mNumPlanes != 3 ||
+ img.mPlane[img.Y].mHorizSubsampling != 1 ||
+ img.mPlane[img.Y].mVertSubsampling != 1) {
+ return false;
+ }
+
+ // YUV 420
+ if (img.mPlane[img.U].mHorizSubsampling == 2
+ && img.mPlane[img.U].mVertSubsampling == 2
+ && img.mPlane[img.V].mHorizSubsampling == 2
+ && img.mPlane[img.V].mVertSubsampling == 2) {
+ // possible flexible YUV420 format
+ if (img.mBitDepth <= 8) {
+ *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
+ // TODO: catch errors an return them instead of using CHECK
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
- def.nPortIndex = kPortIndexOutput;
+ def.nPortIndex = portIndex;
CHECK_EQ(mOMX->getParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)),
(status_t)OK);
- CHECK_EQ((int)def.eDir, (int)OMX_DirOutput);
+ CHECK_EQ((int)def.eDir,
+ (int)(portIndex == kPortIndexOutput ? OMX_DirOutput : OMX_DirInput));
switch (def.eDomain) {
case OMX_PortDomainVideo:
{
OMX_VIDEO_PORTDEFINITIONTYPE *videoDef = &def.format.video;
+ switch ((int)videoDef->eCompressionFormat) {
+ case OMX_VIDEO_CodingUnused:
+ {
+ CHECK(mIsEncoder ^ (portIndex == kPortIndexOutput));
+ notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RAW);
+
+ notify->setInt32("stride", videoDef->nStride);
+ notify->setInt32("slice-height", videoDef->nSliceHeight);
+ notify->setInt32("color-format", videoDef->eColorFormat);
+
+ DescribeColorFormatParams describeParams;
+ InitOMXParams(&describeParams);
+ describeParams.eColorFormat = videoDef->eColorFormat;
+ describeParams.nFrameWidth = videoDef->nFrameWidth;
+ describeParams.nFrameHeight = videoDef->nFrameHeight;
+ describeParams.nStride = videoDef->nStride;
+ describeParams.nSliceHeight = videoDef->nSliceHeight;
+
+ if (describeColorFormat(mOMX, mNode, describeParams)) {
+ notify->setBuffer(
+ "image-data",
+ ABuffer::CreateAsCopy(
+ &describeParams.sMediaImage,
+ sizeof(describeParams.sMediaImage)));
+ }
- AString mime;
- if (!mIsEncoder) {
- notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RAW);
- } else if (GetMimeTypeForVideoCoding(
- videoDef->eCompressionFormat, &mime) != OK) {
- notify->setString("mime", "application/octet-stream");
- } else {
- notify->setString("mime", mime.c_str());
- }
+ if (portIndex != kPortIndexOutput) {
+ // TODO: also get input crop
+ break;
+ }
- notify->setInt32("width", videoDef->nFrameWidth);
- notify->setInt32("height", videoDef->nFrameHeight);
+ OMX_CONFIG_RECTTYPE rect;
+ InitOMXParams(&rect);
+ rect.nPortIndex = portIndex;
+
+ if (mOMX->getConfig(
+ mNode,
+ (portIndex == kPortIndexOutput ?
+ OMX_IndexConfigCommonOutputCrop :
+ OMX_IndexConfigCommonInputCrop),
+ &rect, sizeof(rect)) != OK) {
+ rect.nLeft = 0;
+ rect.nTop = 0;
+ rect.nWidth = videoDef->nFrameWidth;
+ rect.nHeight = videoDef->nFrameHeight;
+ }
- if (!mIsEncoder) {
- notify->setInt32("stride", videoDef->nStride);
- notify->setInt32("slice-height", videoDef->nSliceHeight);
- notify->setInt32("color-format", videoDef->eColorFormat);
-
- OMX_CONFIG_RECTTYPE rect;
- InitOMXParams(&rect);
- rect.nPortIndex = kPortIndexOutput;
-
- if (mOMX->getConfig(
- mNode, OMX_IndexConfigCommonOutputCrop,
- &rect, sizeof(rect)) != OK) {
- rect.nLeft = 0;
- rect.nTop = 0;
- rect.nWidth = videoDef->nFrameWidth;
- rect.nHeight = videoDef->nFrameHeight;
- }
+ CHECK_GE(rect.nLeft, 0);
+ CHECK_GE(rect.nTop, 0);
+ CHECK_GE(rect.nWidth, 0u);
+ CHECK_GE(rect.nHeight, 0u);
+ CHECK_LE(rect.nLeft + rect.nWidth - 1, videoDef->nFrameWidth);
+ CHECK_LE(rect.nTop + rect.nHeight - 1, videoDef->nFrameHeight);
- CHECK_GE(rect.nLeft, 0);
- CHECK_GE(rect.nTop, 0);
- CHECK_GE(rect.nWidth, 0u);
- CHECK_GE(rect.nHeight, 0u);
- CHECK_LE(rect.nLeft + rect.nWidth - 1, videoDef->nFrameWidth);
- CHECK_LE(rect.nTop + rect.nHeight - 1, videoDef->nFrameHeight);
-
- notify->setRect(
- "crop",
- rect.nLeft,
- rect.nTop,
- rect.nLeft + rect.nWidth - 1,
- rect.nTop + rect.nHeight - 1);
-
- if (mNativeWindow != NULL) {
- reply->setRect(
+ notify->setRect(
"crop",
rect.nLeft,
rect.nTop,
- rect.nLeft + rect.nWidth,
- rect.nTop + rect.nHeight);
+ rect.nLeft + rect.nWidth - 1,
+ rect.nTop + rect.nHeight - 1);
+
+ break;
+ }
+
+ case OMX_VIDEO_CodingVP8:
+ case OMX_VIDEO_CodingVP9:
+ {
+ OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
+ InitOMXParams(&vp8type);
+ vp8type.nPortIndex = kPortIndexOutput;
+ status_t err = mOMX->getParameter(
+ mNode,
+ (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+ &vp8type,
+ sizeof(vp8type));
+
+ if (err == OK) {
+ AString tsSchema = "none";
+ if (vp8type.eTemporalPattern
+ == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+ switch (vp8type.nTemporalLayerCount) {
+ case 1:
+ {
+ tsSchema = "webrtc.vp8.1-layer";
+ break;
+ }
+ case 2:
+ {
+ tsSchema = "webrtc.vp8.2-layer";
+ break;
+ }
+ case 3:
+ {
+ tsSchema = "webrtc.vp8.3-layer";
+ break;
+ }
+ default:
+ {
+ break;
+ }
+ }
+ }
+ notify->setString("ts-schema", tsSchema);
+ }
+ // Fall through to set up mime.
+ }
+
+ default:
+ {
+ CHECK(mIsEncoder ^ (portIndex == kPortIndexInput));
+ AString mime;
+ if (GetMimeTypeForVideoCoding(
+ videoDef->eCompressionFormat, &mime) != OK) {
+ notify->setString("mime", "application/octet-stream");
+ } else {
+ notify->setString("mime", mime.c_str());
+ }
+ break;
}
}
+
+ notify->setInt32("width", videoDef->nFrameWidth);
+ notify->setInt32("height", videoDef->nFrameHeight);
break;
}
@@ -2625,7 +3313,7 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
{
OMX_AUDIO_PARAM_PCMMODETYPE params;
InitOMXParams(&params);
- params.nPortIndex = kPortIndexOutput;
+ params.nPortIndex = portIndex;
CHECK_EQ(mOMX->getParameter(
mNode, OMX_IndexParamAudioPcm,
@@ -2645,20 +3333,6 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSamplingRate);
- if (mEncoderDelay + mEncoderPadding) {
- size_t frameSize = params.nChannels * sizeof(int16_t);
- if (mSkipCutBuffer != NULL) {
- size_t prevbufsize = mSkipCutBuffer->size();
- if (prevbufsize != 0) {
- ALOGW("Replacing SkipCutBuffer holding %d "
- "bytes",
- prevbufsize);
- }
- }
- mSkipCutBuffer = new SkipCutBuffer(
- mEncoderDelay * frameSize,
- mEncoderPadding * frameSize);
- }
if (mChannelMaskPresent) {
notify->setInt32("channel-mask", mChannelMask);
@@ -2670,7 +3344,7 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
{
OMX_AUDIO_PARAM_AACPROFILETYPE params;
InitOMXParams(&params);
- params.nPortIndex = kPortIndexOutput;
+ params.nPortIndex = portIndex;
CHECK_EQ(mOMX->getParameter(
mNode, OMX_IndexParamAudioAac,
@@ -2687,7 +3361,7 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
{
OMX_AUDIO_PARAM_AMRTYPE params;
InitOMXParams(&params);
- params.nPortIndex = kPortIndexOutput;
+ params.nPortIndex = portIndex;
CHECK_EQ(mOMX->getParameter(
mNode, OMX_IndexParamAudioAmr,
@@ -2713,7 +3387,7 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
{
OMX_AUDIO_PARAM_FLACTYPE params;
InitOMXParams(&params);
- params.nPortIndex = kPortIndexOutput;
+ params.nPortIndex = portIndex;
CHECK_EQ(mOMX->getParameter(
mNode, OMX_IndexParamAudioFlac,
@@ -2726,11 +3400,45 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
break;
}
+ case OMX_AUDIO_CodingMP3:
+ {
+ OMX_AUDIO_PARAM_MP3TYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ CHECK_EQ(mOMX->getParameter(
+ mNode, OMX_IndexParamAudioMp3,
+ &params, sizeof(params)),
+ (status_t)OK);
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_MPEG);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+
+ case OMX_AUDIO_CodingVORBIS:
+ {
+ OMX_AUDIO_PARAM_VORBISTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ CHECK_EQ(mOMX->getParameter(
+ mNode, OMX_IndexParamAudioVorbis,
+ &params, sizeof(params)),
+ (status_t)OK);
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_VORBIS);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+
case OMX_AUDIO_CodingAndroidAC3:
{
OMX_AUDIO_PARAM_ANDROID_AC3TYPE params;
InitOMXParams(&params);
- params.nPortIndex = kPortIndexOutput;
+ params.nPortIndex = portIndex;
CHECK_EQ((status_t)OK, mOMX->getParameter(
mNode,
@@ -2744,7 +3452,52 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
break;
}
+ case OMX_AUDIO_CodingAndroidOPUS:
+ {
+ OMX_AUDIO_PARAM_ANDROID_OPUSTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ CHECK_EQ((status_t)OK, mOMX->getParameter(
+ mNode,
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus,
+ &params,
+ sizeof(params)));
+
+ notify->setString("mime", MEDIA_MIMETYPE_AUDIO_OPUS);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSampleRate);
+ break;
+ }
+
+ case OMX_AUDIO_CodingG711:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = portIndex;
+
+ CHECK_EQ((status_t)OK, mOMX->getParameter(
+ mNode,
+ (OMX_INDEXTYPE)OMX_IndexParamAudioPcm,
+ &params,
+ sizeof(params)));
+
+ const char *mime = NULL;
+ if (params.ePCMMode == OMX_AUDIO_PCMModeMULaw) {
+ mime = MEDIA_MIMETYPE_AUDIO_G711_MLAW;
+ } else if (params.ePCMMode == OMX_AUDIO_PCMModeALaw) {
+ mime = MEDIA_MIMETYPE_AUDIO_G711_ALAW;
+ } else { // params.ePCMMode == OMX_AUDIO_PCMModeLinear
+ mime = MEDIA_MIMETYPE_AUDIO_RAW;
+ }
+ notify->setString("mime", mime);
+ notify->setInt32("channel-count", params.nChannels);
+ notify->setInt32("sample-rate", params.nSamplingRate);
+ break;
+ }
+
default:
+ ALOGE("UNKNOWN AUDIO CODING: %d\n", audioDef->eEncoding);
TRESPASS();
}
break;
@@ -2754,6 +3507,43 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
TRESPASS();
}
+ return OK;
+}
+
+void ACodec::sendFormatChange(const sp<AMessage> &reply) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatOutputFormatChanged);
+
+ CHECK_EQ(getPortFormat(kPortIndexOutput, notify), (status_t)OK);
+
+ AString mime;
+ CHECK(notify->findString("mime", &mime));
+
+ int32_t left, top, right, bottom;
+ if (mime == MEDIA_MIMETYPE_VIDEO_RAW &&
+ mNativeWindow != NULL &&
+ notify->findRect("crop", &left, &top, &right, &bottom)) {
+ // notify renderer of the crop change
+ // NOTE: native window uses extended right-bottom coordinate
+ reply->setRect("crop", left, top, right + 1, bottom + 1);
+ } else if (mime == MEDIA_MIMETYPE_AUDIO_RAW &&
+ (mEncoderDelay || mEncoderPadding)) {
+ int32_t channelCount;
+ CHECK(notify->findInt32("channel-count", &channelCount));
+ size_t frameSize = channelCount * sizeof(int16_t);
+ if (mSkipCutBuffer != NULL) {
+ size_t prevbufsize = mSkipCutBuffer->size();
+ if (prevbufsize != 0) {
+                ALOGW("Replacing SkipCutBuffer holding %zu "
+                        "bytes",
+                        prevbufsize);
+ }
+ }
+ mSkipCutBuffer = new SkipCutBuffer(
+ mEncoderDelay * frameSize,
+ mEncoderPadding * frameSize);
+ }
+
notify->post();
mSentFormat = true;
@@ -2761,9 +3551,19 @@ void ACodec::sendFormatChange(const sp<AMessage> &reply) {
void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", ACodec::kWhatError);
- notify->setInt32("omx-error", error);
+ notify->setInt32("what", CodecBase::kWhatError);
+ ALOGE("signalError(omxError %#x, internalError %d)", error, internalError);
+
+ if (internalError == UNKNOWN_ERROR) { // find better error code
+ const status_t omxStatus = statusFromOMXError(error);
+ if (omxStatus != 0) {
+ internalError = omxStatus;
+ } else {
+ ALOGW("Invalid OMX error %#x", error);
+ }
+ }
notify->setInt32("err", internalError);
+ notify->setInt32("actionCode", ACTION_CODE_FATAL); // could translate from OMX error.
notify->post();
}
@@ -2960,7 +3760,8 @@ ACodec::BaseState::BaseState(ACodec *codec, const sp<AState> &parentState)
mCodec(codec) {
}
-ACodec::BaseState::PortMode ACodec::BaseState::getPortMode(OMX_U32 portIndex) {
+ACodec::BaseState::PortMode ACodec::BaseState::getPortMode(
+ OMX_U32 /* portIndex */) {
return KEEP_BUFFERS;
}
@@ -2986,6 +3787,7 @@ bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) {
case ACodec::kWhatCreateInputSurface:
case ACodec::kWhatSignalEndOfInputStream:
{
+ // This may result in an app illegal state exception.
ALOGE("Message 0x%x was not handled", msg->what());
mCodec->signalError(OMX_ErrorUndefined, INVALID_OPERATION);
return true;
@@ -2993,11 +3795,25 @@ bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) {
case ACodec::kWhatOMXDied:
{
+ // This will result in kFlagSawMediaServerDie handling in MediaCodec.
ALOGE("OMX/mediaserver died, signalling error!");
mCodec->signalError(OMX_ErrorResourcesLost, DEAD_OBJECT);
break;
}
+ case ACodec::kWhatReleaseCodecInstance:
+ {
+ ALOGI("[%s] forcing the release of codec",
+ mCodec->mComponentName.c_str());
+ status_t err = mCodec->mOMX->freeNode(mCodec->mNode);
+                status_t err = mCodec->mOMX->freeNode(mCodec->mNode);
+                ALOGE_IF(err != OK, "[%s] failed to release codec instance: err=%d",
+                        mCodec->mComponentName.c_str(), err);
+ sp<AMessage> notify = mCodec->mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
+ notify->post();
+ break;
+ }
+
default:
return false;
}
@@ -3009,8 +3825,16 @@ bool ACodec::BaseState::onOMXMessage(const sp<AMessage> &msg) {
int32_t type;
CHECK(msg->findInt32("type", &type));
+ // there is a possibility that this is an outstanding message for a
+ // codec that we have already destroyed
+ if (mCodec->mNode == NULL) {
+ ALOGI("ignoring message as already freed component: %s",
+ msg->debugString().c_str());
+ return true;
+ }
+
IOMX::node_id nodeID;
- CHECK(msg->findPointer("node", &nodeID));
+ CHECK(msg->findInt32("node", (int32_t*)&nodeID));
CHECK_EQ(nodeID, mCodec->mNode);
switch (type) {
@@ -3041,7 +3865,7 @@ bool ACodec::BaseState::onOMXMessage(const sp<AMessage> &msg) {
case omx_message::EMPTY_BUFFER_DONE:
{
IOMX::buffer_id bufferID;
- CHECK(msg->findPointer("buffer", &bufferID));
+ CHECK(msg->findInt32("buffer", (int32_t*)&bufferID));
return onOMXEmptyBufferDone(bufferID);
}
@@ -3049,27 +3873,21 @@ bool ACodec::BaseState::onOMXMessage(const sp<AMessage> &msg) {
case omx_message::FILL_BUFFER_DONE:
{
IOMX::buffer_id bufferID;
- CHECK(msg->findPointer("buffer", &bufferID));
+ CHECK(msg->findInt32("buffer", (int32_t*)&bufferID));
int32_t rangeOffset, rangeLength, flags;
int64_t timeUs;
- void *platformPrivate;
- void *dataPtr;
CHECK(msg->findInt32("range_offset", &rangeOffset));
CHECK(msg->findInt32("range_length", &rangeLength));
CHECK(msg->findInt32("flags", &flags));
CHECK(msg->findInt64("timestamp", &timeUs));
- CHECK(msg->findPointer("platform_private", &platformPrivate));
- CHECK(msg->findPointer("data_ptr", &dataPtr));
return onOMXFillBufferDone(
bufferID,
(size_t)rangeOffset, (size_t)rangeLength,
(OMX_U32)flags,
- timeUs,
- platformPrivate,
- dataPtr);
+ timeUs);
}
default:
@@ -3089,7 +3907,13 @@ bool ACodec::BaseState::onOMXEvent(
ALOGE("[%s] ERROR(0x%08lx)", mCodec->mComponentName.c_str(), data1);
- mCodec->signalError((OMX_ERRORTYPE)data1);
+ // verify OMX component sends back an error we expect.
+ OMX_ERRORTYPE omxError = (OMX_ERRORTYPE)data1;
+ if (!isOMXError(omxError)) {
+ ALOGW("Invalid OMX error %#x", omxError);
+ omxError = OMX_ErrorUndefined;
+ }
+ mCodec->signalError(omxError);
return true;
}
@@ -3104,23 +3928,12 @@ bool ACodec::BaseState::onOMXEmptyBufferDone(IOMX::buffer_id bufferID) {
CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_COMPONENT);
info->mStatus = BufferInfo::OWNED_BY_US;
- const sp<AMessage> &bufferMeta = info->mData->meta();
- void *mediaBuffer;
- if (bufferMeta->findPointer("mediaBuffer", &mediaBuffer)
- && mediaBuffer != NULL) {
- // We're in "store-metadata-in-buffers" mode, the underlying
- // OMX component had access to data that's implicitly refcounted
- // by this "mediaBuffer" object. Now that the OMX component has
- // told us that it's done with the input buffer, we can decrement
- // the mediaBuffer's reference count.
-
- ALOGV("releasing mbuf %p", mediaBuffer);
-
- ((MediaBuffer *)mediaBuffer)->release();
- mediaBuffer = NULL;
-
- bufferMeta->setPointer("mediaBuffer", NULL);
- }
+ // We're in "store-metadata-in-buffers" mode, the underlying
+ // OMX component had access to data that's implicitly refcounted
+ // by this "MediaBuffer" object. Now that the OMX component has
+ // told us that it's done with the input buffer, we can decrement
+ // the mediaBuffer's reference count.
+ info->mData->setMediaBufferBase(NULL);
PortMode mode = getPortMode(kPortIndexInput);
@@ -3151,14 +3964,14 @@ void ACodec::BaseState::postFillThisBuffer(BufferInfo *info) {
CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatFillThisBuffer);
- notify->setPointer("buffer-id", info->mBufferID);
+ notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
+ notify->setInt32("buffer-id", info->mBufferID);
info->mData->meta()->clear();
notify->setBuffer("buffer", info->mData);
sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec->id());
- reply->setPointer("buffer-id", info->mBufferID);
+ reply->setInt32("buffer-id", info->mBufferID);
notify->setMessage("reply", reply);
@@ -3169,8 +3982,7 @@ void ACodec::BaseState::postFillThisBuffer(BufferInfo *info) {
void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) {
IOMX::buffer_id bufferID;
- CHECK(msg->findPointer("buffer-id", &bufferID));
-
+ CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
sp<ABuffer> buffer;
int32_t err = OK;
bool eos = false;
@@ -3274,8 +4086,7 @@ void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) {
(outputMode == FREE_BUFFERS ? "FREE" :
outputMode == KEEP_BUFFERS ? "KEEP" : "RESUBMIT"));
if (outputMode == RESUBMIT_BUFFERS) {
- CHECK_EQ(mCodec->submitOutputMetaDataBuffer(),
- (status_t)OK);
+ mCodec->submitOutputMetaDataBuffer();
}
}
@@ -3368,10 +4179,8 @@ bool ACodec::BaseState::onOMXFillBufferDone(
IOMX::buffer_id bufferID,
size_t rangeOffset, size_t rangeLength,
OMX_U32 flags,
- int64_t timeUs,
- void *platformPrivate,
- void *dataPtr) {
- ALOGV("[%s] onOMXFillBufferDone %p time %lld us, flags = 0x%08lx",
+ int64_t timeUs) {
+ ALOGV("[%s] onOMXFillBufferDone %u time %" PRId64 " us, flags = 0x%08x",
mCodec->mComponentName.c_str(), bufferID, timeUs, flags);
ssize_t index;
@@ -3408,7 +4217,7 @@ bool ACodec::BaseState::onOMXFillBufferDone(
case RESUBMIT_BUFFERS:
{
if (rangeLength == 0 && !(flags & OMX_BUFFERFLAG_EOS)) {
- ALOGV("[%s] calling fillBuffer %p",
+ ALOGV("[%s] calling fillBuffer %u",
mCodec->mComponentName.c_str(), info->mBufferID);
CHECK_EQ(mCodec->mOMX->fillBuffer(
@@ -3422,7 +4231,7 @@ bool ACodec::BaseState::onOMXFillBufferDone(
sp<AMessage> reply =
new AMessage(kWhatOutputBufferDrained, mCodec->id());
- if (!mCodec->mSentFormat) {
+ if (!mCodec->mSentFormat && rangeLength > 0) {
mCodec->sendFormatChange(reply);
}
@@ -3449,12 +4258,12 @@ bool ACodec::BaseState::onOMXFillBufferDone(
info->mData->meta()->setInt64("timeUs", timeUs);
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatDrainThisBuffer);
- notify->setPointer("buffer-id", info->mBufferID);
+ notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
+ notify->setInt32("buffer-id", info->mBufferID);
notify->setBuffer("buffer", info->mData);
notify->setInt32("flags", flags);
- reply->setPointer("buffer-id", info->mBufferID);
+ reply->setInt32("buffer-id", info->mBufferID);
notify->setMessage("reply", reply);
@@ -3466,7 +4275,7 @@ bool ACodec::BaseState::onOMXFillBufferDone(
ALOGV("[%s] saw output EOS", mCodec->mComponentName.c_str());
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatEOS);
+ notify->setInt32("what", CodecBase::kWhatEOS);
notify->setInt32("err", mCodec->mInputEOSResult);
notify->post();
@@ -3490,8 +4299,7 @@ bool ACodec::BaseState::onOMXFillBufferDone(
void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) {
IOMX::buffer_id bufferID;
- CHECK(msg->findPointer("buffer-id", &bufferID));
-
+ CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
ssize_t index;
BufferInfo *info =
mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
@@ -3507,19 +4315,43 @@ void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) {
int32_t render;
if (mCodec->mNativeWindow != NULL
&& msg->findInt32("render", &render) && render != 0
- && (info->mData == NULL || info->mData->size() != 0)) {
+ && info->mData != NULL && info->mData->size() != 0) {
+ ATRACE_NAME("render");
// The client wants this buffer to be rendered.
+ int64_t timestampNs = 0;
+ if (!msg->findInt64("timestampNs", &timestampNs)) {
+ // TODO: it seems like we should use the timestamp
+ // in the (media)buffer as it potentially came from
+ // an input surface, but we did not propagate it prior to
+ // API 20. Perhaps check for target SDK version.
+#if 0
+ if (info->mData->meta()->findInt64("timeUs", &timestampNs)) {
+ ALOGV("using buffer PTS of %" PRId64, timestampNs);
+ timestampNs *= 1000;
+ }
+#endif
+ }
+
status_t err;
+ err = native_window_set_buffers_timestamp(mCodec->mNativeWindow.get(), timestampNs);
+ if (err != OK) {
+ ALOGW("failed to set buffer timestamp: %d", err);
+ }
+
if ((err = mCodec->mNativeWindow->queueBuffer(
mCodec->mNativeWindow.get(),
info->mGraphicBuffer.get(), -1)) == OK) {
info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
} else {
- mCodec->signalError(OMX_ErrorUndefined, err);
+ mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
info->mStatus = BufferInfo::OWNED_BY_US;
}
} else {
+ if (mCodec->mNativeWindow != NULL &&
+ (info->mData == NULL || info->mData->size() != 0)) {
+ ATRACE_NAME("frame-drop");
+ }
info->mStatus = BufferInfo::OWNED_BY_US;
}
@@ -3550,7 +4382,7 @@ void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) {
}
if (info != NULL) {
- ALOGV("[%s] calling fillBuffer %p",
+ ALOGV("[%s] calling fillBuffer %u",
mCodec->mComponentName.c_str(), info->mBufferID);
CHECK_EQ(mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID),
@@ -3620,10 +4452,11 @@ bool ACodec::UninitializedState::onMessageReceived(const sp<AMessage> &msg) {
int32_t keepComponentAllocated;
CHECK(msg->findInt32(
"keepComponentAllocated", &keepComponentAllocated));
- CHECK(!keepComponentAllocated);
+ ALOGW_IF(keepComponentAllocated,
+ "cannot keep component allocated on shutdown in Uninitialized state");
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatShutdownCompleted);
+ notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
notify->post();
handled = true;
@@ -3633,13 +4466,20 @@ bool ACodec::UninitializedState::onMessageReceived(const sp<AMessage> &msg) {
case ACodec::kWhatFlush:
{
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatFlushCompleted);
+ notify->setInt32("what", CodecBase::kWhatFlushCompleted);
notify->post();
handled = true;
break;
}
+ case ACodec::kWhatReleaseCodecInstance:
+ {
+ // nothing to do, as we have already signaled shutdown
+ handled = true;
+ break;
+ }
+
default:
return BaseState::onMessageReceived(msg);
}
@@ -3680,6 +4520,7 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
AString componentName;
uint32_t quirks = 0;
+ int32_t encoder = false;
if (msg->findString("componentName", &componentName)) {
ssize_t index = matchingCodecs.add();
OMXCodec::CodecNameAndQuirks *entry = &matchingCodecs.editItemAt(index);
@@ -3692,7 +4533,6 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
} else {
CHECK(msg->findString("mime", &mime));
- int32_t encoder;
if (!msg->findInt32("encoder", &encoder)) {
encoder = false;
}
@@ -3721,6 +4561,8 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
if (err == OK) {
break;
+ } else {
+ ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
}
node = NULL;
@@ -3728,10 +4570,10 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
if (node == NULL) {
if (!mime.empty()) {
- ALOGE("Unable to instantiate a decoder for type '%s'.",
- mime.c_str());
+ ALOGE("Unable to instantiate a %scoder for type '%s'.",
+ encoder ? "en" : "de", mime.c_str());
} else {
- ALOGE("Unable to instantiate decoder '%s'.", componentName.c_str());
+ ALOGE("Unable to instantiate codec '%s'.", componentName.c_str());
}
mCodec->signalError(OMX_ErrorComponentNotFound);
@@ -3755,7 +4597,7 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
{
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatComponentAllocated);
+ notify->setInt32("what", CodecBase::kWhatComponentAllocated);
notify->setString("componentName", mCodec->mComponentName.c_str());
notify->post();
}
@@ -3782,7 +4624,8 @@ void ACodec::LoadedState::stateEntered() {
mCodec->mDequeueCounter = 0;
mCodec->mMetaDataBuffersToSubmit = 0;
mCodec->mRepeatFrameDelayUs = -1ll;
- mCodec->mIsConfiguredForAdaptivePlayback = false;
+ mCodec->mInputFormat.clear();
+ mCodec->mOutputFormat.clear();
if (mCodec->mShutdownInProgress) {
bool keepComponentAllocated = mCodec->mKeepComponentAllocated;
@@ -3792,6 +4635,9 @@ void ACodec::LoadedState::stateEntered() {
onShutdown(keepComponentAllocated);
}
+ mCodec->mExplicitShutdown = false;
+
+ mCodec->processDeferredMessages();
}
void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) {
@@ -3801,9 +4647,12 @@ void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) {
mCodec->changeState(mCodec->mUninitializedState);
}
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatShutdownCompleted);
- notify->post();
+ if (mCodec->mExplicitShutdown) {
+ sp<AMessage> notify = mCodec->mNotify->dup();
+ notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
+ notify->post();
+ mCodec->mExplicitShutdown = false;
+ }
}
bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) {
@@ -3837,6 +4686,7 @@ bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->findInt32(
"keepComponentAllocated", &keepComponentAllocated));
+ mCodec->mExplicitShutdown = true;
onShutdown(keepComponentAllocated);
handled = true;
@@ -3846,7 +4696,7 @@ bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) {
case ACodec::kWhatFlush:
{
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatFlushCompleted);
+ notify->setInt32("what", CodecBase::kWhatFlushCompleted);
notify->post();
handled = true;
@@ -3875,7 +4725,7 @@ bool ACodec::LoadedState::onConfigureComponent(
ALOGE("[%s] configureCodec returning error %d",
mCodec->mComponentName.c_str(), err);
- mCodec->signalError(OMX_ErrorUndefined, err);
+ mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
return false;
}
@@ -3895,7 +4745,9 @@ bool ACodec::LoadedState::onConfigureComponent(
{
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatComponentConfigured);
+ notify->setInt32("what", CodecBase::kWhatComponentConfigured);
+ notify->setMessage("input-format", mCodec->mInputFormat);
+ notify->setMessage("output-format", mCodec->mOutputFormat);
notify->post();
}
@@ -3903,11 +4755,11 @@ bool ACodec::LoadedState::onConfigureComponent(
}
void ACodec::LoadedState::onCreateInputSurface(
- const sp<AMessage> &msg) {
+ const sp<AMessage> & /* msg */) {
ALOGV("onCreateInputSurface");
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatInputSurfaceCreated);
+ notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
sp<IGraphicBufferProducer> bufferProducer;
status_t err;
@@ -3931,7 +4783,7 @@ void ACodec::LoadedState::onCreateInputSurface(
}
}
- if (err == OK && mCodec->mMaxPtsGapUs > 0l) {
+ if (err == OK && mCodec->mMaxPtsGapUs > 0ll) {
err = mCodec->mOMX->setInternalOption(
mCodec->mNode,
kPortIndexInput,
@@ -3941,6 +4793,41 @@ void ACodec::LoadedState::onCreateInputSurface(
if (err != OK) {
ALOGE("[%s] Unable to configure max timestamp gap (err %d)",
+ mCodec->mComponentName.c_str(),
+ err);
+ }
+ }
+
+ if (err == OK && mCodec->mTimePerCaptureUs > 0ll
+ && mCodec->mTimePerFrameUs > 0ll) {
+ int64_t timeLapse[2];
+ timeLapse[0] = mCodec->mTimePerFrameUs;
+ timeLapse[1] = mCodec->mTimePerCaptureUs;
+ err = mCodec->mOMX->setInternalOption(
+ mCodec->mNode,
+ kPortIndexInput,
+ IOMX::INTERNAL_OPTION_TIME_LAPSE,
+ &timeLapse[0],
+ sizeof(timeLapse));
+
+ if (err != OK) {
+ ALOGE("[%s] Unable to configure time lapse (err %d)",
+ mCodec->mComponentName.c_str(),
+ err);
+ }
+ }
+
+ if (err == OK && mCodec->mCreateInputBuffersSuspended) {
+ bool suspend = true;
+ err = mCodec->mOMX->setInternalOption(
+ mCodec->mNode,
+ kPortIndexInput,
+ IOMX::INTERNAL_OPTION_SUSPEND,
+ &suspend,
+ sizeof(suspend));
+
+ if (err != OK) {
+ ALOGE("[%s] Unable to configure option to suspend (err %d)",
mCodec->mComponentName.c_str(),
err);
}
@@ -3985,7 +4872,7 @@ void ACodec::LoadedToIdleState::stateEntered() {
"(error 0x%08x)",
err);
- mCodec->signalError(OMX_ErrorUndefined, err);
+ mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
mCodec->changeState(mCodec->mLoadedState);
}
@@ -4003,6 +4890,7 @@ status_t ACodec::LoadedToIdleState::allocateBuffers() {
bool ACodec::LoadedToIdleState::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
+ case kWhatSetParameters:
case kWhatShutdown:
{
mCodec->deferMessage(msg);
@@ -4025,7 +4913,7 @@ bool ACodec::LoadedToIdleState::onMessageReceived(const sp<AMessage> &msg) {
{
// We haven't even started yet, so we're flushed alright...
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatFlushCompleted);
+ notify->setInt32("what", CodecBase::kWhatFlushCompleted);
notify->post();
return true;
}
@@ -4069,6 +4957,7 @@ void ACodec::IdleToExecutingState::stateEntered() {
bool ACodec::IdleToExecutingState::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
+ case kWhatSetParameters:
case kWhatShutdown:
{
mCodec->deferMessage(msg);
@@ -4085,7 +4974,7 @@ bool ACodec::IdleToExecutingState::onMessageReceived(const sp<AMessage> &msg) {
{
// We haven't even started yet, so we're flushed alright...
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatFlushCompleted);
+ notify->setInt32("what", CodecBase::kWhatFlushCompleted);
notify->post();
return true;
@@ -4129,7 +5018,7 @@ ACodec::ExecutingState::ExecutingState(ACodec *codec)
}
ACodec::BaseState::PortMode ACodec::ExecutingState::getPortMode(
- OMX_U32 portIndex) {
+ OMX_U32 /* portIndex */) {
return RESUBMIT_BUFFERS;
}
@@ -4191,11 +5080,14 @@ void ACodec::ExecutingState::resume() {
submitOutputBuffers();
- // Post the first input buffer.
+ // Post all available input buffers
CHECK_GT(mCodec->mBuffers[kPortIndexInput].size(), 0u);
- BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(0);
-
- postFillThisBuffer(info);
+ for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); i++) {
+ BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
+ if (info->mStatus == BufferInfo::OWNED_BY_US) {
+ postFillThisBuffer(info);
+ }
+ }
mActive = true;
}
@@ -4217,6 +5109,7 @@ bool ACodec::ExecutingState::onMessageReceived(const sp<AMessage> &msg) {
"keepComponentAllocated", &keepComponentAllocated));
mCodec->mShutdownInProgress = true;
+ mCodec->mExplicitShutdown = true;
mCodec->mKeepComponentAllocated = keepComponentAllocated;
mActive = false;
@@ -4338,6 +5231,22 @@ status_t ACodec::setParameters(const sp<AMessage> &params) {
}
}
+ int64_t skipFramesBeforeUs;
+ if (params->findInt64("skip-frames-before", &skipFramesBeforeUs)) {
+ status_t err =
+ mOMX->setInternalOption(
+ mNode,
+ kPortIndexInput,
+ IOMX::INTERNAL_OPTION_START_TIME,
+ &skipFramesBeforeUs,
+ sizeof(skipFramesBeforeUs));
+
+ if (err != OK) {
+ ALOGE("Failed to set parameter 'skip-frames-before' (err %d)", err);
+ return err;
+ }
+ }
+
int32_t dropInputFrames;
if (params->findInt32("drop-input-frames", &dropInputFrames)) {
bool suspend = dropInputFrames != 0;
@@ -4371,7 +5280,7 @@ status_t ACodec::setParameters(const sp<AMessage> &params) {
void ACodec::onSignalEndOfInputStream() {
sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", ACodec::kWhatSignaledInputEOS);
+ notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
status_t err = mOMX->signalEndOfInputStream(mNode);
if (err != OK) {
@@ -4491,7 +5400,7 @@ bool ACodec::OutputPortSettingsChangedState::onOMXEvent(
"port reconfiguration (error 0x%08x)",
err);
- mCodec->signalError(OMX_ErrorUndefined, err);
+ mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
// This is technically not correct, but appears to be
// the only way to free the component instance.
@@ -4760,7 +5669,7 @@ bool ACodec::FlushingState::onOMXEvent(
{
sp<AMessage> msg = new AMessage(kWhatOMXMessage, mCodec->id());
msg->setInt32("type", omx_message::EVENT);
- msg->setPointer("node", mCodec->mNode);
+ msg->setInt32("node", mCodec->mNode);
msg->setInt32("event", event);
msg->setInt32("data1", data1);
msg->setInt32("data2", data2);
@@ -4801,7 +5710,7 @@ void ACodec::FlushingState::changeStateIfWeOwnAllBuffers() {
mCodec->waitUntilAllPossibleNativeWindowBuffersAreReturnedToUs();
sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", ACodec::kWhatFlushCompleted);
+ notify->setInt32("what", CodecBase::kWhatFlushCompleted);
notify->post();
mCodec->mPortEOS[kPortIndexInput] =
diff --git a/media/libstagefright/AMRExtractor.cpp b/media/libstagefright/AMRExtractor.cpp
index 03dcbf9..a6fb3d8 100644
--- a/media/libstagefright/AMRExtractor.cpp
+++ b/media/libstagefright/AMRExtractor.cpp
@@ -189,7 +189,7 @@ sp<MediaSource> AMRExtractor::getTrack(size_t index) {
mOffsetTable, mOffsetTableLength);
}
-sp<MetaData> AMRExtractor::getTrackMetaData(size_t index, uint32_t flags) {
+sp<MetaData> AMRExtractor::getTrackMetaData(size_t index, uint32_t /* flags */) {
if (mInitCheck != OK || index != 0) {
return NULL;
}
@@ -221,7 +221,7 @@ AMRSource::~AMRSource() {
}
}
-status_t AMRSource::start(MetaData *params) {
+status_t AMRSource::start(MetaData * /* params */) {
CHECK(!mStarted);
mOffset = mIsWide ? 9 : 6;
@@ -258,14 +258,14 @@ status_t AMRSource::read(
int64_t seekFrame = seekTimeUs / 20000ll; // 20ms per frame.
mCurrentTimeUs = seekFrame * 20000ll;
- int index = seekFrame / 50;
+ size_t index = seekFrame < 0 ? 0 : seekFrame / 50;
if (index >= mOffsetTableLength) {
index = mOffsetTableLength - 1;
}
mOffset = mOffsetTable[index] + (mIsWide ? 9 : 6);
- for (int i = 0; i< seekFrame - index * 50; i++) {
+    for (size_t i = 0; seekFrame > 0 && i < (size_t)(seekFrame - index * 50); i++) {
status_t err;
if ((err = getFrameSizeByOffset(mDataSource, mOffset,
mIsWide, &size)) != OK) {
diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp
index 8d5eec8..9aa7d95 100644
--- a/media/libstagefright/AMRWriter.cpp
+++ b/media/libstagefright/AMRWriter.cpp
@@ -14,6 +14,12 @@
* limitations under the License.
*/
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/AMRWriter.h>
#include <media/stagefright/MediaBuffer.h>
@@ -22,10 +28,6 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/mediarecorder.h>
-#include <sys/prctl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
namespace android {
@@ -105,7 +107,7 @@ status_t AMRWriter::addSource(const sp<MediaSource> &source) {
return OK;
}
-status_t AMRWriter::start(MetaData *params) {
+status_t AMRWriter::start(MetaData * /* params */) {
if (mInitCheck != OK) {
return mInitCheck;
}
@@ -162,7 +164,7 @@ status_t AMRWriter::reset() {
void *dummy;
pthread_join(mThread, &dummy);
- status_t err = (status_t) dummy;
+ status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
{
status_t status = mSource->stop();
if (err == OK &&
@@ -191,7 +193,7 @@ bool AMRWriter::exceedsFileDurationLimit() {
// static
void *AMRWriter::ThreadWrapper(void *me) {
- return (void *) static_cast<AMRWriter *>(me)->threadFunc();
+ return (void *)(uintptr_t) static_cast<AMRWriter *>(me)->threadFunc();
}
status_t AMRWriter::threadFunc() {
@@ -235,7 +237,7 @@ status_t AMRWriter::threadFunc() {
mResumed = false;
}
timestampUs -= previousPausedDurationUs;
- ALOGV("time stamp: %lld, previous paused duration: %lld",
+ ALOGV("time stamp: %" PRId64 ", previous paused duration: %" PRId64,
timestampUs, previousPausedDurationUs);
if (timestampUs > maxTimestampUs) {
maxTimestampUs = timestampUs;
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 6a2a696..193f8a7 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -14,7 +14,10 @@ LOCAL_SRC_FILES:= \
AwesomePlayer.cpp \
CameraSource.cpp \
CameraSourceTimeLapse.cpp \
+ ClockEstimator.cpp \
+ CodecBase.cpp \
DataSource.cpp \
+ DataURISource.cpp \
DRMExtractor.cpp \
ESDS.cpp \
FileSource.cpp \
@@ -30,8 +33,10 @@ LOCAL_SRC_FILES:= \
MediaBufferGroup.cpp \
MediaCodec.cpp \
MediaCodecList.cpp \
+ MediaCodecSource.cpp \
MediaDefs.cpp \
MediaExtractor.cpp \
+ http/MediaHTTP.cpp \
MediaMuxer.cpp \
MediaSource.cpp \
MetaData.cpp \
@@ -55,22 +60,23 @@ LOCAL_SRC_FILES:= \
WVMExtractor.cpp \
XINGSeeker.cpp \
avc_utils.cpp \
- mp4/FragmentedMP4Parser.cpp \
- mp4/TrackFragment.cpp \
LOCAL_C_INCLUDES:= \
+ $(TOP)/frameworks/av/include/media/ \
$(TOP)/frameworks/av/include/media/stagefright/timedtext \
$(TOP)/frameworks/native/include/media/hardware \
$(TOP)/frameworks/native/include/media/openmax \
- $(TOP)/frameworks/native/services/connectivitymanager \
$(TOP)/external/flac/include \
$(TOP)/external/tremolo \
$(TOP)/external/openssl/include \
+ $(TOP)/external/libvpx/libwebm \
+ $(TOP)/system/netd/include \
+ $(TOP)/external/icu/icu4c/source/common \
+ $(TOP)/external/icu/icu4c/source/i18n \
LOCAL_SHARED_LIBRARIES := \
libbinder \
libcamera_client \
- libconnectivitymanager \
libcutils \
libdl \
libdrmframework \
@@ -80,6 +86,8 @@ LOCAL_SHARED_LIBRARIES := \
libicuuc \
liblog \
libmedia \
+ libnetd_client \
+ libopus \
libsonivox \
libssl \
libstagefright_omx \
@@ -95,6 +103,7 @@ LOCAL_STATIC_LIBRARIES := \
libstagefright_color_conversion \
libstagefright_aacenc \
libstagefright_matroska \
+ libstagefright_webm \
libstagefright_timedtext \
libvpx \
libwebm \
@@ -103,13 +112,6 @@ LOCAL_STATIC_LIBRARIES := \
libFLAC \
libmedia_helper
-LOCAL_SRC_FILES += \
- chromium_http_stub.cpp
-LOCAL_CPPFLAGS += -DCHROMIUM_AVAILABLE=1
-
-LOCAL_SHARED_LIBRARIES += libstlport
-include external/stlport/libstlport.mk
-
LOCAL_SHARED_LIBRARIES += \
libstagefright_enc_common \
libstagefright_avc_common \
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 05ee34e..e24824b 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <inttypes.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "AudioPlayer"
#include <utils/Log.h>
@@ -21,6 +23,7 @@
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
+#include <media/openmax/OMX_Audio.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/AudioPlayer.h>
@@ -139,6 +142,12 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) {
} else {
ALOGV("Mime type \"%s\" mapped to audio_format 0x%x", mime, audioFormat);
}
+
+ int32_t aacaot = -1;
+ if ((audioFormat == AUDIO_FORMAT_AAC) && format->findInt32(kKeyAACAOT, &aacaot)) {
+ // Redefine AAC format corresponding to AAC profile
+ mapAACProfileToAudioFormat(audioFormat,(OMX_AUDIO_AACPROFILETYPE) aacaot);
+ }
}
int avgBitRate = -1;
@@ -221,7 +230,8 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) {
mAudioTrack = new AudioTrack(
AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
- 0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
+ 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this,
+ 0 /*notificationFrames*/);
if ((err = mAudioTrack->initCheck()) != OK) {
mAudioTrack.clear();
@@ -410,7 +420,7 @@ status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) {
// static
size_t AudioPlayer::AudioSinkCallback(
- MediaPlayerBase::AudioSink *audioSink,
+ MediaPlayerBase::AudioSink * /* audioSink */,
void *buffer, size_t size, void *cookie,
MediaPlayerBase::AudioSink::cb_event_t event) {
AudioPlayer *me = (AudioPlayer *)cookie;
@@ -565,12 +575,12 @@ size_t AudioPlayer::fillBuffer(void *data, size_t size) {
int64_t timeToCompletionUs =
(1000000ll * numFramesPendingPlayout) / mSampleRate;
- ALOGV("total number of frames played: %lld (%lld us)",
+ ALOGV("total number of frames played: %" PRId64 " (%lld us)",
(mNumFramesPlayed + numAdditionalFrames),
1000000ll * (mNumFramesPlayed + numAdditionalFrames)
/ mSampleRate);
- ALOGV("%d frames left to play, %lld us (%.2f secs)",
+ ALOGV("%d frames left to play, %" PRId64 " us (%.2f secs)",
numFramesPendingPlayout,
timeToCompletionUs, timeToCompletionUs / 1E6);
@@ -627,7 +637,7 @@ size_t AudioPlayer::fillBuffer(void *data, size_t size) {
mPositionTimeRealUs =
((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
/ mSampleRate;
- ALOGV("buffer->size() = %d, "
+ ALOGV("buffer->size() = %zu, "
"mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
mInputBuffer->range_length(),
mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
@@ -745,7 +755,7 @@ int64_t AudioPlayer::getOutputPlayPositionUs_l()
// HAL position is relative to the first buffer we sent at mStartPosUs
const int64_t renderedDuration = mStartPosUs + playedUs;
- ALOGV("getOutputPlayPositionUs_l %lld", renderedDuration);
+ ALOGV("getOutputPlayPositionUs_l %" PRId64, renderedDuration);
return renderedDuration;
}
@@ -756,8 +766,13 @@ int64_t AudioPlayer::getMediaTimeUs() {
if (mSeeking) {
return mSeekTimeUs;
}
+ if (mReachedEOS) {
+ int64_t durationUs;
+ mSource->getFormat()->findInt64(kKeyDuration, &durationUs);
+ return durationUs;
+ }
mPositionTimeRealUs = getOutputPlayPositionUs_l();
- ALOGV("getMediaTimeUs getOutputPlayPositionUs_l() mPositionTimeRealUs %lld",
+ ALOGV("getMediaTimeUs getOutputPlayPositionUs_l() mPositionTimeRealUs %" PRId64,
mPositionTimeRealUs);
return mPositionTimeRealUs;
}
@@ -795,7 +810,7 @@ bool AudioPlayer::getMediaTimeMapping(
status_t AudioPlayer::seekTo(int64_t time_us) {
Mutex::Autolock autoLock(mLock);
- ALOGV("seekTo( %lld )", time_us);
+ ALOGV("seekTo( %" PRId64 " )", time_us);
mSeeking = true;
mPositionTimeRealUs = mPositionTimeMediaUs = -1;
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index d7223d9..804f131 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <inttypes.h>
+#include <stdlib.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "AudioSource"
#include <utils/Log.h>
@@ -26,7 +29,6 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <cutils/properties.h>
-#include <stdlib.h>
namespace android {
@@ -65,10 +67,10 @@ AudioSource::AudioSource(
if (status == OK) {
// make sure that the AudioRecord callback never returns more than the maximum
// buffer size
- int frameCount = kMaxBufferSize / sizeof(int16_t) / channelCount;
+ uint32_t frameCount = kMaxBufferSize / sizeof(int16_t) / channelCount;
// make sure that the AudioRecord total buffer size is large enough
- int bufCount = 2;
+ size_t bufCount = 2;
while ((bufCount * frameCount) < minFrameCount) {
bufCount++;
}
@@ -76,10 +78,10 @@ AudioSource::AudioSource(
mRecord = new AudioRecord(
inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
audio_channel_in_mask_from_count(channelCount),
- bufCount * frameCount,
+ (size_t) (bufCount * frameCount),
AudioRecordCallbackFunction,
this,
- frameCount);
+ frameCount /*notificationFrames*/);
mInitCheck = mRecord->initCheck();
} else {
mInitCheck = status;
@@ -136,7 +138,7 @@ void AudioSource::releaseQueuedFrames_l() {
}
void AudioSource::waitOutstandingEncodingFrames_l() {
- ALOGV("waitOutstandingEncodingFrames_l: %lld", mNumClientOwnedBuffers);
+ ALOGV("waitOutstandingEncodingFrames_l: %" PRId64, mNumClientOwnedBuffers);
while (mNumClientOwnedBuffers > 0) {
mFrameEncodingCompletionCondition.wait(mLock);
}
@@ -153,6 +155,8 @@ status_t AudioSource::reset() {
}
mStarted = false;
+ mFrameAvailableCondition.signal();
+
mRecord->stop();
waitOutstandingEncodingFrames_l();
releaseQueuedFrames_l();
@@ -208,7 +212,7 @@ void AudioSource::rampVolume(
}
status_t AudioSource::read(
- MediaBuffer **out, const ReadOptions *options) {
+ MediaBuffer **out, const ReadOptions * /* options */) {
Mutex::Autolock autoLock(mLock);
*out = NULL;
@@ -269,7 +273,7 @@ void AudioSource::signalBufferReturned(MediaBuffer *buffer) {
status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
int64_t timeUs = systemTime() / 1000ll;
- ALOGV("dataCallbackTimestamp: %lld us", timeUs);
+ ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
Mutex::Autolock autoLock(mLock);
if (!mStarted) {
ALOGW("Spurious callback from AudioRecord. Drop the audio data.");
@@ -278,8 +282,8 @@ status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
// Drop retrieved and previously lost audio data.
if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
- mRecord->getInputFramesLost();
- ALOGV("Drop audio data at %lld/%lld us", timeUs, mStartTimeUs);
+ (void) mRecord->getInputFramesLost();
+ ALOGV("Drop audio data at %" PRId64 "/%" PRId64 " us", timeUs, mStartTimeUs);
return OK;
}
@@ -308,7 +312,7 @@ status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
if (numLostBytes > 0) {
// Loss of audio frames should happen rarely; thus the LOGW should
// not cause a logging spam
- ALOGW("Lost audio record data: %d bytes", numLostBytes);
+ ALOGW("Lost audio record data: %zu bytes", numLostBytes);
}
while (numLostBytes > 0) {
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index aae6800..ab8ac79 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -19,6 +19,9 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "AwesomePlayer"
#define ATRACE_TAG ATRACE_TAG_VIDEO
+
+#include <inttypes.h>
+
#include <utils/Log.h>
#include <utils/Trace.h>
@@ -34,16 +37,20 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
#include <media/IMediaPlayerService.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/timedtext/TimedTextDriver.h>
#include <media/stagefright/AudioPlayer.h>
+#include <media/stagefright/ClockEstimator.h>
#include <media/stagefright/DataSource.h>
#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaHTTP.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/OMXCodec.h>
@@ -82,7 +89,7 @@ struct AwesomeEvent : public TimedEventQueue::Event {
protected:
virtual ~AwesomeEvent() {}
- virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
+ virtual void fire(TimedEventQueue * /* queue */, int64_t /* now_us */) {
(mPlayer->*mMethod)();
}
@@ -96,17 +103,21 @@ private:
struct AwesomeLocalRenderer : public AwesomeRenderer {
AwesomeLocalRenderer(
- const sp<ANativeWindow> &nativeWindow, const sp<MetaData> &meta)
- : mTarget(new SoftwareRenderer(nativeWindow, meta)) {
+ const sp<ANativeWindow> &nativeWindow, const sp<AMessage> &format)
+ : mFormat(format),
+ mTarget(new SoftwareRenderer(nativeWindow)) {
}
virtual void render(MediaBuffer *buffer) {
+ int64_t timeUs;
+ CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
+
render((const uint8_t *)buffer->data() + buffer->range_offset(),
- buffer->range_length());
+ buffer->range_length(), timeUs * 1000);
}
- void render(const void *data, size_t size) {
- mTarget->render(data, size, NULL);
+ void render(const void *data, size_t size, int64_t timestampNs) {
+ mTarget->render(data, size, timestampNs, NULL, mFormat);
}
protected:
@@ -116,6 +127,7 @@ protected:
}
private:
+ sp<AMessage> mFormat;
SoftwareRenderer *mTarget;
AwesomeLocalRenderer(const AwesomeLocalRenderer &);
@@ -227,6 +239,8 @@ AwesomePlayer::AwesomePlayer()
&AwesomePlayer::onAudioTearDownEvent);
mAudioTearDownEventPending = false;
+ mClockEstimator = new WindowedLinearFitEstimator();
+
reset();
}
@@ -276,15 +290,20 @@ void AwesomePlayer::setUID(uid_t uid) {
}
status_t AwesomePlayer::setDataSource(
- const char *uri, const KeyedVector<String8, String8> *headers) {
+ const sp<IMediaHTTPService> &httpService,
+ const char *uri,
+ const KeyedVector<String8, String8> *headers) {
Mutex::Autolock autoLock(mLock);
- return setDataSource_l(uri, headers);
+ return setDataSource_l(httpService, uri, headers);
}
status_t AwesomePlayer::setDataSource_l(
- const char *uri, const KeyedVector<String8, String8> *headers) {
+ const sp<IMediaHTTPService> &httpService,
+ const char *uri,
+ const KeyedVector<String8, String8> *headers) {
reset_l();
+ mHTTPService = httpService;
mUri = uri;
if (headers) {
@@ -301,7 +320,7 @@ status_t AwesomePlayer::setDataSource_l(
}
}
- ALOGI("setDataSource_l(URL suppressed)");
+ ALOGI("setDataSource_l(%s)", uriDebugString(mUri, mFlags & INCOGNITO).c_str());
// The actual work will be done during preparation in the call to
// ::finishSetDataSource_l to avoid blocking the calling thread in
@@ -393,6 +412,13 @@ status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
totalBitRate += bitrate;
}
+ sp<MetaData> fileMeta = mExtractor->getMetaData();
+ if (fileMeta != NULL) {
+ int64_t duration;
+ if (fileMeta->findInt64(kKeyDuration, &duration)) {
+ mDurationUs = duration;
+ }
+ }
mBitrate = totalBitRate;
@@ -581,6 +607,7 @@ void AwesomePlayer::reset_l() {
mSeekNotificationSent = true;
mSeekTimeUs = 0;
+ mHTTPService.clear();
mUri.setTo("");
mUriHeaders.clear();
@@ -708,11 +735,9 @@ void AwesomePlayer::onBufferingUpdate() {
finishAsyncPrepare_l();
}
} else {
- int64_t bitrate;
- if (getBitrate(&bitrate)) {
- size_t cachedSize = mCachedSource->cachedSize();
- int64_t cachedDurationUs = cachedSize * 8000000ll / bitrate;
-
+ bool eos2;
+ int64_t cachedDurationUs;
+ if (getCachedDuration_l(&cachedDurationUs, &eos2) && mDurationUs > 0) {
int percentage = 100.0 * (double)cachedDurationUs / mDurationUs;
if (percentage > 100) {
percentage = 100;
@@ -720,12 +745,12 @@ void AwesomePlayer::onBufferingUpdate() {
notifyListener_l(MEDIA_BUFFERING_UPDATE, percentage);
} else {
- // We don't know the bitrate of the stream, use absolute size
+ // We don't know the bitrate/duration of the stream, use absolute size
// limits to maintain the cache.
if ((mFlags & PLAYING) && !eos
&& (cachedDataRemaining < kLowWaterMarkBytes)) {
- ALOGI("cache is running low (< %d) , pausing.",
+ ALOGI("cache is running low (< %zu) , pausing.",
kLowWaterMarkBytes);
modifyFlags(CACHE_UNDERRUN, SET);
pause_l();
@@ -734,12 +759,12 @@ void AwesomePlayer::onBufferingUpdate() {
notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START);
} else if (eos || cachedDataRemaining > kHighWaterMarkBytes) {
if (mFlags & CACHE_UNDERRUN) {
- ALOGI("cache has filled up (> %d), resuming.",
+ ALOGI("cache has filled up (> %zu), resuming.",
kHighWaterMarkBytes);
modifyFlags(CACHE_UNDERRUN, CLEAR);
play_l();
} else if (mFlags & PREPARING) {
- ALOGV("cache has filled up (> %d), prepare is done",
+ ALOGV("cache has filled up (> %zu), prepare is done",
kHighWaterMarkBytes);
finishAsyncPrepare_l();
}
@@ -1213,7 +1238,9 @@ void AwesomePlayer::initRenderer_l() {
// allocate their buffers in local address space. This renderer
// then performs a color conversion and copy to get the data
// into the ANativeBuffer.
- mVideoRenderer = new AwesomeLocalRenderer(mNativeWindow, meta);
+ sp<AMessage> format;
+ convertMetaDataToMessage(meta, &format);
+ mVideoRenderer = new AwesomeLocalRenderer(mNativeWindow, format);
}
}
@@ -1482,7 +1509,7 @@ void AwesomePlayer::addTextSource_l(size_t trackIndex, const sp<MediaSource>& so
CHECK(source != NULL);
if (mTextDriver == NULL) {
- mTextDriver = new TimedTextDriver(mListener);
+ mTextDriver = new TimedTextDriver(mListener, mHTTPService);
}
mTextDriver->addInBandTextSource(trackIndex, source);
@@ -1694,7 +1721,7 @@ void AwesomePlayer::finishSeekIfNecessary(int64_t videoTimeUs) {
}
if (mAudioPlayer != NULL) {
- ALOGV("seeking audio to %lld us (%.2f secs).", videoTimeUs, videoTimeUs / 1E6);
+ ALOGV("seeking audio to %" PRId64 " us (%.2f secs).", videoTimeUs, videoTimeUs / 1E6);
// If we don't have a video time, seek audio to the originally
// requested seek time instead.
@@ -1758,7 +1785,7 @@ void AwesomePlayer::onVideoEvent() {
if (!mVideoBuffer) {
MediaSource::ReadOptions options;
if (mSeeking != NO_SEEK) {
- ALOGV("seeking to %lld us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);
+ ALOGV("seeking to %" PRId64 " us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);
options.setSeekTo(
mSeekTimeUs,
@@ -1828,7 +1855,7 @@ void AwesomePlayer::onVideoEvent() {
if (mSeeking == SEEK_VIDEO_ONLY) {
if (mSeekTimeUs > timeUs) {
- ALOGI("XXX mSeekTimeUs = %lld us, timeUs = %lld us",
+ ALOGI("XXX mSeekTimeUs = %" PRId64 " us, timeUs = %" PRId64 " us",
mSeekTimeUs, timeUs);
}
}
@@ -1858,21 +1885,28 @@ void AwesomePlayer::onVideoEvent() {
TimeSource *ts =
((mFlags & AUDIO_AT_EOS) || !(mFlags & AUDIOPLAYER_STARTED))
? &mSystemTimeSource : mTimeSource;
+ int64_t systemTimeUs = mSystemTimeSource.getRealTimeUs();
+ int64_t looperTimeUs = ALooper::GetNowUs();
if (mFlags & FIRST_FRAME) {
modifyFlags(FIRST_FRAME, CLEAR);
mSinceLastDropped = 0;
- mTimeSourceDeltaUs = ts->getRealTimeUs() - timeUs;
+ mClockEstimator->reset();
+ mTimeSourceDeltaUs = estimateRealTimeUs(ts, systemTimeUs) - timeUs;
}
int64_t realTimeUs, mediaTimeUs;
if (!(mFlags & AUDIO_AT_EOS) && mAudioPlayer != NULL
&& mAudioPlayer->getMediaTimeMapping(&realTimeUs, &mediaTimeUs)) {
+ ALOGV("updating TSdelta (%" PRId64 " => %" PRId64 " change %" PRId64 ")",
+ mTimeSourceDeltaUs, realTimeUs - mediaTimeUs,
+ mTimeSourceDeltaUs - (realTimeUs - mediaTimeUs));
+ ATRACE_INT("TS delta change (ms)", (mTimeSourceDeltaUs - (realTimeUs - mediaTimeUs)) / 1E3);
mTimeSourceDeltaUs = realTimeUs - mediaTimeUs;
}
if (wasSeeking == SEEK_VIDEO_ONLY) {
- int64_t nowUs = ts->getRealTimeUs() - mTimeSourceDeltaUs;
+ int64_t nowUs = estimateRealTimeUs(ts, systemTimeUs) - mTimeSourceDeltaUs;
int64_t latenessUs = nowUs - timeUs;
@@ -1883,12 +1917,13 @@ void AwesomePlayer::onVideoEvent() {
}
}
+ int64_t latenessUs = 0;
if (wasSeeking == NO_SEEK) {
// Let's display the first frame after seeking right away.
- int64_t nowUs = ts->getRealTimeUs() - mTimeSourceDeltaUs;
+ int64_t nowUs = estimateRealTimeUs(ts, systemTimeUs) - mTimeSourceDeltaUs;
- int64_t latenessUs = nowUs - timeUs;
+ latenessUs = nowUs - timeUs;
ATRACE_INT("Video Lateness (ms)", latenessUs / 1E3);
@@ -1918,13 +1953,13 @@ void AwesomePlayer::onVideoEvent() {
if (latenessUs > 40000) {
// We're more than 40ms late.
- ALOGV("we're late by %lld us (%.2f secs)",
+ ALOGV("we're late by %" PRId64 " us (%.2f secs)",
latenessUs, latenessUs / 1E6);
if (!(mFlags & SLOW_DECODER_HACK)
|| mSinceLastDropped > FRAME_DROP_FREQ)
{
- ALOGV("we're late by %lld us (%.2f secs) dropping "
+ ALOGV("we're late by %" PRId64 " us (%.2f secs) dropping "
"one after %d frames",
latenessUs, latenessUs / 1E6, mSinceLastDropped);
@@ -1942,9 +1977,9 @@ void AwesomePlayer::onVideoEvent() {
}
}
- if (latenessUs < -10000) {
- // We're more than 10ms early.
- postVideoEvent_l(10000);
+ if (latenessUs < -30000) {
+ // We're more than 30ms early, schedule at most 20 ms before time due
+ postVideoEvent_l(latenessUs < -60000 ? 30000 : -latenessUs - 20000);
return;
}
}
@@ -1958,6 +1993,8 @@ void AwesomePlayer::onVideoEvent() {
if (mVideoRenderer != NULL) {
mSinceLastDropped++;
+ mVideoBuffer->meta_data()->setInt64(kKeyTime, looperTimeUs - latenessUs);
+
mVideoRenderer->render(mVideoBuffer);
if (!mVideoRenderingStarted) {
mVideoRenderingStarted = true;
@@ -2007,14 +2044,26 @@ void AwesomePlayer::onVideoEvent() {
int64_t nextTimeUs;
CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &nextTimeUs));
- int64_t delayUs = nextTimeUs - ts->getRealTimeUs() + mTimeSourceDeltaUs;
- postVideoEvent_l(delayUs > 10000 ? 10000 : delayUs < 0 ? 0 : delayUs);
+ systemTimeUs = mSystemTimeSource.getRealTimeUs();
+ int64_t delayUs = nextTimeUs - estimateRealTimeUs(ts, systemTimeUs) + mTimeSourceDeltaUs;
+ ATRACE_INT("Frame delta (ms)", (nextTimeUs - timeUs) / 1E3);
+ ALOGV("next frame in %" PRId64, delayUs);
+ // try to schedule 30ms before time due
+ postVideoEvent_l(delayUs > 60000 ? 30000 : (delayUs < 30000 ? 0 : delayUs - 30000));
return;
}
postVideoEvent_l();
}
+int64_t AwesomePlayer::estimateRealTimeUs(TimeSource *ts, int64_t systemTimeUs) {
+ if (ts == &mSystemTimeSource) {
+ return systemTimeUs;
+ } else {
+ return (int64_t)mClockEstimator->estimate(systemTimeUs, ts->getRealTimeUs());
+ }
+}
+
void AwesomePlayer::postVideoEvent_l(int64_t delayUs) {
ATRACE_CALL();
@@ -2192,15 +2241,14 @@ status_t AwesomePlayer::finishSetDataSource_l() {
if (!strncasecmp("http://", mUri.string(), 7)
|| !strncasecmp("https://", mUri.string(), 8)
|| isWidevineStreaming) {
- mConnectingDataSource = HTTPBase::Create(
- (mFlags & INCOGNITO)
- ? HTTPBase::kFlagIncognito
- : 0);
-
- if (mUIDValid) {
- mConnectingDataSource->setUID(mUID);
+ if (mHTTPService == NULL) {
+ ALOGE("Attempt to play media from http URI without HTTP service.");
+ return UNKNOWN_ERROR;
}
+ sp<IMediaHTTPConnection> conn = mHTTPService->makeHTTPConnection();
+ mConnectingDataSource = new MediaHTTP(conn);
+
String8 cacheConfig;
bool disconnectAtHighwatermark;
NuCachedSource2::RemoveCacheSpecificHeaders(
@@ -2208,6 +2256,10 @@ status_t AwesomePlayer::finishSetDataSource_l() {
mLock.unlock();
status_t err = mConnectingDataSource->connect(mUri, &mUriHeaders);
+ // force connection at this point, to avoid a race condition between getMIMEType and the
+ // caching datasource constructed below, which could result in multiple requests to the
+ // server, and/or failed connections.
+ String8 contentType = mConnectingDataSource->getMIMEType();
mLock.lock();
if (err != OK) {
@@ -2238,8 +2290,6 @@ status_t AwesomePlayer::finishSetDataSource_l() {
mConnectingDataSource.clear();
- String8 contentType = dataSource->getMIMEType();
-
if (strncasecmp(contentType.string(), "audio/", 6)) {
// We're not doing this for streams that appear to be audio-only
// streams to ensure that even low bandwidth streams start
@@ -2271,12 +2321,12 @@ status_t AwesomePlayer::finishSetDataSource_l() {
if (finalStatus != OK
|| (metaDataSize >= 0
- && cachedDataRemaining >= metaDataSize)
+ && (off64_t)cachedDataRemaining >= metaDataSize)
|| (mFlags & PREPARE_CANCELLED)) {
break;
}
- ALOGV("now cached %d bytes of data", cachedDataRemaining);
+ ALOGV("now cached %zu bytes of data", cachedDataRemaining);
if (metaDataSize < 0
&& cachedDataRemaining >= kMinBytesForSniffing) {
@@ -2295,8 +2345,8 @@ status_t AwesomePlayer::finishSetDataSource_l() {
sniffedMIME = tmp.string();
if (meta == NULL
- || !meta->findInt64(
- "meta-data-size", &metaDataSize)) {
+ || !meta->findInt64("meta-data-size",
+ reinterpret_cast<int64_t*>(&metaDataSize))) {
metaDataSize = kHighWaterMarkBytes;
}
@@ -2316,7 +2366,8 @@ status_t AwesomePlayer::finishSetDataSource_l() {
}
}
} else {
- dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders);
+ dataSource = DataSource::CreateFromURI(
+ mHTTPService, mUri.string(), &mUriHeaders);
}
if (dataSource == NULL) {
@@ -2583,12 +2634,12 @@ status_t AwesomePlayer::getTrackInfo(Parcel *reply) const {
status_t AwesomePlayer::selectAudioTrack_l(
const sp<MediaSource>& source, size_t trackIndex) {
- ALOGI("selectAudioTrack_l: trackIndex=%d, mFlags=0x%x", trackIndex, mFlags);
+ ALOGI("selectAudioTrack_l: trackIndex=%zu, mFlags=0x%x", trackIndex, mFlags);
{
Mutex::Autolock autoLock(mStatsLock);
if ((ssize_t)trackIndex == mActiveAudioTrackIndex) {
- ALOGI("Track %d is active. Does nothing.", trackIndex);
+ ALOGI("Track %zu is active. Does nothing.", trackIndex);
return OK;
}
//mStats.mFlags = mFlags;
@@ -2654,14 +2705,14 @@ status_t AwesomePlayer::selectAudioTrack_l(
status_t AwesomePlayer::selectTrack(size_t trackIndex, bool select) {
ATRACE_CALL();
- ALOGV("selectTrack: trackIndex = %d and select=%d", trackIndex, select);
+ ALOGV("selectTrack: trackIndex = %zu and select=%d", trackIndex, select);
Mutex::Autolock autoLock(mLock);
size_t trackCount = mExtractor->countTracks();
if (mTextDriver != NULL) {
trackCount += mTextDriver->countExternalTracks();
}
if (trackIndex >= trackCount) {
- ALOGE("Track index (%d) is out of range [0, %d)", trackIndex, trackCount);
+ ALOGE("Track index (%zu) is out of range [0, %zu)", trackIndex, trackCount);
return ERROR_OUT_OF_RANGE;
}
@@ -2673,14 +2724,14 @@ status_t AwesomePlayer::selectTrack(size_t trackIndex, bool select) {
isAudioTrack = !strncasecmp(mime, "audio/", 6);
if (!isAudioTrack && strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) != 0) {
- ALOGE("Track %d is not either audio or timed text", trackIndex);
+ ALOGE("Track %zu is not either audio or timed text", trackIndex);
return ERROR_UNSUPPORTED;
}
}
if (isAudioTrack) {
if (!select) {
- ALOGE("Deselect an audio track (%d) is not supported", trackIndex);
+ ALOGE("Deselect an audio track (%zu) is not supported", trackIndex);
return ERROR_UNSUPPORTED;
}
return selectAudioTrack_l(mExtractor->getTrack(trackIndex), trackIndex);
@@ -2758,7 +2809,7 @@ status_t AwesomePlayer::invoke(const Parcel &request, Parcel *reply) {
{
Mutex::Autolock autoLock(mLock);
if (mTextDriver == NULL) {
- mTextDriver = new TimedTextDriver(mListener);
+ mTextDriver = new TimedTextDriver(mListener, mHTTPService);
}
// String values written in Parcel are UTF-16 values.
String8 uri(request.readString16());
@@ -2770,7 +2821,7 @@ status_t AwesomePlayer::invoke(const Parcel &request, Parcel *reply) {
{
Mutex::Autolock autoLock(mLock);
if (mTextDriver == NULL) {
- mTextDriver = new TimedTextDriver(mListener);
+ mTextDriver = new TimedTextDriver(mListener, mHTTPService);
}
int fd = request.readFileDescriptor();
off64_t offset = request.readInt64();
@@ -2803,14 +2854,15 @@ bool AwesomePlayer::isStreamingHTTP() const {
return mCachedSource != NULL || mWVMExtractor != NULL;
}
-status_t AwesomePlayer::dump(int fd, const Vector<String16> &args) const {
+status_t AwesomePlayer::dump(
+ int fd, const Vector<String16> & /* args */) const {
Mutex::Autolock autoLock(mStatsLock);
FILE *out = fdopen(dup(fd), "w");
fprintf(out, " AwesomePlayer\n");
if (mStats.mFd < 0) {
- fprintf(out, " URI(suppressed)");
+ fprintf(out, " URI(%s)", uriDebugString(mUri, mFlags & INCOGNITO).c_str());
} else {
fprintf(out, " fd(%d)", mStats.mFd);
}
@@ -2818,7 +2870,7 @@ status_t AwesomePlayer::dump(int fd, const Vector<String16> &args) const {
fprintf(out, ", flags(0x%08x)", mStats.mFlags);
if (mStats.mBitrate >= 0) {
- fprintf(out, ", bitrate(%lld bps)", mStats.mBitrate);
+ fprintf(out, ", bitrate(%" PRId64 " bps)", mStats.mBitrate);
}
fprintf(out, "\n");
@@ -2826,7 +2878,7 @@ status_t AwesomePlayer::dump(int fd, const Vector<String16> &args) const {
for (size_t i = 0; i < mStats.mTracks.size(); ++i) {
const TrackStat &stat = mStats.mTracks.itemAt(i);
- fprintf(out, " Track %d\n", i + 1);
+ fprintf(out, " Track %zu\n", i + 1);
fprintf(out, " MIME(%s)", stat.mMIME.string());
if (!stat.mDecoderName.isEmpty()) {
@@ -2838,8 +2890,8 @@ status_t AwesomePlayer::dump(int fd, const Vector<String16> &args) const {
if ((ssize_t)i == mStats.mVideoTrackIndex) {
fprintf(out,
" videoDimensions(%d x %d), "
- "numVideoFramesDecoded(%lld), "
- "numVideoFramesDropped(%lld)\n",
+ "numVideoFramesDecoded(%" PRId64 "), "
+ "numVideoFramesDropped(%" PRId64 ")\n",
mStats.mVideoWidth,
mStats.mVideoHeight,
mStats.mNumVideoFramesDecoded,
@@ -2899,6 +2951,9 @@ void AwesomePlayer::onAudioTearDownEvent() {
// get current position so we can start recreated stream from here
getPosition(&mAudioTearDownPosition);
+ sp<IMediaHTTPService> savedHTTPService = mHTTPService;
+
+ bool wasLooping = mFlags & LOOPING;
// Reset and recreate
reset_l();
@@ -2908,7 +2963,7 @@ void AwesomePlayer::onAudioTearDownEvent() {
mFileSource = fileSource;
err = setDataSource_l(fileSource);
} else {
- err = setDataSource_l(uri, &uriHeaders);
+ err = setDataSource_l(savedHTTPService, uri, &uriHeaders);
}
mFlags |= PREPARING;
@@ -2917,6 +2972,9 @@ void AwesomePlayer::onAudioTearDownEvent() {
// a MEDIA_ERROR to the client and abort the prepare
mFlags |= PREPARE_CANCELLED;
}
+ if (wasLooping) {
+ mFlags |= LOOPING;
+ }
mAudioTearDown = true;
mIsAsyncPrepare = true;
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 3017fe7..f76aed6 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <inttypes.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "CameraSource"
#include <utils/Log.h>
@@ -31,6 +33,12 @@
#include <utils/String8.h>
#include <cutils/properties.h>
+#if LOG_NDEBUG
+#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
+#else
+#define UNUSED_UNLESS_VERBOSE(x)
+#endif
+
namespace android {
static const int64_t CAMERA_SOURCE_TIMEOUT_NS = 3000000000LL;
@@ -63,12 +71,15 @@ CameraSourceListener::~CameraSourceListener() {
}
void CameraSourceListener::notify(int32_t msgType, int32_t ext1, int32_t ext2) {
+ UNUSED_UNLESS_VERBOSE(msgType);
+ UNUSED_UNLESS_VERBOSE(ext1);
+ UNUSED_UNLESS_VERBOSE(ext2);
ALOGV("notify(%d, %d, %d)", msgType, ext1, ext2);
}
void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr,
- camera_frame_metadata_t *metadata) {
- ALOGV("postData(%d, ptr:%p, size:%d)",
+ camera_frame_metadata_t * /* metadata */) {
+ ALOGV("postData(%d, ptr:%p, size:%zu)",
msgType, dataPtr->pointer(), dataPtr->size());
sp<CameraSource> source = mSource.promote();
@@ -577,14 +588,15 @@ CameraSource::~CameraSource() {
}
}
-void CameraSource::startCameraRecording() {
+status_t CameraSource::startCameraRecording() {
ALOGV("startCameraRecording");
// Reset the identity to the current thread because media server owns the
// camera and recording is started by the applications. The applications
// will connect to the camera in ICameraRecordingProxy::startRecording.
int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ status_t err;
if (mNumInputBuffers > 0) {
- status_t err = mCamera->sendCommand(
+ err = mCamera->sendCommand(
CAMERA_CMD_SET_VIDEO_BUFFER_COUNT, mNumInputBuffers, 0);
// This could happen for CameraHAL1 clients; thus the failure is
@@ -595,17 +607,25 @@ void CameraSource::startCameraRecording() {
}
}
+ err = OK;
if (mCameraFlags & FLAGS_HOT_CAMERA) {
mCamera->unlock();
mCamera.clear();
- CHECK_EQ((status_t)OK,
- mCameraRecordingProxy->startRecording(new ProxyListener(this)));
+ if ((err = mCameraRecordingProxy->startRecording(
+ new ProxyListener(this))) != OK) {
+ ALOGE("Failed to start recording, received error: %s (%d)",
+ strerror(-err), err);
+ }
} else {
mCamera->setListener(new CameraSourceListener(this));
mCamera->startRecording();
- CHECK(mCamera->recordingEnabled());
+ if (!mCamera->recordingEnabled()) {
+ err = -EINVAL;
+ ALOGE("Failed to start recording");
+ }
}
IPCThreadState::self()->restoreCallingIdentity(token);
+ return err;
}
status_t CameraSource::start(MetaData *meta) {
@@ -637,10 +657,12 @@ status_t CameraSource::start(MetaData *meta) {
}
}
- startCameraRecording();
+ status_t err;
+ if ((err = startCameraRecording()) == OK) {
+ mStarted = true;
+ }
- mStarted = true;
- return OK;
+ return err;
}
void CameraSource::stopCameraRecording() {
@@ -655,63 +677,80 @@ void CameraSource::stopCameraRecording() {
void CameraSource::releaseCamera() {
ALOGV("releaseCamera");
- if (mCamera != 0) {
+ sp<Camera> camera;
+ bool coldCamera = false;
+ {
+ Mutex::Autolock autoLock(mLock);
+ // get a local ref and clear ref to mCamera now
+ camera = mCamera;
+ mCamera.clear();
+ coldCamera = (mCameraFlags & FLAGS_HOT_CAMERA) == 0;
+ }
+
+ if (camera != 0) {
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- if ((mCameraFlags & FLAGS_HOT_CAMERA) == 0) {
+ if (coldCamera) {
ALOGV("Camera was cold when we started, stopping preview");
- mCamera->stopPreview();
- mCamera->disconnect();
+ camera->stopPreview();
+ camera->disconnect();
}
- mCamera->unlock();
- mCamera.clear();
- mCamera = 0;
+ camera->unlock();
IPCThreadState::self()->restoreCallingIdentity(token);
}
- if (mCameraRecordingProxy != 0) {
- mCameraRecordingProxy->asBinder()->unlinkToDeath(mDeathNotifier);
- mCameraRecordingProxy.clear();
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mCameraRecordingProxy != 0) {
+ mCameraRecordingProxy->asBinder()->unlinkToDeath(mDeathNotifier);
+ mCameraRecordingProxy.clear();
+ }
+ mCameraFlags = 0;
}
- mCameraFlags = 0;
}
status_t CameraSource::reset() {
ALOGD("reset: E");
- Mutex::Autolock autoLock(mLock);
- mStarted = false;
- mFrameAvailableCondition.signal();
- int64_t token;
- bool isTokenValid = false;
- if (mCamera != 0) {
- token = IPCThreadState::self()->clearCallingIdentity();
- isTokenValid = true;
- }
- releaseQueuedFrames();
- while (!mFramesBeingEncoded.empty()) {
- if (NO_ERROR !=
- mFrameCompleteCondition.waitRelative(mLock,
- mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
- ALOGW("Timed out waiting for outstanding frames being encoded: %d",
- mFramesBeingEncoded.size());
+ {
+ Mutex::Autolock autoLock(mLock);
+ mStarted = false;
+ mFrameAvailableCondition.signal();
+
+ int64_t token;
+ bool isTokenValid = false;
+ if (mCamera != 0) {
+ token = IPCThreadState::self()->clearCallingIdentity();
+ isTokenValid = true;
+ }
+ releaseQueuedFrames();
+ while (!mFramesBeingEncoded.empty()) {
+ if (NO_ERROR !=
+ mFrameCompleteCondition.waitRelative(mLock,
+ mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
+ ALOGW("Timed out waiting for outstanding frames being encoded: %zu",
+ mFramesBeingEncoded.size());
+ }
+ }
+ stopCameraRecording();
+ if (isTokenValid) {
+ IPCThreadState::self()->restoreCallingIdentity(token);
}
- }
- stopCameraRecording();
- releaseCamera();
- if (isTokenValid) {
- IPCThreadState::self()->restoreCallingIdentity(token);
- }
- if (mCollectStats) {
- ALOGI("Frames received/encoded/dropped: %d/%d/%d in %lld us",
- mNumFramesReceived, mNumFramesEncoded, mNumFramesDropped,
- mLastFrameTimestampUs - mFirstFrameTimeUs);
- }
+ if (mCollectStats) {
+ ALOGI("Frames received/encoded/dropped: %d/%d/%d in %" PRId64 " us",
+ mNumFramesReceived, mNumFramesEncoded, mNumFramesDropped,
+ mLastFrameTimestampUs - mFirstFrameTimeUs);
+ }
- if (mNumGlitches > 0) {
- ALOGW("%d long delays between neighboring video frames", mNumGlitches);
+ if (mNumGlitches > 0) {
+ ALOGW("%d long delays between neighboring video frames", mNumGlitches);
+ }
+
+ CHECK_EQ(mNumFramesReceived, mNumFramesEncoded + mNumFramesDropped);
}
- CHECK_EQ(mNumFramesReceived, mNumFramesEncoded + mNumFramesDropped);
+ releaseCamera();
+
ALOGD("reset: X");
return OK;
}
@@ -789,7 +828,7 @@ status_t CameraSource::read(
ALOGW("camera recording proxy is gone");
return ERROR_END_OF_STREAM;
}
- ALOGW("Timed out waiting for incoming camera video frames: %lld us",
+ ALOGW("Timed out waiting for incoming camera video frames: %" PRId64 " us",
mLastFrameTimestampUs);
}
}
@@ -812,10 +851,10 @@ status_t CameraSource::read(
void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
int32_t msgType, const sp<IMemory> &data) {
- ALOGV("dataCallbackTimestamp: timestamp %lld us", timestampUs);
+ ALOGV("dataCallbackTimestamp: timestamp %" PRId64 " us", timestampUs);
Mutex::Autolock autoLock(mLock);
if (!mStarted || (mNumFramesReceived == 0 && timestampUs < mStartTimeUs)) {
- ALOGV("Drop frame at %lld/%lld us", timestampUs, mStartTimeUs);
+ ALOGV("Drop frame at %" PRId64 "/%" PRId64 " us", timestampUs, mStartTimeUs);
releaseOneRecordingFrame(data);
return;
}
@@ -854,7 +893,7 @@ void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
mFramesReceived.push_back(data);
int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
mFrameTimes.push_back(timeUs);
- ALOGV("initial delay: %lld, current time stamp: %lld",
+ ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
mStartTimeUs, timeUs);
mFrameAvailableCondition.signal();
}
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 5772316..0acd9d0 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <inttypes.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "CameraSourceTimeLapse"
@@ -79,13 +81,14 @@ CameraSourceTimeLapse::CameraSourceTimeLapse(
mSkipCurrentFrame(false) {
mTimeBetweenFrameCaptureUs = timeBetweenFrameCaptureUs;
- ALOGD("starting time lapse mode: %lld us",
+ ALOGD("starting time lapse mode: %" PRId64 " us",
mTimeBetweenFrameCaptureUs);
mVideoWidth = videoSize.width;
mVideoHeight = videoSize.height;
- if (!trySettingVideoSize(videoSize.width, videoSize.height)) {
+ if (OK == mInitCheck && !trySettingVideoSize(videoSize.width, videoSize.height)) {
+ releaseCamera();
mInitCheck = NO_INIT;
}
@@ -134,7 +137,7 @@ bool CameraSourceTimeLapse::trySettingVideoSize(
}
bool videoSizeSupported = false;
- for (uint32_t i = 0; i < supportedSizes.size(); ++i) {
+ for (size_t i = 0; i < supportedSizes.size(); ++i) {
int32_t pictureWidth = supportedSizes[i].width;
int32_t pictureHeight = supportedSizes[i].height;
@@ -231,7 +234,7 @@ sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(
return newMemory;
}
-bool CameraSourceTimeLapse::skipCurrentFrame(int64_t timestampUs) {
+bool CameraSourceTimeLapse::skipCurrentFrame(int64_t /* timestampUs */) {
ALOGV("skipCurrentFrame");
if (mSkipCurrentFrame) {
mSkipCurrentFrame = false;
@@ -265,7 +268,7 @@ bool CameraSourceTimeLapse::skipFrameAndModifyTimeStamp(int64_t *timestampUs) {
// Really make sure that this video recording frame will not be dropped.
if (*timestampUs < mStartTimeUs) {
- ALOGI("set timestampUs to start time stamp %lld us", mStartTimeUs);
+ ALOGI("set timestampUs to start time stamp %" PRId64 " us", mStartTimeUs);
*timestampUs = mStartTimeUs;
}
return false;
diff --git a/media/libstagefright/ClockEstimator.cpp b/media/libstagefright/ClockEstimator.cpp
new file mode 100644
index 0000000..34d1e42
--- /dev/null
+++ b/media/libstagefright/ClockEstimator.cpp
@@ -0,0 +1,177 @@
+/*
+**
+** Copyright 2014, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClockEstimator"
+#include <utils/Log.h>
+
+#include <math.h>
+#include <media/stagefright/ClockEstimator.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+WindowedLinearFitEstimator::WindowedLinearFitEstimator(
+ size_t headLength, double headFactor, size_t mainLength, double tailFactor)
+ : mHeadFactorInv(1. / headFactor),
+ mTailFactor(tailFactor),
+ mHistoryLength(mainLength + headLength),
+ mHeadLength(headLength) {
+ reset();
+ mXHistory.resize(mHistoryLength);
+ mYHistory.resize(mHistoryLength);
+ mFirstWeight = pow(headFactor, mHeadLength);
+}
+
+WindowedLinearFitEstimator::LinearFit::LinearFit() {
+ reset();
+}
+
+void WindowedLinearFitEstimator::LinearFit::reset() {
+ mX = mXX = mY = mYY = mXY = mW = 0.;
+}
+
+double WindowedLinearFitEstimator::LinearFit::size() const {
+ double s = mW * mW + mX * mX + mY * mY + mXX * mXX + mXY * mXY + mYY * mYY;
+ if (s > 1e72) {
+ // 1e72 corresponds to clock monotonic time of about 8 years
+ ALOGW("estimator is overflowing: w=%g x=%g y=%g xx=%g xy=%g yy=%g",
+ mW, mX, mY, mXX, mXY, mYY);
+ }
+ return s;
+}
+
+void WindowedLinearFitEstimator::LinearFit::add(double x, double y, double w) {
+ mW += w;
+ mX += w * x;
+ mY += w * y;
+ mXX += w * x * x;
+ mXY += w * x * y;
+ mYY += w * y * y;
+}
+
+void WindowedLinearFitEstimator::LinearFit::combine(const LinearFit &lf) {
+ mW += lf.mW;
+ mX += lf.mX;
+ mY += lf.mY;
+ mXX += lf.mXX;
+ mXY += lf.mXY;
+ mYY += lf.mYY;
+}
+
+void WindowedLinearFitEstimator::LinearFit::scale(double w) {
+ mW *= w;
+ mX *= w;
+ mY *= w;
+ mXX *= w;
+ mXY *= w;
+ mYY *= w;
+}
+
+double WindowedLinearFitEstimator::LinearFit::interpolate(double x) {
+ double div = mW * mXX - mX * mX;
+ if (fabs(div) < 1e-5 * mW * mW) {
+ // this only should happen on the first value
+ return x;
+ // assuming a = 1, we could also return x + (mY - mX) / mW;
+ }
+ double a_div = (mW * mXY - mX * mY);
+ double b_div = (mXX * mY - mX * mXY);
+ ALOGV("a=%.4g b=%.4g in=%g out=%g",
+ a_div / div, b_div / div, x, (a_div * x + b_div) / div);
+ return (a_div * x + b_div) / div;
+}
+
+double WindowedLinearFitEstimator::estimate(double x, double y) {
+ /*
+ * TODO: We could update the head by adding the new sample to it
+ * and amplifying it, but this approach can lead to unbounded
+ * error. Instead, we recalculate the head at each step, which
+ * is computationally more expensive. We could balance the two
+ * methods by recalculating just before the error becomes
+ * significant.
+ */
+ const bool update_head = false;
+ if (update_head) {
+ // add new sample to the head
+ mHead.scale(mHeadFactorInv); // amplify head
+ mHead.add(x, y, mFirstWeight);
+ }
+
+ /*
+ * TRICKY: place elements into the circular buffer at decreasing
+ * indices, so that we can access past elements by addition
+ * (thereby avoiding potentially negative indices.)
+ */
+ if (mNumSamples >= mHeadLength) {
+ // move last head sample from head to the main window
+ size_t lastHeadIx = (mSampleIx + mHeadLength) % mHistoryLength;
+ if (update_head) {
+ mHead.add(mXHistory[lastHeadIx], mYHistory[lastHeadIx], -1.); // remove
+ }
+ mMain.add(mXHistory[lastHeadIx], mYHistory[lastHeadIx], 1.);
+ if (mNumSamples >= mHistoryLength) {
+ // move last main sample from main window to tail
+ mMain.add(mXHistory[mSampleIx], mYHistory[mSampleIx], -1.); // remove
+ mTail.add(mXHistory[mSampleIx], mYHistory[mSampleIx], 1.);
+ mTail.scale(mTailFactor); // attenuate tail
+ }
+ }
+
+ mXHistory.editItemAt(mSampleIx) = x;
+ mYHistory.editItemAt(mSampleIx) = y;
+ if (mNumSamples < mHistoryLength) {
+ ++mNumSamples;
+ }
+
+ // recalculate head unless we were using the update method
+ if (!update_head) {
+ mHead.reset();
+ double w = mFirstWeight;
+ for (size_t headIx = 0; headIx < mHeadLength && headIx < mNumSamples; ++headIx) {
+ size_t ix = (mSampleIx + headIx) % mHistoryLength;
+ mHead.add(mXHistory[ix], mYHistory[ix], w);
+ w *= mHeadFactorInv;
+ }
+ }
+
+ if (mSampleIx > 0) {
+ --mSampleIx;
+ } else {
+ mSampleIx = mHistoryLength - 1;
+ }
+
+ // return estimation result
+ LinearFit total;
+ total.combine(mHead);
+ total.combine(mMain);
+ total.combine(mTail);
+ return total.interpolate(x);
+}
+
+void WindowedLinearFitEstimator::reset() {
+ mHead.reset();
+ mMain.reset();
+ mTail.reset();
+ mNumSamples = 0;
+ mSampleIx = mHistoryLength - 1;
+}
+
+}; // namespace android
+
+
diff --git a/media/libstagefright/include/chromium_http_stub.h b/media/libstagefright/CodecBase.cpp
index e0651a4..f729d4d 100644
--- a/media/libstagefright/include/chromium_http_stub.h
+++ b/media/libstagefright/CodecBase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,21 +14,25 @@
* limitations under the License.
*/
-#ifndef CHROMIUM_HTTP_STUB_H_
-#define CHROMIUM_HTTP_STUB_H_
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CodecBase"
-#include <include/HTTPBase.h>
-#include <media/stagefright/DataSource.h>
+#include <inttypes.h>
+
+#include <media/stagefright/CodecBase.h>
namespace android {
-extern "C" {
-HTTPBase *createChromiumHTTPDataSource(uint32_t flags);
-status_t UpdateChromiumHTTPDataSourceProxyConfig(
- const char *host, int32_t port, const char *exclusionList);
+CodecBase::CodecBase() {
+}
-DataSource *createDataUriSource(const char *uri);
+CodecBase::~CodecBase() {
}
+
+CodecBase::PortDescription::PortDescription() {
+}
+
+CodecBase::PortDescription::~PortDescription() {
}
-#endif
+} // namespace android
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 97987e2..c99db84 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -13,13 +13,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataSource"
#include "include/AMRExtractor.h"
-#if CHROMIUM_AVAILABLE
-#include "include/chromium_http_stub.h"
-#endif
-
#include "include/AACExtractor.h"
#include "include/DRMExtractor.h"
#include "include/FLACExtractor.h"
@@ -35,10 +33,15 @@
#include "matroska/MatroskaExtractor.h"
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/DataSource.h>
+#include <media/stagefright/DataURISource.h>
#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaHTTP.h>
#include <utils/String8.h>
#include <cutils/properties.h>
@@ -180,7 +183,15 @@ void DataSource::RegisterDefaultSniffers() {
// static
sp<DataSource> DataSource::CreateFromURI(
- const char *uri, const KeyedVector<String8, String8> *headers) {
+ const sp<IMediaHTTPService> &httpService,
+ const char *uri,
+ const KeyedVector<String8, String8> *headers,
+ String8 *contentType,
+ HTTPBase *httpSource) {
+ if (contentType != NULL) {
+ *contentType = "";
+ }
+
bool isWidevine = !strncasecmp("widevine://", uri, 11);
sp<DataSource> source;
@@ -189,7 +200,19 @@ sp<DataSource> DataSource::CreateFromURI(
} else if (!strncasecmp("http://", uri, 7)
|| !strncasecmp("https://", uri, 8)
|| isWidevine) {
- sp<HTTPBase> httpSource = HTTPBase::Create();
+ if (httpService == NULL) {
+ ALOGE("Invalid http service!");
+ return NULL;
+ }
+
+ if (httpSource == NULL) {
+ sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ if (conn == NULL) {
+ ALOGE("Failed to make http connection from http service!");
+ return NULL;
+ }
+ httpSource = new MediaHTTP(conn);
+ }
String8 tmp;
if (isWidevine) {
@@ -199,32 +222,38 @@ sp<DataSource> DataSource::CreateFromURI(
uri = tmp.string();
}
- if (httpSource->connect(uri, headers) != OK) {
+ String8 cacheConfig;
+ bool disconnectAtHighwatermark;
+ KeyedVector<String8, String8> nonCacheSpecificHeaders;
+ if (headers != NULL) {
+ nonCacheSpecificHeaders = *headers;
+ NuCachedSource2::RemoveCacheSpecificHeaders(
+ &nonCacheSpecificHeaders,
+ &cacheConfig,
+ &disconnectAtHighwatermark);
+ }
+
+ if (httpSource->connect(uri, &nonCacheSpecificHeaders) != OK) {
+ ALOGE("Failed to connect http source!");
return NULL;
}
if (!isWidevine) {
- String8 cacheConfig;
- bool disconnectAtHighwatermark;
- if (headers != NULL) {
- KeyedVector<String8, String8> copy = *headers;
- NuCachedSource2::RemoveCacheSpecificHeaders(
- &copy, &cacheConfig, &disconnectAtHighwatermark);
+ if (contentType != NULL) {
+ *contentType = httpSource->getMIMEType();
}
source = new NuCachedSource2(
httpSource,
- cacheConfig.isEmpty() ? NULL : cacheConfig.string());
+ cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+ disconnectAtHighwatermark);
} else {
// We do not want that prefetching, caching, datasource wrapper
// in the widevine:// case.
source = httpSource;
}
-
-# if CHROMIUM_AVAILABLE
} else if (!strncasecmp("data:", uri, 5)) {
- source = createDataUriSource(uri);
-#endif
+ source = DataURISource::Create(uri);
} else {
// Assume it's a filename.
source = new FileSource(uri);
@@ -237,6 +266,19 @@ sp<DataSource> DataSource::CreateFromURI(
return source;
}
+sp<DataSource> DataSource::CreateMediaHTTP(const sp<IMediaHTTPService> &httpService) {
+ if (httpService == NULL) {
+ return NULL;
+ }
+
+ sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ if (conn == NULL) {
+ return NULL;
+ } else {
+ return new MediaHTTP(conn);
+ }
+}
+
String8 DataSource::getMIMEType() const {
return String8("application/octet-stream");
}
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
new file mode 100644
index 0000000..2c39314
--- /dev/null
+++ b/media/libstagefright/DataURISource.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/DataURISource.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
+
+namespace android {
+
+// static
+sp<DataURISource> DataURISource::Create(const char *uri) {
+ if (strncasecmp("data:", uri, 5)) {
+ return NULL;
+ }
+
+ char *commaPos = strrchr(uri, ',');
+
+ if (commaPos == NULL) {
+ return NULL;
+ }
+
+ sp<ABuffer> buffer;
+
+ AString tmp(&uri[5], commaPos - &uri[5]);
+
+ if (tmp.endsWith(";base64")) {
+ AString encoded(commaPos + 1);
+
+ // Strip CR and LF...
+ for (size_t i = encoded.size(); i-- > 0;) {
+ if (encoded.c_str()[i] == '\r' || encoded.c_str()[i] == '\n') {
+ encoded.erase(i, 1);
+ }
+ }
+
+ buffer = decodeBase64(encoded);
+
+ if (buffer == NULL) {
+ ALOGE("Malformed base64 encoded content found.");
+ return NULL;
+ }
+ } else {
+#if 0
+ size_t dataLen = strlen(uri) - tmp.size() - 6;
+ buffer = new ABuffer(dataLen);
+ memcpy(buffer->data(), commaPos + 1, dataLen);
+
+ // unescape
+#else
+ // MediaPlayer doesn't care for this right now as we don't
+ // play any text-based media.
+ return NULL;
+#endif
+ }
+
+ // We don't really care about charset or mime type.
+
+ return new DataURISource(buffer);
+}
+
+DataURISource::DataURISource(const sp<ABuffer> &buffer)
+ : mBuffer(buffer) {
+}
+
+DataURISource::~DataURISource() {
+}
+
+status_t DataURISource::initCheck() const {
+ return OK;
+}
+
+ssize_t DataURISource::readAt(off64_t offset, void *data, size_t size) {
+ if ((offset < 0) || (offset >= (off64_t)mBuffer->size())) {
+ return 0;
+ }
+
+ size_t copy = mBuffer->size() - offset;
+ if (copy > size) {
+ copy = size;
+ }
+
+ memcpy(data, mBuffer->data() + offset, copy);
+
+ return copy;
+}
+
+status_t DataURISource::getSize(off64_t *size) {
+ *size = mBuffer->size();
+
+ return OK;
+}
+
+} // namespace android
+
diff --git a/media/libstagefright/ESDS.cpp b/media/libstagefright/ESDS.cpp
index 4a0c35c..427bf7b 100644
--- a/media/libstagefright/ESDS.cpp
+++ b/media/libstagefright/ESDS.cpp
@@ -91,7 +91,7 @@ status_t ESDS::skipDescriptorHeader(
}
while (more);
- ALOGV("tag=0x%02x data_size=%d", *tag, *data_size);
+ ALOGV("tag=0x%02x data_size=%zu", *tag, *data_size);
if (*data_size > size) {
return ERROR_MALFORMED;
diff --git a/media/libstagefright/FLACExtractor.cpp b/media/libstagefright/FLACExtractor.cpp
index 098fcf9..fa7251c 100644
--- a/media/libstagefright/FLACExtractor.cpp
+++ b/media/libstagefright/FLACExtractor.cpp
@@ -208,55 +208,55 @@ private:
// with the same parameter list, but discard redundant information.
FLAC__StreamDecoderReadStatus FLACParser::read_callback(
- const FLAC__StreamDecoder *decoder, FLAC__byte buffer[],
+ const FLAC__StreamDecoder * /* decoder */, FLAC__byte buffer[],
size_t *bytes, void *client_data)
{
return ((FLACParser *) client_data)->readCallback(buffer, bytes);
}
FLAC__StreamDecoderSeekStatus FLACParser::seek_callback(
- const FLAC__StreamDecoder *decoder,
+ const FLAC__StreamDecoder * /* decoder */,
FLAC__uint64 absolute_byte_offset, void *client_data)
{
return ((FLACParser *) client_data)->seekCallback(absolute_byte_offset);
}
FLAC__StreamDecoderTellStatus FLACParser::tell_callback(
- const FLAC__StreamDecoder *decoder,
+ const FLAC__StreamDecoder * /* decoder */,
FLAC__uint64 *absolute_byte_offset, void *client_data)
{
return ((FLACParser *) client_data)->tellCallback(absolute_byte_offset);
}
FLAC__StreamDecoderLengthStatus FLACParser::length_callback(
- const FLAC__StreamDecoder *decoder,
+ const FLAC__StreamDecoder * /* decoder */,
FLAC__uint64 *stream_length, void *client_data)
{
return ((FLACParser *) client_data)->lengthCallback(stream_length);
}
FLAC__bool FLACParser::eof_callback(
- const FLAC__StreamDecoder *decoder, void *client_data)
+ const FLAC__StreamDecoder * /* decoder */, void *client_data)
{
return ((FLACParser *) client_data)->eofCallback();
}
FLAC__StreamDecoderWriteStatus FLACParser::write_callback(
- const FLAC__StreamDecoder *decoder, const FLAC__Frame *frame,
+ const FLAC__StreamDecoder * /* decoder */, const FLAC__Frame *frame,
const FLAC__int32 * const buffer[], void *client_data)
{
return ((FLACParser *) client_data)->writeCallback(frame, buffer);
}
void FLACParser::metadata_callback(
- const FLAC__StreamDecoder *decoder,
+ const FLAC__StreamDecoder * /* decoder */,
const FLAC__StreamMetadata *metadata, void *client_data)
{
((FLACParser *) client_data)->metadataCallback(metadata);
}
void FLACParser::error_callback(
- const FLAC__StreamDecoder *decoder,
+ const FLAC__StreamDecoder * /* decoder */,
FLAC__StreamDecoderErrorStatus status, void *client_data)
{
((FLACParser *) client_data)->errorCallback(status);
@@ -380,15 +380,21 @@ void FLACParser::errorCallback(FLAC__StreamDecoderErrorStatus status)
// Copy samples from FLAC native 32-bit non-interleaved to 16-bit interleaved.
// These are candidates for optimization if needed.
-static void copyMono8(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
+static void copyMono8(
+ short *dst,
+ const int *const *src,
+ unsigned nSamples,
+ unsigned /* nChannels */) {
for (unsigned i = 0; i < nSamples; ++i) {
*dst++ = src[0][i] << 8;
}
}
-static void copyStereo8(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
+static void copyStereo8(
+ short *dst,
+ const int *const *src,
+ unsigned nSamples,
+ unsigned /* nChannels */) {
for (unsigned i = 0; i < nSamples; ++i) {
*dst++ = src[0][i] << 8;
*dst++ = src[1][i] << 8;
@@ -404,15 +410,21 @@ static void copyMultiCh8(short *dst, const int *const *src, unsigned nSamples, u
}
}
-static void copyMono16(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
+static void copyMono16(
+ short *dst,
+ const int *const *src,
+ unsigned nSamples,
+ unsigned /* nChannels */) {
for (unsigned i = 0; i < nSamples; ++i) {
*dst++ = src[0][i];
}
}
-static void copyStereo16(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
+static void copyStereo16(
+ short *dst,
+ const int *const *src,
+ unsigned nSamples,
+ unsigned /* nChannels */) {
for (unsigned i = 0; i < nSamples; ++i) {
*dst++ = src[0][i];
*dst++ = src[1][i];
@@ -430,15 +442,21 @@ static void copyMultiCh16(short *dst, const int *const *src, unsigned nSamples,
// 24-bit versions should do dithering or noise-shaping, here or in AudioFlinger
-static void copyMono24(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
+static void copyMono24(
+ short *dst,
+ const int *const *src,
+ unsigned nSamples,
+ unsigned /* nChannels */) {
for (unsigned i = 0; i < nSamples; ++i) {
*dst++ = src[0][i] >> 8;
}
}
-static void copyStereo24(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
+static void copyStereo24(
+ short *dst,
+ const int *const *src,
+ unsigned nSamples,
+ unsigned /* nChannels */) {
for (unsigned i = 0; i < nSamples; ++i) {
*dst++ = src[0][i] >> 8;
*dst++ = src[1][i] >> 8;
@@ -454,8 +472,11 @@ static void copyMultiCh24(short *dst, const int *const *src, unsigned nSamples,
}
}
-static void copyTrespass(short *dst, const int *const *src, unsigned nSamples, unsigned nChannels)
-{
+static void copyTrespass(
+ short * /* dst */,
+ const int *const * /* src */,
+ unsigned /* nSamples */,
+ unsigned /* nChannels */) {
TRESPASS();
}
@@ -700,7 +721,7 @@ FLACSource::~FLACSource()
}
}
-status_t FLACSource::start(MetaData *params)
+status_t FLACSource::start(MetaData * /* params */)
{
ALOGV("FLACSource::start");
@@ -792,8 +813,7 @@ sp<MediaSource> FLACExtractor::getTrack(size_t index)
}
sp<MetaData> FLACExtractor::getTrackMetaData(
- size_t index, uint32_t flags)
-{
+ size_t index, uint32_t /* flags */) {
if (mInitCheck != OK || index > 0) {
return NULL;
}
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp
index 5fa4b6f..32291c8 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libstagefright/HTTPBase.cpp
@@ -20,17 +20,13 @@
#include "include/HTTPBase.h"
-#if CHROMIUM_AVAILABLE
-#include "include/chromium_http_stub.h"
-#endif
-
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <cutils/properties.h>
#include <cutils/qtaguid.h>
-#include <ConnectivityManager.h>
+#include <NetdClient.h>
namespace android {
@@ -40,34 +36,7 @@ HTTPBase::HTTPBase()
mTotalTransferBytes(0),
mPrevBandwidthMeasureTimeUs(0),
mPrevEstimatedBandWidthKbps(0),
- mBandWidthCollectFreqMs(5000),
- mUIDValid(false),
- mUID(0) {
-}
-
-// static
-sp<HTTPBase> HTTPBase::Create(uint32_t flags) {
-#if CHROMIUM_AVAILABLE
- HTTPBase *dataSource = createChromiumHTTPDataSource(flags);
- if (dataSource) {
- return dataSource;
- }
-#endif
- {
- TRESPASS();
-
- return NULL;
- }
-}
-
-// static
-status_t HTTPBase::UpdateProxyConfig(
- const char *host, int32_t port, const char *exclusionList) {
-#if CHROMIUM_AVAILABLE
- return UpdateChromiumHTTPDataSourceProxyConfig(host, port, exclusionList);
-#else
- return INVALID_OPERATION;
-#endif
+ mBandWidthCollectFreqMs(5000) {
}
void HTTPBase::addBandwidthMeasurement(
@@ -135,21 +104,6 @@ status_t HTTPBase::setBandwidthStatCollectFreq(int32_t freqMs) {
return OK;
}
-void HTTPBase::setUID(uid_t uid) {
- mUIDValid = true;
- mUID = uid;
-}
-
-bool HTTPBase::getUID(uid_t *uid) const {
- if (!mUIDValid) {
- return false;
- }
-
- *uid = mUID;
-
- return true;
-}
-
// static
void HTTPBase::RegisterSocketUserTag(int sockfd, uid_t uid, uint32_t kTag) {
int res = qtaguid_tagSocket(sockfd, kTag, uid);
@@ -168,7 +122,7 @@ void HTTPBase::UnRegisterSocketUserTag(int sockfd) {
// static
void HTTPBase::RegisterSocketUserMark(int sockfd, uid_t uid) {
- ConnectivityManager::markSocketAsUser(sockfd, uid);
+ setNetworkForUser(uid, sockfd);
}
// static
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 380dab4..4a63152 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -398,7 +398,8 @@ sp<MediaSource> MP3Extractor::getTrack(size_t index) {
mSeeker);
}
-sp<MetaData> MP3Extractor::getTrackMetaData(size_t index, uint32_t flags) {
+sp<MetaData> MP3Extractor::getTrackMetaData(
+ size_t index, uint32_t /* flags */) {
if (mInitCheck != OK || index != 0) {
return NULL;
}
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index c9ed5bb..9856f92 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -555,7 +555,7 @@ status_t MPEG2TSWriter::addSource(const sp<MediaSource> &source) {
return OK;
}
-status_t MPEG2TSWriter::start(MetaData *param) {
+status_t MPEG2TSWriter::start(MetaData * /* param */) {
CHECK(!mStarted);
mStarted = true;
@@ -596,7 +596,8 @@ bool MPEG2TSWriter::reachedEOS() {
return !mStarted || (mNumSourcesDone == mSources.size() ? true : false);
}
-status_t MPEG2TSWriter::dump(int fd, const Vector<String16> &args) {
+status_t MPEG2TSWriter::dump(
+ int /* fd */, const Vector<String16> & /* args */) {
return OK;
}
@@ -681,7 +682,7 @@ void MPEG2TSWriter::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- ALOGV("writing access unit at time %.2f secs (index %d)",
+ ALOGV("writing access unit at time %.2f secs (index %zu)",
minTimeUs / 1E6, minIndex);
source = mSources.editItemAt(minIndex);
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 4f1c5b3..d922dc0 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -16,17 +16,19 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MPEG4Extractor"
-#include <utils/Log.h>
-
-#include "include/MPEG4Extractor.h"
-#include "include/SampleTable.h"
-#include "include/ESDS.h"
#include <ctype.h>
+#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
+#include <utils/Log.h>
+
+#include "include/MPEG4Extractor.h"
+#include "include/SampleTable.h"
+#include "include/ESDS.h"
+
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -46,11 +48,13 @@ namespace android {
class MPEG4Source : public MediaSource {
public:
// Caller retains ownership of both "dataSource" and "sampleTable".
- MPEG4Source(const sp<MetaData> &format,
+ MPEG4Source(const sp<MPEG4Extractor> &owner,
+ const sp<MetaData> &format,
const sp<DataSource> &dataSource,
int32_t timeScale,
const sp<SampleTable> &sampleTable,
Vector<SidxEntry> &sidx,
+ const Trex *trex,
off64_t firstMoofOffset);
virtual status_t start(MetaData *params = NULL);
@@ -67,6 +71,8 @@ protected:
private:
Mutex mLock;
+ // keep the MPEG4Extractor around, since we're referencing its data
+ sp<MPEG4Extractor> mOwner;
sp<MetaData> mFormat;
sp<DataSource> mDataSource;
int32_t mTimescale;
@@ -74,6 +80,7 @@ private:
uint32_t mCurrentSampleIndex;
uint32_t mCurrentFragmentIndex;
Vector<SidxEntry> &mSegments;
+ const Trex *mTrex;
off64_t mFirstMoofOffset;
off64_t mCurrentMoofOffset;
off64_t mNextMoofOffset;
@@ -95,6 +102,7 @@ private:
uint64_t* mCurrentSampleInfoOffsets;
bool mIsAVC;
+ bool mIsHEVC;
size_t mNALLengthSize;
bool mStarted;
@@ -140,6 +148,7 @@ private:
off64_t offset;
size_t size;
uint32_t duration;
+ int32_t compositionOffset;
uint8_t iv[16];
Vector<size_t> clearsizes;
Vector<size_t> encryptedsizes;
@@ -260,7 +269,7 @@ static void hexdump(const void *_data, size_t size) {
const uint8_t *data = (const uint8_t *)_data;
size_t offset = 0;
while (offset < size) {
- printf("0x%04x ", offset);
+ printf("0x%04zx ", offset);
size_t n = size - offset;
if (n > 16) {
@@ -317,6 +326,9 @@ static const char *FourCC2MIME(uint32_t fourcc) {
case FOURCC('a', 'v', 'c', '1'):
return MEDIA_MIMETYPE_VIDEO_AVC;
+ case FOURCC('h', 'v', 'c', '1'):
+ case FOURCC('h', 'e', 'v', '1'):
+ return MEDIA_MIMETYPE_VIDEO_HEVC;
default:
CHECK(!"should not be here.");
return NULL;
@@ -339,8 +351,7 @@ static bool AdjustChannelsAndRate(uint32_t fourcc, uint32_t *channels, uint32_t
}
MPEG4Extractor::MPEG4Extractor(const sp<DataSource> &source)
- : mSidxDuration(0),
- mMoofOffset(0),
+ : mMoofOffset(0),
mDataSource(source),
mInitCheck(NO_INIT),
mHasVideo(false),
@@ -365,7 +376,7 @@ MPEG4Extractor::~MPEG4Extractor() {
SINF *sinf = mFirstSINF;
while (sinf) {
SINF *next = sinf->next;
- delete sinf->IPMPData;
+ delete[] sinf->IPMPData;
delete sinf;
sinf = next;
}
@@ -405,7 +416,7 @@ size_t MPEG4Extractor::countTracks() {
track = track->next;
}
- ALOGV("MPEG4Extractor::countTracks: %d tracks", n);
+ ALOGV("MPEG4Extractor::countTracks: %zu tracks", n);
return n;
}
@@ -478,8 +489,18 @@ status_t MPEG4Extractor::readMetaData() {
off64_t offset = 0;
status_t err;
while (true) {
+ off64_t orig_offset = offset;
err = parseChunk(&offset, 0);
- if (err == OK) {
+
+ if (err != OK && err != UNKNOWN_ERROR) {
+ break;
+ } else if (offset <= orig_offset) {
+ // only continue parsing if the offset was advanced,
+ // otherwise we might end up in an infinite loop
+ ALOGE("did not advance: 0x%lld->0x%lld", orig_offset, offset);
+ err = ERROR_MALFORMED;
+ break;
+ } else if (err == OK) {
continue;
}
@@ -488,12 +509,12 @@ status_t MPEG4Extractor::readMetaData() {
break;
}
uint32_t chunk_type = ntohl(hdr[1]);
- if (chunk_type == FOURCC('s', 'i', 'd', 'x')) {
- // parse the sidx box too
- continue;
- } else if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+ if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
// store the offset of the first segment
mMoofOffset = offset;
+ } else if (chunk_type != FOURCC('m', 'd', 'a', 't')) {
+ // keep parsing until we get to the data
+ continue;
}
break;
}
@@ -505,8 +526,6 @@ status_t MPEG4Extractor::readMetaData() {
} else {
mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
}
-
- mInitCheck = OK;
} else {
mInitCheck = err;
}
@@ -571,7 +590,8 @@ static int32_t readSize(off64_t offset,
return size;
}
-status_t MPEG4Extractor::parseDrmSINF(off64_t *offset, off64_t data_offset) {
+status_t MPEG4Extractor::parseDrmSINF(
+ off64_t * /* offset */, off64_t data_offset) {
uint8_t updateIdTag;
if (mDataSource->readAt(data_offset, &updateIdTag, 1) < 1) {
return ERROR_IO;
@@ -682,7 +702,10 @@ status_t MPEG4Extractor::parseDrmSINF(off64_t *offset, off64_t data_offset) {
return ERROR_MALFORMED;
}
sinf->len = dataLen - 3;
- sinf->IPMPData = new char[sinf->len];
+ sinf->IPMPData = new (std::nothrow) char[sinf->len];
+ if (sinf->IPMPData == NULL) {
+ return ERROR_MALFORMED;
+ }
data_offset += 2;
if (mDataSource->readAt(data_offset, sinf->IPMPData, sinf->len) < sinf->len) {
@@ -757,8 +780,25 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
// The smallest valid chunk is 16 bytes long in this case.
return ERROR_MALFORMED;
}
+ } else if (chunk_size == 0) {
+ if (depth == 0) {
+ // atom extends to end of file
+ off64_t sourceSize;
+ if (mDataSource->getSize(&sourceSize) == OK) {
+ chunk_size = (sourceSize - *offset);
+ } else {
+ // XXX could we just pick a "sufficiently large" value here?
+ ALOGE("atom size is 0, and data source has no size");
+ return ERROR_MALFORMED;
+ }
+ } else {
+ // not allowed for non-toplevel atoms, skip it
+ *offset += 4;
+ return OK;
+ }
} else if (chunk_size < 8) {
// The smallest valid chunk is 8 bytes long.
+ ALOGE("invalid chunk size: %" PRIu64, chunk_size);
return ERROR_MALFORMED;
}
@@ -769,7 +809,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
#if 0
static const char kWhitespace[] = " ";
const char *indent = &kWhitespace[sizeof(kWhitespace) - 1 - 2 * depth];
- printf("%sfound chunk '%s' of size %lld\n", indent, chunk, chunk_size);
+ printf("%sfound chunk '%s' of size %" PRIu64 "\n", indent, chunk, chunk_size);
char buffer[256];
size_t n = chunk_size;
@@ -825,7 +865,7 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('e', 'd', 't', 's'):
{
if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
- ALOGV("sampleTable chunk is %d bytes long.", (size_t)chunk_size);
+ ALOGV("sampleTable chunk is %" PRIu64 " bytes long.", chunk_size);
if (mDataSource->flags()
& (DataSource::kWantsPrefetching
@@ -912,6 +952,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('e', 'l', 's', 't'):
{
+ *offset += chunk_size;
+
// See 14496-12 8.6.6
uint8_t version;
if (mDataSource->readAt(data_offset, &version, 1) < 1) {
@@ -974,12 +1016,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setInt32(kKeyEncoderPadding, paddingsamples);
}
}
- *offset += chunk_size;
break;
}
case FOURCC('f', 'r', 'm', 'a'):
{
+ *offset += chunk_size;
+
uint32_t original_fourcc;
if (mDataSource->readAt(data_offset, &original_fourcc, 4) < 4) {
return ERROR_IO;
@@ -993,12 +1036,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setInt32(kKeyChannelCount, num_channels);
mLastTrack->meta->setInt32(kKeySampleRate, sample_rate);
}
- *offset += chunk_size;
break;
}
case FOURCC('t', 'e', 'n', 'c'):
{
+ *offset += chunk_size;
+
if (chunk_size < 32) {
return ERROR_MALFORMED;
}
@@ -1043,23 +1087,25 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setInt32(kKeyCryptoMode, defaultAlgorithmId);
mLastTrack->meta->setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
mLastTrack->meta->setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
- *offset += chunk_size;
break;
}
case FOURCC('t', 'k', 'h', 'd'):
{
+ *offset += chunk_size;
+
status_t err;
if ((err = parseTrackHeader(data_offset, chunk_data_size)) != OK) {
return err;
}
- *offset += chunk_size;
break;
}
case FOURCC('p', 's', 's', 'h'):
{
+ *offset += chunk_size;
+
PsshInfo pssh;
if (mDataSource->readAt(data_offset + 4, &pssh.uuid, 16) < 16) {
@@ -1077,7 +1123,10 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
return ERROR_MALFORMED;
}
- pssh.data = new uint8_t[pssh.datalen];
+ pssh.data = new (std::nothrow) uint8_t[pssh.datalen];
+ if (pssh.data == NULL) {
+ return ERROR_MALFORMED;
+ }
ALOGV("allocated pssh @ %p", pssh.data);
ssize_t requested = (ssize_t) pssh.datalen;
if (mDataSource->readAt(data_offset + 24, pssh.data, requested) < requested) {
@@ -1085,12 +1134,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
}
mPssh.push_back(pssh);
- *offset += chunk_size;
break;
}
case FOURCC('m', 'd', 'h', 'd'):
{
+ *offset += chunk_size;
+
if (chunk_data_size < 4) {
return ERROR_MALFORMED;
}
@@ -1121,6 +1171,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->timescale = ntohl(timescale);
+ // 14496-12 says all ones means indeterminate, but some files seem to use
+ // 0 instead. We treat both the same.
int64_t duration = 0;
if (version == 1) {
if (mDataSource->readAt(
@@ -1128,7 +1180,9 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
< (ssize_t)sizeof(duration)) {
return ERROR_IO;
}
- duration = ntoh64(duration);
+ if (duration != -1) {
+ duration = ntoh64(duration);
+ }
} else {
uint32_t duration32;
if (mDataSource->readAt(
@@ -1136,13 +1190,14 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
< (ssize_t)sizeof(duration32)) {
return ERROR_IO;
}
- // ffmpeg sets duration to -1, which is incorrect.
if (duration32 != 0xffffffff) {
duration = ntohl(duration32);
}
}
- mLastTrack->meta->setInt64(
- kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
+ if (duration != 0) {
+ mLastTrack->meta->setInt64(
+ kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
+ }
uint8_t lang[2];
off64_t lang_offset;
@@ -1171,7 +1226,6 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setCString(
kKeyMediaLanguage, lang_code);
- *offset += chunk_size;
break;
}
@@ -1281,6 +1335,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('H', '2', '6', '3'):
case FOURCC('h', '2', '6', '3'):
case FOURCC('a', 'v', 'c', '1'):
+ case FOURCC('h', 'v', 'c', '1'):
+ case FOURCC('h', 'e', 'v', '1'):
{
mHasVideo = true;
@@ -1338,11 +1394,12 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->sampleTable->setChunkOffsetParams(
chunk_type, data_offset, chunk_data_size);
+ *offset += chunk_size;
+
if (err != OK) {
return err;
}
- *offset += chunk_size;
break;
}
@@ -1352,11 +1409,12 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->sampleTable->setSampleToChunkParams(
data_offset, chunk_data_size);
+ *offset += chunk_size;
+
if (err != OK) {
return err;
}
- *offset += chunk_size;
break;
}
@@ -1367,6 +1425,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->sampleTable->setSampleSizeParams(
chunk_type, data_offset, chunk_data_size);
+ *offset += chunk_size;
+
if (err != OK) {
return err;
}
@@ -1407,7 +1467,6 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
}
mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size);
}
- *offset += chunk_size;
// NOTE: setting another piece of metadata invalidates any pointers (such as the
// mimetype) previously obtained, so don't cache them.
@@ -1431,6 +1490,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('s', 't', 't', 's'):
{
+ *offset += chunk_size;
+
status_t err =
mLastTrack->sampleTable->setTimeToSampleParams(
data_offset, chunk_data_size);
@@ -1439,12 +1500,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
return err;
}
- *offset += chunk_size;
break;
}
case FOURCC('c', 't', 't', 's'):
{
+ *offset += chunk_size;
+
status_t err =
mLastTrack->sampleTable->setCompositionTimeToSampleParams(
data_offset, chunk_data_size);
@@ -1453,12 +1515,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
return err;
}
- *offset += chunk_size;
break;
}
case FOURCC('s', 't', 's', 's'):
{
+ *offset += chunk_size;
+
status_t err =
mLastTrack->sampleTable->setSyncSampleParams(
data_offset, chunk_data_size);
@@ -1467,13 +1530,14 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
return err;
}
- *offset += chunk_size;
break;
}
// @xyz
case FOURCC('\xA9', 'x', 'y', 'z'):
{
+ *offset += chunk_size;
+
// Best case the total data length inside "@xyz" box
// would be 8, for instance "@xyz" + "\x00\x04\x15\xc7" + "0+0/",
// where "\x00\x04" is the text string length with value = 4,
@@ -1502,12 +1566,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
buffer[location_length] = '\0';
mFileMetaData->setCString(kKeyLocation, buffer);
- *offset += chunk_size;
break;
}
case FOURCC('e', 's', 'd', 's'):
{
+ *offset += chunk_size;
+
if (chunk_data_size < 4) {
return ERROR_MALFORMED;
}
@@ -1545,12 +1610,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
}
}
- *offset += chunk_size;
break;
}
case FOURCC('a', 'v', 'c', 'C'):
{
+ *offset += chunk_size;
+
sp<ABuffer> buffer = new ABuffer(chunk_data_size);
if (mDataSource->readAt(
@@ -1561,12 +1627,27 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setData(
kKeyAVCC, kTypeAVCC, buffer->data(), chunk_data_size);
+ break;
+ }
+ case FOURCC('h', 'v', 'c', 'C'):
+ {
+ sp<ABuffer> buffer = new ABuffer(chunk_data_size);
+
+ if (mDataSource->readAt(
+ data_offset, buffer->data(), chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ mLastTrack->meta->setData(
+ kKeyHVCC, kTypeHVCC, buffer->data(), chunk_data_size);
+
*offset += chunk_size;
break;
}
case FOURCC('d', '2', '6', '3'):
{
+ *offset += chunk_size;
/*
* d263 contains a fixed 7 bytes part:
* vendor - 4 bytes
@@ -1592,7 +1673,6 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setData(kKeyD263, kTypeD263, buffer, chunk_data_size);
- *offset += chunk_size;
break;
}
@@ -1600,11 +1680,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
{
uint8_t buffer[4];
if (chunk_data_size < (off64_t)sizeof(buffer)) {
+ *offset += chunk_size;
return ERROR_MALFORMED;
}
if (mDataSource->readAt(
data_offset, buffer, 4) < 4) {
+ *offset += chunk_size;
return ERROR_IO;
}
@@ -1638,6 +1720,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('n', 'a', 'm', 'e'):
case FOURCC('d', 'a', 't', 'a'):
{
+ *offset += chunk_size;
+
if (mPath.size() == 6 && underMetaDataPath(mPath)) {
status_t err = parseITunesMetaData(data_offset, chunk_data_size);
@@ -1646,17 +1730,18 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
}
}
- *offset += chunk_size;
break;
}
case FOURCC('m', 'v', 'h', 'd'):
{
- if (chunk_data_size < 24) {
+ *offset += chunk_size;
+
+ if (chunk_data_size < 32) {
return ERROR_MALFORMED;
}
- uint8_t header[24];
+ uint8_t header[32];
if (mDataSource->readAt(
data_offset, header, sizeof(header))
< (ssize_t)sizeof(header)) {
@@ -1664,14 +1749,27 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
}
uint64_t creationTime;
+ uint64_t duration = 0;
if (header[0] == 1) {
creationTime = U64_AT(&header[4]);
mHeaderTimescale = U32_AT(&header[20]);
+ duration = U64_AT(&header[24]);
+ if (duration == 0xffffffffffffffff) {
+ duration = 0;
+ }
} else if (header[0] != 0) {
return ERROR_MALFORMED;
} else {
creationTime = U32_AT(&header[4]);
mHeaderTimescale = U32_AT(&header[12]);
+ uint32_t d32 = U32_AT(&header[16]);
+ if (d32 == 0xffffffff) {
+ d32 = 0;
+ }
+ duration = d32;
+ }
+ if (duration != 0) {
+ mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
}
String8 s;
@@ -1679,7 +1777,50 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mFileMetaData->setCString(kKeyDate, s.string());
+ break;
+ }
+
+ case FOURCC('m', 'e', 'h', 'd'):
+ {
*offset += chunk_size;
+
+ if (chunk_data_size < 8) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t flags[4];
+ if (mDataSource->readAt(
+ data_offset, flags, sizeof(flags))
+ < (ssize_t)sizeof(flags)) {
+ return ERROR_IO;
+ }
+
+ uint64_t duration = 0;
+ if (flags[0] == 1) {
+ // 64 bit
+ if (chunk_data_size < 12) {
+ return ERROR_MALFORMED;
+ }
+ mDataSource->getUInt64(data_offset + 4, &duration);
+ if (duration == 0xffffffffffffffff) {
+ duration = 0;
+ }
+ } else if (flags[0] == 0) {
+ // 32 bit
+ uint32_t d32;
+ mDataSource->getUInt32(data_offset + 4, &d32);
+ if (d32 == 0xffffffff) {
+ d32 = 0;
+ }
+ duration = d32;
+ } else {
+ return ERROR_MALFORMED;
+ }
+
+ if (duration != 0) {
+ mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
+ }
+
break;
}
@@ -1700,6 +1841,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('h', 'd', 'l', 'r'):
{
+ *offset += chunk_size;
+
uint32_t buffer;
if (mDataSource->readAt(
data_offset + 8, &buffer, 4) < 4) {
@@ -1714,7 +1857,26 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_TEXT_3GPP);
}
+ break;
+ }
+
+ case FOURCC('t', 'r', 'e', 'x'):
+ {
*offset += chunk_size;
+
+ if (chunk_data_size < 24) {
+ return ERROR_IO;
+ }
+ uint32_t duration;
+ Trex trex;
+ if (!mDataSource->getUInt32(data_offset + 4, &trex.track_ID) ||
+ !mDataSource->getUInt32(data_offset + 8, &trex.default_sample_description_index) ||
+ !mDataSource->getUInt32(data_offset + 12, &trex.default_sample_duration) ||
+ !mDataSource->getUInt32(data_offset + 16, &trex.default_sample_size) ||
+ !mDataSource->getUInt32(data_offset + 20, &trex.default_sample_flags)) {
+ return ERROR_IO;
+ }
+ mTrex.add(trex);
break;
}
@@ -1728,7 +1890,10 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
size = 0;
}
- uint8_t *buffer = new uint8_t[size + chunk_size];
+ uint8_t *buffer = new (std::nothrow) uint8_t[size + chunk_size];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
if (size > 0) {
memcpy(buffer, data, size);
@@ -1739,6 +1904,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
delete[] buffer;
buffer = NULL;
+ // advance read pointer so we don't end up reading this again
+ *offset += chunk_size;
return ERROR_IO;
}
@@ -1753,6 +1920,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('c', 'o', 'v', 'r'):
{
+ *offset += chunk_size;
+
if (mFileMetaData != NULL) {
ALOGV("chunk_data_size = %lld and data_offset = %lld",
chunk_data_size, data_offset);
@@ -1767,7 +1936,6 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
buffer->data() + kSkipBytesOfDataBox, chunk_data_size - kSkipBytesOfDataBox);
}
- *offset += chunk_size;
break;
}
@@ -1778,25 +1946,27 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('a', 'l', 'b', 'm'):
case FOURCC('y', 'r', 'r', 'c'):
{
+ *offset += chunk_size;
+
status_t err = parse3GPPMetaData(data_offset, chunk_data_size, depth);
if (err != OK) {
return err;
}
- *offset += chunk_size;
break;
}
case FOURCC('I', 'D', '3', '2'):
{
+ *offset += chunk_size;
+
if (chunk_data_size < 6) {
return ERROR_MALFORMED;
}
parseID3v2MetaData(data_offset + 6);
- *offset += chunk_size;
break;
}
@@ -1888,7 +2058,7 @@ status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
offset += 16;
size -= 16;
}
- ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset);
+ ALOGV("sidx pres/off: %" PRIu64 "/%" PRIu64, earliestPresentationTime, firstOffset);
if (size < 4) {
return -EINVAL;
@@ -1920,9 +2090,10 @@ status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
ALOGW("sub-sidx boxes not supported yet");
}
bool sap = d3 & 0x80000000;
- bool saptype = d3 >> 28;
- if (!sap || saptype > 2) {
- ALOGW("not a stream access point, or unsupported type");
+ uint32_t saptype = (d3 >> 28) & 7;
+ if (!sap || (saptype != 1 && saptype != 2)) {
+ // type 1 and 2 are sync samples
+ ALOGW("not a stream access point, or unsupported type: %08x", d3);
}
total_duration += d2;
offset += 12;
@@ -1933,12 +2104,11 @@ status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
mSidxEntries.add(se);
}
- mSidxDuration = total_duration * 1000000 / timeScale;
- ALOGV("duration: %lld", mSidxDuration);
+ uint64_t sidxDuration = total_duration * 1000000 / timeScale;
int64_t metaDuration;
if (!mLastTrack->meta->findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) {
- mLastTrack->meta->setInt64(kKeyDuration, mSidxDuration);
+ mLastTrack->meta->setInt64(kKeyDuration, sidxDuration);
}
return OK;
}
@@ -2039,7 +2209,10 @@ status_t MPEG4Extractor::parseITunesMetaData(off64_t offset, size_t size) {
return ERROR_MALFORMED;
}
- uint8_t *buffer = new uint8_t[size + 1];
+ uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
if (mDataSource->readAt(
offset, buffer, size) != (ssize_t)size) {
delete[] buffer;
@@ -2226,7 +2399,10 @@ status_t MPEG4Extractor::parse3GPPMetaData(off64_t offset, size_t size, int dept
return ERROR_MALFORMED;
}
- uint8_t *buffer = new uint8_t[size];
+ uint8_t *buffer = new (std::nothrow) uint8_t[size];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
if (mDataSource->readAt(
offset, buffer, size) != (ssize_t)size) {
delete[] buffer;
@@ -2405,11 +2581,24 @@ sp<MediaSource> MPEG4Extractor::getTrack(size_t index) {
return NULL;
}
- ALOGV("getTrack called, pssh: %d", mPssh.size());
- return new MPEG4Source(
+ Trex *trex = NULL;
+ int32_t trackId;
+ if (track->meta->findInt32(kKeyTrackID, &trackId)) {
+ for (size_t i = 0; i < mTrex.size(); i++) {
+ Trex *t = &mTrex.editItemAt(index);
+ if (t->track_ID == (uint32_t) trackId) {
+ trex = t;
+ break;
+ }
+ }
+ }
+
+ ALOGV("getTrack called, pssh: %zu", mPssh.size());
+
+ return new MPEG4Source(this,
track->meta, mDataSource, track->timescale, track->sampleTable,
- mSidxEntries, mMoofOffset);
+ mSidxEntries, trex, mMoofOffset);
}
// static
@@ -2425,6 +2614,11 @@ status_t MPEG4Extractor::verifyTrack(Track *track) {
|| type != kTypeAVCC) {
return ERROR_MALFORMED;
}
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ if (!track->meta->findData(kKeyHVCC, &type, &data, &size)
+ || type != kTypeHVCC) {
+ return ERROR_MALFORMED;
+ }
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
|| !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
if (!track->meta->findData(kKeyESDS, &type, &data, &size)
@@ -2433,14 +2627,67 @@ status_t MPEG4Extractor::verifyTrack(Track *track) {
}
}
- if (!track->sampleTable->isValid()) {
+ if (track->sampleTable == NULL || !track->sampleTable->isValid()) {
// Make sure we have all the metadata we need.
+ ALOGE("stbl atom missing/invalid.");
return ERROR_MALFORMED;
}
return OK;
}
+typedef enum {
+ //AOT_NONE = -1,
+ //AOT_NULL_OBJECT = 0,
+ //AOT_AAC_MAIN = 1, /**< Main profile */
+ AOT_AAC_LC = 2, /**< Low Complexity object */
+ //AOT_AAC_SSR = 3,
+ //AOT_AAC_LTP = 4,
+ AOT_SBR = 5,
+ //AOT_AAC_SCAL = 6,
+ //AOT_TWIN_VQ = 7,
+ //AOT_CELP = 8,
+ //AOT_HVXC = 9,
+ //AOT_RSVD_10 = 10, /**< (reserved) */
+ //AOT_RSVD_11 = 11, /**< (reserved) */
+ //AOT_TTSI = 12, /**< TTSI Object */
+ //AOT_MAIN_SYNTH = 13, /**< Main Synthetic object */
+ //AOT_WAV_TAB_SYNTH = 14, /**< Wavetable Synthesis object */
+ //AOT_GEN_MIDI = 15, /**< General MIDI object */
+ //AOT_ALG_SYNTH_AUD_FX = 16, /**< Algorithmic Synthesis and Audio FX object */
+ AOT_ER_AAC_LC = 17, /**< Error Resilient(ER) AAC Low Complexity */
+ //AOT_RSVD_18 = 18, /**< (reserved) */
+ //AOT_ER_AAC_LTP = 19, /**< Error Resilient(ER) AAC LTP object */
+ AOT_ER_AAC_SCAL = 20, /**< Error Resilient(ER) AAC Scalable object */
+ //AOT_ER_TWIN_VQ = 21, /**< Error Resilient(ER) TwinVQ object */
+ AOT_ER_BSAC = 22, /**< Error Resilient(ER) BSAC object */
+ AOT_ER_AAC_LD = 23, /**< Error Resilient(ER) AAC LowDelay object */
+ //AOT_ER_CELP = 24, /**< Error Resilient(ER) CELP object */
+ //AOT_ER_HVXC = 25, /**< Error Resilient(ER) HVXC object */
+ //AOT_ER_HILN = 26, /**< Error Resilient(ER) HILN object */
+ //AOT_ER_PARA = 27, /**< Error Resilient(ER) Parametric object */
+ //AOT_RSVD_28 = 28, /**< might become SSC */
+ AOT_PS = 29, /**< PS, Parametric Stereo (includes SBR) */
+ //AOT_MPEGS = 30, /**< MPEG Surround */
+
+ AOT_ESCAPE = 31, /**< Signal AOT uses more than 5 bits */
+
+ //AOT_MP3ONMP4_L1 = 32, /**< MPEG-Layer1 in mp4 */
+ //AOT_MP3ONMP4_L2 = 33, /**< MPEG-Layer2 in mp4 */
+ //AOT_MP3ONMP4_L3 = 34, /**< MPEG-Layer3 in mp4 */
+ //AOT_RSVD_35 = 35, /**< might become DST */
+ //AOT_RSVD_36 = 36, /**< might become ALS */
+ //AOT_AAC_SLS = 37, /**< AAC + SLS */
+ //AOT_SLS = 38, /**< SLS */
+ //AOT_ER_AAC_ELD = 39, /**< AAC Enhanced Low Delay */
+
+ //AOT_USAC = 42, /**< USAC */
+ //AOT_SAOC = 43, /**< SAOC */
+ //AOT_LD_MPEGS = 44, /**< Low Delay MPEG Surround */
+
+ //AOT_RSVD50 = 50, /**< Interim AOT for Rsvd50 */
+} AUDIO_OBJECT_TYPE;
+
status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
const void *esds_data, size_t esds_size) {
ESDS esds(esds_data, esds_size);
@@ -2523,7 +2770,7 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
sampleRate = kSamplingRate[freqIndex];
}
- if (objectType == 5 || objectType == 29) { // SBR specific config per 14496-3 table 1.13
+ if (objectType == AOT_SBR || objectType == AOT_PS) {//SBR specific config per 14496-3 table 1.13
uint32_t extFreqIndex = br.getBits(4);
int32_t extSampleRate;
if (extFreqIndex == 15) {
@@ -2541,6 +2788,131 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
// mLastTrack->meta->setInt32(kKeyExtSampleRate, extSampleRate);
}
+ switch (numChannels) {
+ // values defined in 14496-3_2009 amendment-4 Table 1.19 - Channel Configuration
+ case 0:
+ case 1:// FC
+ case 2:// FL FR
+ case 3:// FC, FL FR
+ case 4:// FC, FL FR, RC
+ case 5:// FC, FL FR, SL SR
+ case 6:// FC, FL FR, SL SR, LFE
+ //numChannels already contains the right value
+ break;
+ case 11:// FC, FL FR, SL SR, RC, LFE
+ numChannels = 7;
+ break;
+ case 7: // FC, FCL FCR, FL FR, SL SR, LFE
+ case 12:// FC, FL FR, SL SR, RL RR, LFE
+ case 14:// FC, FL FR, SL SR, LFE, FHL FHR
+ numChannels = 8;
+ break;
+ default:
+ return ERROR_UNSUPPORTED;
+ }
+
+ {
+ if (objectType == AOT_SBR || objectType == AOT_PS) {
+ objectType = br.getBits(5);
+
+ if (objectType == AOT_ESCAPE) {
+ objectType = 32 + br.getBits(6);
+ }
+ }
+ if (objectType == AOT_AAC_LC || objectType == AOT_ER_AAC_LC ||
+ objectType == AOT_ER_AAC_LD || objectType == AOT_ER_AAC_SCAL ||
+ objectType == AOT_ER_BSAC) {
+ const int32_t frameLengthFlag = br.getBits(1);
+
+ const int32_t dependsOnCoreCoder = br.getBits(1);
+
+ if (dependsOnCoreCoder ) {
+ const int32_t coreCoderDelay = br.getBits(14);
+ }
+
+ int32_t extensionFlag = -1;
+ if (br.numBitsLeft() > 0) {
+ extensionFlag = br.getBits(1);
+ } else {
+ switch (objectType) {
+ // 14496-3 4.5.1.1 extensionFlag
+ case AOT_AAC_LC:
+ extensionFlag = 0;
+ break;
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_SCAL:
+ case AOT_ER_BSAC:
+ case AOT_ER_AAC_LD:
+ extensionFlag = 1;
+ break;
+ default:
+ TRESPASS();
+ break;
+ }
+ ALOGW("csd missing extension flag; assuming %d for object type %u.",
+ extensionFlag, objectType);
+ }
+
+ if (numChannels == 0) {
+ int32_t channelsEffectiveNum = 0;
+ int32_t channelsNum = 0;
+ const int32_t ElementInstanceTag = br.getBits(4);
+ const int32_t Profile = br.getBits(2);
+ const int32_t SamplingFrequencyIndex = br.getBits(4);
+ const int32_t NumFrontChannelElements = br.getBits(4);
+ const int32_t NumSideChannelElements = br.getBits(4);
+ const int32_t NumBackChannelElements = br.getBits(4);
+ const int32_t NumLfeChannelElements = br.getBits(2);
+ const int32_t NumAssocDataElements = br.getBits(3);
+ const int32_t NumValidCcElements = br.getBits(4);
+
+ const int32_t MonoMixdownPresent = br.getBits(1);
+ if (MonoMixdownPresent != 0) {
+ const int32_t MonoMixdownElementNumber = br.getBits(4);
+ }
+
+ const int32_t StereoMixdownPresent = br.getBits(1);
+ if (StereoMixdownPresent != 0) {
+ const int32_t StereoMixdownElementNumber = br.getBits(4);
+ }
+
+ const int32_t MatrixMixdownIndexPresent = br.getBits(1);
+ if (MatrixMixdownIndexPresent != 0) {
+ const int32_t MatrixMixdownIndex = br.getBits(2);
+ const int32_t PseudoSurroundEnable = br.getBits(1);
+ }
+
+ int i;
+ for (i=0; i < NumFrontChannelElements; i++) {
+ const int32_t FrontElementIsCpe = br.getBits(1);
+ const int32_t FrontElementTagSelect = br.getBits(4);
+ channelsNum += FrontElementIsCpe ? 2 : 1;
+ }
+
+ for (i=0; i < NumSideChannelElements; i++) {
+ const int32_t SideElementIsCpe = br.getBits(1);
+ const int32_t SideElementTagSelect = br.getBits(4);
+ channelsNum += SideElementIsCpe ? 2 : 1;
+ }
+
+ for (i=0; i < NumBackChannelElements; i++) {
+ const int32_t BackElementIsCpe = br.getBits(1);
+ const int32_t BackElementTagSelect = br.getBits(4);
+ channelsNum += BackElementIsCpe ? 2 : 1;
+ }
+ channelsEffectiveNum = channelsNum;
+
+ for (i=0; i < NumLfeChannelElements; i++) {
+ const int32_t LfeElementTagSelect = br.getBits(4);
+ channelsNum += 1;
+ }
+ ALOGV("mpeg4 audio channelsNum = %d", channelsNum);
+ ALOGV("mpeg4 audio channelsEffectiveNum = %d", channelsEffectiveNum);
+ numChannels = channelsNum;
+ }
+ }
+ }
+
if (numChannels == 0) {
return ERROR_UNSUPPORTED;
}
@@ -2571,19 +2943,23 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
////////////////////////////////////////////////////////////////////////////////
MPEG4Source::MPEG4Source(
+ const sp<MPEG4Extractor> &owner,
const sp<MetaData> &format,
const sp<DataSource> &dataSource,
int32_t timeScale,
const sp<SampleTable> &sampleTable,
Vector<SidxEntry> &sidx,
+ const Trex *trex,
off64_t firstMoofOffset)
- : mFormat(format),
+ : mOwner(owner),
+ mFormat(format),
mDataSource(dataSource),
mTimescale(timeScale),
mSampleTable(sampleTable),
mCurrentSampleIndex(0),
mCurrentFragmentIndex(0),
mSegments(sidx),
+ mTrex(trex),
mFirstMoofOffset(firstMoofOffset),
mCurrentMoofOffset(firstMoofOffset),
mCurrentTime(0),
@@ -2592,6 +2968,7 @@ MPEG4Source::MPEG4Source(
mCurrentSampleInfoOffsetsAllocSize(0),
mCurrentSampleInfoOffsets(NULL),
mIsAVC(false),
+ mIsHEVC(false),
mNALLengthSize(0),
mStarted(false),
mGroup(NULL),
@@ -2599,6 +2976,8 @@ MPEG4Source::MPEG4Source(
mWantsNALFragments(false),
mSrcBuffer(NULL) {
+ memset(&mTrackFragmentHeaderInfo, 0, sizeof(mTrackFragmentHeaderInfo));
+
mFormat->findInt32(kKeyCryptoMode, &mCryptoMode);
mDefaultIVSize = 0;
mFormat->findInt32(kKeyCryptoDefaultIVSize, &mDefaultIVSize);
@@ -2616,6 +2995,7 @@ MPEG4Source::MPEG4Source(
CHECK(success);
mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
+ mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
if (mIsAVC) {
uint32_t type;
@@ -2630,6 +3010,18 @@ MPEG4Source::MPEG4Source(
// The number of bytes used to encode the length of a NAL unit.
mNALLengthSize = 1 + (ptr[4] & 3);
+ } else if (mIsHEVC) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ CHECK(format->findData(kKeyHVCC, &type, &data, &size));
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ CHECK(size >= 7);
+ CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
+
+ mNALLengthSize = 1 + (ptr[14 + 7] & 3);
}
CHECK(format->findInt32(kKeyTrackID, &mTrackId));
@@ -2668,7 +3060,11 @@ status_t MPEG4Source::start(MetaData *params) {
mGroup->add_buffer(new MediaBuffer(max_size));
- mSrcBuffer = new uint8_t[max_size];
+ mSrcBuffer = new (std::nothrow) uint8_t[max_size];
+ if (mSrcBuffer == NULL) {
+ // file probably specified a bad max size
+ return ERROR_MALFORMED;
+ }
mStarted = true;
@@ -2741,9 +3137,20 @@ status_t MPEG4Source::parseChunk(off64_t *offset) {
}
}
if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
- // *offset points to the mdat box following this moof
- parseChunk(offset); // doesn't actually parse it, just updates offset
- mNextMoofOffset = *offset;
+ // *offset points to the box following this moof. Find the next moof from there.
+
+ while (true) {
+ if (mDataSource->readAt(*offset, hdr, 8) < 8) {
+ return ERROR_END_OF_STREAM;
+ }
+ chunk_size = ntohl(hdr[0]);
+ chunk_type = ntohl(hdr[1]);
+ if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+ mNextMoofOffset = *offset;
+ break;
+ }
+ *offset += chunk_size;
+ }
}
break;
}
@@ -2802,7 +3209,8 @@ status_t MPEG4Source::parseChunk(off64_t *offset) {
return OK;
}
-status_t MPEG4Source::parseSampleAuxiliaryInformationSizes(off64_t offset, off64_t size) {
+status_t MPEG4Source::parseSampleAuxiliaryInformationSizes(
+ off64_t offset, off64_t /* size */) {
ALOGV("parseSampleAuxiliaryInformationSizes");
// 14496-12 8.7.12
uint8_t version;
@@ -2864,7 +3272,8 @@ status_t MPEG4Source::parseSampleAuxiliaryInformationSizes(off64_t offset, off64
return OK;
}
-status_t MPEG4Source::parseSampleAuxiliaryInformationOffsets(off64_t offset, off64_t size) {
+status_t MPEG4Source::parseSampleAuxiliaryInformationOffsets(
+ off64_t offset, off64_t /* size */) {
ALOGV("parseSampleAuxiliaryInformationOffsets");
// 14496-12 8.7.13
uint8_t version;
@@ -3140,8 +3549,8 @@ status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) {
} else if (mTrackFragmentHeaderInfo.mFlags
& TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
- } else {
- sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
+ } else if (mTrex) {
+ sampleDuration = mTrex->default_sample_duration;
}
if (flags & kSampleSizePresent) {
@@ -3168,7 +3577,7 @@ status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) {
sampleCtsOffset = 0;
}
- if (size < sampleCount * bytesPerSample) {
+ if (size < (off64_t)sampleCount * bytesPerSample) {
return -EINVAL;
}
@@ -3202,7 +3611,7 @@ status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) {
offset += 4;
}
- ALOGV("adding sample %d at offset 0x%08llx, size %u, duration %u, "
+ ALOGV("adding sample %d at offset 0x%08" PRIx64 ", size %u, duration %u, "
" flags 0x%08x", i + 1,
dataOffset, sampleSize, sampleDuration,
(flags & kFirstSampleFlagsPresent) && i == 0
@@ -3210,6 +3619,7 @@ status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) {
tmp.offset = dataOffset;
tmp.size = sampleSize;
tmp.duration = sampleDuration;
+ tmp.compositionOffset = sampleCtsOffset;
mCurrentSamples.add(tmp);
dataOffset += sampleSize;
@@ -3281,7 +3691,7 @@ status_t MPEG4Source::read(
uint32_t sampleIndex;
status_t err = mSampleTable->findSampleAtTime(
- seekTimeUs * mTimescale / 1000000,
+ seekTimeUs, 1000000, mTimescale,
&sampleIndex, findFlags);
if (mode == ReadOptions::SEEK_CLOSEST) {
@@ -3343,7 +3753,7 @@ status_t MPEG4Source::read(
off64_t offset;
size_t size;
- uint32_t cts;
+ uint32_t cts, stts;
bool isSyncSample;
bool newBuffer = false;
if (mBuffer == NULL) {
@@ -3351,7 +3761,7 @@ status_t MPEG4Source::read(
status_t err =
mSampleTable->getMetaDataForSample(
- mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample);
+ mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
if (err != OK) {
return err;
@@ -3365,7 +3775,7 @@ status_t MPEG4Source::read(
}
}
- if (!mIsAVC || mWantsNALFragments) {
+ if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
if (newBuffer) {
ssize_t num_bytes_read =
mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
@@ -3382,6 +3792,8 @@ status_t MPEG4Source::read(
mBuffer->meta_data()->clear();
mBuffer->meta_data()->setInt64(
kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data()->setInt64(
+ kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
mBuffer->meta_data()->setInt64(
@@ -3395,7 +3807,7 @@ status_t MPEG4Source::read(
++mCurrentSampleIndex;
}
- if (!mIsAVC) {
+ if (!mIsAVC && !mIsHEVC) {
*out = mBuffer;
mBuffer = NULL;
@@ -3504,6 +3916,8 @@ status_t MPEG4Source::read(
mBuffer->meta_data()->clear();
mBuffer->meta_data()->setInt64(
kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data()->setInt64(
+ kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
mBuffer->meta_data()->setInt64(
@@ -3546,7 +3960,7 @@ status_t MPEG4Source::fragmentedRead(
const SidxEntry *se = &mSegments[i];
if (totalTime + se->mDurationUs > seekTimeUs) {
// The requested time is somewhere in this segment
- if ((mode == ReadOptions::SEEK_NEXT_SYNC) ||
+ if ((mode == ReadOptions::SEEK_NEXT_SYNC && seekTimeUs > totalTime) ||
(mode == ReadOptions::SEEK_CLOSEST_SYNC &&
(seekTimeUs - totalTime) > (totalTime + se->mDurationUs - seekTimeUs))) {
// requested next sync, or closest sync and it was closer to the end of
@@ -3559,11 +3973,19 @@ status_t MPEG4Source::fragmentedRead(
totalTime += se->mDurationUs;
totalOffset += se->mSize;
}
- mCurrentMoofOffset = totalOffset;
- mCurrentSamples.clear();
- mCurrentSampleIndex = 0;
- parseChunk(&totalOffset);
- mCurrentTime = totalTime * mTimescale / 1000000ll;
+ mCurrentMoofOffset = totalOffset;
+ mCurrentSamples.clear();
+ mCurrentSampleIndex = 0;
+ parseChunk(&totalOffset);
+ mCurrentTime = totalTime * mTimescale / 1000000ll;
+ } else {
+ // without sidx boxes, we can only seek to 0
+ mCurrentMoofOffset = mFirstMoofOffset;
+ mCurrentSamples.clear();
+ mCurrentSampleIndex = 0;
+ off64_t tmp = mCurrentMoofOffset;
+ parseChunk(&tmp);
+ mCurrentTime = 0;
}
if (mBuffer != NULL) {
@@ -3575,7 +3997,7 @@ status_t MPEG4Source::fragmentedRead(
}
off64_t offset = 0;
- size_t size;
+ size_t size = 0;
uint32_t cts = 0;
bool isSyncSample = false;
bool newBuffer = false;
@@ -3583,22 +4005,24 @@ status_t MPEG4Source::fragmentedRead(
newBuffer = true;
if (mCurrentSampleIndex >= mCurrentSamples.size()) {
- // move to next fragment
- Sample lastSample = mCurrentSamples[mCurrentSamples.size() - 1];
- off64_t nextMoof = mNextMoofOffset; // lastSample.offset + lastSample.size;
+ // move to next fragment if there is one
+ if (mNextMoofOffset <= mCurrentMoofOffset) {
+ return ERROR_END_OF_STREAM;
+ }
+ off64_t nextMoof = mNextMoofOffset;
mCurrentMoofOffset = nextMoof;
mCurrentSamples.clear();
mCurrentSampleIndex = 0;
parseChunk(&nextMoof);
- if (mCurrentSampleIndex >= mCurrentSamples.size()) {
- return ERROR_END_OF_STREAM;
- }
+ if (mCurrentSampleIndex >= mCurrentSamples.size()) {
+ return ERROR_END_OF_STREAM;
+ }
}
const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
offset = smpl->offset;
size = smpl->size;
- cts = mCurrentTime;
+ cts = mCurrentTime + smpl->compositionOffset;
mCurrentTime += smpl->duration;
isSyncSample = (mCurrentSampleIndex == 0); // XXX
@@ -3626,7 +4050,7 @@ status_t MPEG4Source::fragmentedRead(
bufmeta->setData(kKeyCryptoKey, 0, mCryptoKey, 16);
}
- if (!mIsAVC || mWantsNALFragments) {
+ if ((!mIsAVC && !mIsHEVC)|| mWantsNALFragments) {
if (newBuffer) {
ssize_t num_bytes_read =
mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
@@ -3643,6 +4067,8 @@ status_t MPEG4Source::fragmentedRead(
mBuffer->set_range(0, size);
mBuffer->meta_data()->setInt64(
kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data()->setInt64(
+ kKeyDuration, ((int64_t)smpl->duration * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
mBuffer->meta_data()->setInt64(
@@ -3656,7 +4082,7 @@ status_t MPEG4Source::fragmentedRead(
++mCurrentSampleIndex;
}
- if (!mIsAVC) {
+ if (!mIsAVC && !mIsHEVC) {
*out = mBuffer;
mBuffer = NULL;
@@ -3766,6 +4192,8 @@ status_t MPEG4Source::fragmentedRead(
mBuffer->meta_data()->setInt64(
kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data()->setInt64(
+ kKeyDuration, ((int64_t)smpl->duration * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
mBuffer->meta_data()->setInt64(
@@ -3828,6 +4256,8 @@ static bool isCompatibleBrand(uint32_t fourcc) {
FOURCC('i', 's', 'o', 'm'),
FOURCC('i', 's', 'o', '2'),
FOURCC('a', 'v', 'c', '1'),
+ FOURCC('h', 'v', 'c', '1'),
+ FOURCC('h', 'e', 'v', '1'),
FOURCC('3', 'g', 'p', '4'),
FOURCC('m', 'p', '4', '1'),
FOURCC('m', 'p', '4', '2'),
@@ -3899,7 +4329,7 @@ static bool BetterSniffMPEG4(
char chunkstring[5];
MakeFourCCString(chunkType, chunkstring);
- ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset);
+ ALOGV("saw chunk type %s, size %" PRIu64 " @ %lld", chunkstring, chunkSize, offset);
switch (chunkType) {
case FOURCC('f', 't', 'y', 'p'):
{
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 42885dd..4b8440b 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -16,12 +16,17 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MPEG4Writer"
-#include <utils/Log.h>
#include <arpa/inet.h>
-
+#include <fcntl.h>
+#include <inttypes.h>
#include <pthread.h>
#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <utils/Log.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MPEG4Writer.h>
@@ -33,13 +38,15 @@
#include <media/stagefright/Utils.h>
#include <media/mediarecorder.h>
#include <cutils/properties.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
#include "include/ESDS.h"
+#define WARN_UNLESS(condition, message, ...) \
+( (CONDITION(condition)) ? false : ({ \
+ ALOGW("Condition %s failed " message, #condition, ##__VA_ARGS__); \
+ true; \
+}))
+
namespace android {
static const int64_t kMinStreamableFileSizeInBytes = 5 * 1024 * 1024;
@@ -408,7 +415,7 @@ status_t MPEG4Writer::dump(
}
status_t MPEG4Writer::Track::dump(
- int fd, const Vector<String16>& args) const {
+ int fd, const Vector<String16>& /* args */) const {
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
@@ -419,7 +426,7 @@ status_t MPEG4Writer::Track::dump(
result.append(buffer);
snprintf(buffer, SIZE, " frames encoded : %d\n", mStszTableEntries->count());
result.append(buffer);
- snprintf(buffer, SIZE, " duration encoded : %lld us\n", mTrackDurationUs);
+ snprintf(buffer, SIZE, " duration encoded : %" PRId64 " us\n", mTrackDurationUs);
result.append(buffer);
::write(fd, result.string(), result.size());
return OK;
@@ -434,7 +441,7 @@ status_t MPEG4Writer::addSource(const sp<MediaSource> &source) {
// At most 2 tracks can be supported.
if (mTracks.size() >= 2) {
- ALOGE("Too many tracks (%d) to add", mTracks.size());
+ ALOGE("Too many tracks (%zu) to add", mTracks.size());
return ERROR_UNSUPPORTED;
}
@@ -548,8 +555,8 @@ int64_t MPEG4Writer::estimateMoovBoxSize(int32_t bitRate) {
size = MAX_MOOV_BOX_SIZE;
}
- ALOGI("limits: %lld/%lld bytes/us, bit rate: %d bps and the estimated"
- " moov size %lld bytes",
+ ALOGI("limits: %" PRId64 "/%" PRId64 " bytes/us, bit rate: %d bps and the"
+ " estimated moov size %" PRId64 " bytes",
mMaxFileSizeLimitBytes, mMaxFileDurationLimitUs, bitRate, size);
return factor * size;
}
@@ -585,8 +592,8 @@ status_t MPEG4Writer::start(MetaData *param) {
// If file size is set to be larger than the 32 bit file
// size limit, treat it as an error.
if (mMaxFileSizeLimitBytes > kMax32BitFileSize) {
- ALOGW("32-bit file size limit (%lld bytes) too big. "
- "It is changed to %lld bytes",
+ ALOGW("32-bit file size limit (%" PRId64 " bytes) too big. "
+ "It is changed to %" PRId64 " bytes",
mMaxFileSizeLimitBytes, kMax32BitFileSize);
mMaxFileSizeLimitBytes = kMax32BitFileSize;
}
@@ -847,7 +854,7 @@ status_t MPEG4Writer::reset() {
}
if (mTracks.size() > 1) {
- ALOGD("Duration from tracks range is [%lld, %lld] us",
+ ALOGD("Duration from tracks range is [%" PRId64 ", %" PRId64 "] us",
minDurationUs, maxDurationUs);
}
@@ -974,13 +981,16 @@ void MPEG4Writer::writeFtypBox(MetaData *param) {
if (param && param->findInt32(kKeyFileType, &fileType) &&
fileType != OUTPUT_FORMAT_MPEG_4) {
writeFourcc("3gp4");
+ writeInt32(0);
+ writeFourcc("isom");
+ writeFourcc("3gp4");
} else {
+ writeFourcc("mp42");
+ writeInt32(0);
writeFourcc("isom");
+ writeFourcc("mp42");
}
- writeInt32(0);
- writeFourcc("isom");
- writeFourcc("3gp4");
endBox();
}
@@ -1311,12 +1321,12 @@ bool MPEG4Writer::reachedEOS() {
}
void MPEG4Writer::setStartTimestampUs(int64_t timeUs) {
- ALOGI("setStartTimestampUs: %lld", timeUs);
+ ALOGI("setStartTimestampUs: %" PRId64, timeUs);
CHECK_GE(timeUs, 0ll);
Mutex::Autolock autoLock(mLock);
if (mStartTimestampUs < 0 || mStartTimestampUs > timeUs) {
mStartTimestampUs = timeUs;
- ALOGI("Earliest track starting time: %lld", mStartTimestampUs);
+ ALOGI("Earliest track starting time: %" PRId64, mStartTimestampUs);
}
}
@@ -1406,7 +1416,7 @@ void MPEG4Writer::Track::addOneSttsTableEntry(
size_t sampleCount, int32_t duration) {
if (duration == 0) {
- ALOGW("0-duration samples found: %d", sampleCount);
+ ALOGW("0-duration samples found: %zu", sampleCount);
}
mSttsTableEntries->add(htonl(sampleCount));
mSttsTableEntries->add(htonl(duration));
@@ -1517,7 +1527,7 @@ void MPEG4Writer::Track::initTrackingProgressStatus(MetaData *params) {
{
int64_t timeUs;
if (params && params->findInt64(kKeyTrackTimeStatus, &timeUs)) {
- ALOGV("Receive request to track progress status for every %lld us", timeUs);
+ ALOGV("Receive request to track progress status for every %" PRId64 " us", timeUs);
mTrackEveryTimeDurationUs = timeUs;
mTrackingProgressStatus = true;
}
@@ -1551,7 +1561,7 @@ void MPEG4Writer::bufferChunk(const Chunk& chunk) {
}
void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
- ALOGV("writeChunkToFile: %lld from %s track",
+ ALOGV("writeChunkToFile: %" PRId64 " from %s track",
chunk->mTimeStampUs, chunk->mTrack->isAudio()? "audio": "video");
int32_t isFirstSample = true;
@@ -1586,7 +1596,7 @@ void MPEG4Writer::writeAllChunks() {
sendSessionSummary();
mChunkInfos.clear();
- ALOGD("%d chunks are written in the last batch", outstandingChunks);
+ ALOGD("%zu chunks are written in the last batch", outstandingChunks);
}
bool MPEG4Writer::findChunkToWrite(Chunk *chunk) {
@@ -1727,7 +1737,7 @@ status_t MPEG4Writer::Track::start(MetaData *params) {
startTimeOffsetUs = kInitialDelayTimeUs;
}
startTimeUs += startTimeOffsetUs;
- ALOGI("Start time offset: %lld us", startTimeOffsetUs);
+ ALOGI("Start time offset: %" PRId64 " us", startTimeOffsetUs);
}
meta->setInt64(kKeyTime, startTimeUs);
@@ -1762,7 +1772,7 @@ status_t MPEG4Writer::Track::pause() {
}
status_t MPEG4Writer::Track::stop() {
- ALOGD("Stopping %s track", mIsAudio? "Audio": "Video");
+ ALOGD("%s track stopping", mIsAudio? "Audio": "Video");
if (!mStarted) {
ALOGE("Stop() called but track is not started");
return ERROR_END_OF_STREAM;
@@ -1773,18 +1783,13 @@ status_t MPEG4Writer::Track::stop() {
}
mDone = true;
+ ALOGD("%s track source stopping", mIsAudio? "Audio": "Video");
+ mSource->stop();
+ ALOGD("%s track source stopped", mIsAudio? "Audio": "Video");
+
void *dummy;
pthread_join(mThread, &dummy);
-
- status_t err = (status_t) dummy;
-
- ALOGD("Stopping %s track source", mIsAudio? "Audio": "Video");
- {
- status_t status = mSource->stop();
- if (err == OK && status != OK && status != ERROR_END_OF_STREAM) {
- err = status;
- }
- }
+ status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
ALOGD("%s track stopped", mIsAudio? "Audio": "Video");
return err;
@@ -1799,7 +1804,7 @@ void *MPEG4Writer::Track::ThreadWrapper(void *me) {
Track *track = static_cast<Track *>(me);
status_t err = track->threadEntry();
- return (void *) err;
+ return (void *)(uintptr_t)err;
}
static void getNalUnitType(uint8_t byte, uint8_t* type) {
@@ -1812,7 +1817,7 @@ static void getNalUnitType(uint8_t byte, uint8_t* type) {
static const uint8_t *findNextStartCode(
const uint8_t *data, size_t length) {
- ALOGV("findNextStartCode: %p %d", data, length);
+ ALOGV("findNextStartCode: %p %zu", data, length);
size_t bytesLeft = length;
while (bytesLeft > 4 &&
@@ -1871,7 +1876,7 @@ status_t MPEG4Writer::Track::copyAVCCodecSpecificData(
// 2 bytes for each of the parameter set length field
// plus the 7 bytes for the header
if (size < 4 + 7) {
- ALOGE("Codec specific data length too short: %d", size);
+ ALOGE("Codec specific data length too short: %zu", size);
return ERROR_MALFORMED;
}
@@ -1940,7 +1945,7 @@ status_t MPEG4Writer::Track::parseAVCCodecSpecificData(
}
if (nSeqParamSets > 0x1F) {
- ALOGE("Too many seq parameter sets (%d) found", nSeqParamSets);
+ ALOGE("Too many seq parameter sets (%zu) found", nSeqParamSets);
return ERROR_MALFORMED;
}
}
@@ -1953,7 +1958,7 @@ status_t MPEG4Writer::Track::parseAVCCodecSpecificData(
return ERROR_MALFORMED;
}
if (nPicParamSets > 0xFF) {
- ALOGE("Too many pic parameter sets (%d) found", nPicParamSets);
+ ALOGE("Too many pic parameter sets (%zd) found", nPicParamSets);
return ERROR_MALFORMED;
}
}
@@ -1983,7 +1988,7 @@ status_t MPEG4Writer::Track::makeAVCCodecSpecificData(
}
if (size < 4) {
- ALOGE("Codec specific data length too short: %d", size);
+ ALOGE("Codec specific data length too short: %zu", size);
return ERROR_MALFORMED;
}
@@ -2099,6 +2104,7 @@ status_t MPEG4Writer::Track::threadEntry() {
status_t err = OK;
MediaBuffer *buffer;
+ const char *trackName = mIsAudio ? "Audio" : "Video";
while (!mDone && (err = mSource->read(&buffer)) == OK) {
if (buffer->range_length() == 0) {
buffer->release();
@@ -2194,15 +2200,27 @@ status_t MPEG4Writer::Track::threadEntry() {
if (mResumed) {
int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- CHECK_GE(durExcludingEarlierPausesUs, 0ll);
+ if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ return ERROR_MALFORMED;
+ }
+
int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- CHECK_GE(pausedDurationUs, lastDurationUs);
+ if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
+ copy->release();
+ return ERROR_MALFORMED;
+ }
+
previousPausedDurationUs += pausedDurationUs - lastDurationUs;
mResumed = false;
}
timestampUs -= previousPausedDurationUs;
- CHECK_GE(timestampUs, 0ll);
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ return ERROR_MALFORMED;
+ }
+
if (!mIsAudio) {
/*
* Composition time: timestampUs
@@ -2214,15 +2232,23 @@ status_t MPEG4Writer::Track::threadEntry() {
decodingTimeUs -= previousPausedDurationUs;
cttsOffsetTimeUs =
timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
- CHECK_GE(cttsOffsetTimeUs, 0ll);
+ if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ return ERROR_MALFORMED;
+ }
+
timestampUs = decodingTimeUs;
- ALOGV("decoding time: %lld and ctts offset time: %lld",
+ ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
timestampUs, cttsOffsetTimeUs);
// Update ctts box table if necessary
currCttsOffsetTimeTicks =
(cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
- CHECK_LE(currCttsOffsetTimeTicks, 0x0FFFFFFFFLL);
+ if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
+ copy->release();
+ return ERROR_MALFORMED;
+ }
+
if (mStszTableEntries->count() == 0) {
// Force the first ctts table entry to have one single entry
// so that we can do adjustment for the initial track start
@@ -2260,9 +2286,13 @@ status_t MPEG4Writer::Track::threadEntry() {
}
}
- CHECK_GE(timestampUs, 0ll);
- ALOGV("%s media time stamp: %lld and previous paused duration %lld",
- mIsAudio? "Audio": "Video", timestampUs, previousPausedDurationUs);
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ return ERROR_MALFORMED;
+ }
+
+ ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
+ trackName, timestampUs, previousPausedDurationUs);
if (timestampUs > mTrackDurationUs) {
mTrackDurationUs = timestampUs;
}
@@ -2276,11 +2306,28 @@ status_t MPEG4Writer::Track::threadEntry() {
((timestampUs * mTimeScale + 500000LL) / 1000000LL -
(lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
if (currDurationTicks < 0ll) {
- ALOGE("timestampUs %lld < lastTimestampUs %lld for %s track",
- timestampUs, lastTimestampUs, mIsAudio? "Audio": "Video");
+ ALOGE("timestampUs %" PRId64 " < lastTimestampUs %" PRId64 " for %s track",
+ timestampUs, lastTimestampUs, trackName);
+ copy->release();
return UNKNOWN_ERROR;
}
+ // if the duration is different for this sample, see if it is close enough to the previous
+ // duration that we can fudge it and use the same value, to avoid filling the stts table
+ // with lots of near-identical entries.
+ // "close enough" here means that the current duration needs to be adjusted by less
+ // than 0.1 milliseconds
+ if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
+ int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
+ + (mTimeScale / 2)) / mTimeScale;
+ if (deltaUs > -100 && deltaUs < 100) {
+ // use previous ticks, and adjust timestamp as if it was actually that number
+ // of ticks
+ currDurationTicks = lastDurationTicks;
+ timestampUs += deltaUs;
+ }
+ }
+
mStszTableEntries->add(htonl(sampleSize));
if (mStszTableEntries->count() > 2) {
@@ -2300,8 +2347,8 @@ status_t MPEG4Writer::Track::threadEntry() {
}
previousSampleSize = sampleSize;
}
- ALOGV("%s timestampUs/lastTimestampUs: %lld/%lld",
- mIsAudio? "Audio": "Video", timestampUs, lastTimestampUs);
+ ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
+ trackName, timestampUs, lastTimestampUs);
lastDurationUs = timestampUs - lastTimestampUs;
lastDurationTicks = currDurationTicks;
lastTimestampUs = timestampUs;
@@ -2406,9 +2453,9 @@ status_t MPEG4Writer::Track::threadEntry() {
sendTrackSummary(hasMultipleTracks);
ALOGI("Received total/0-length (%d/%d) buffers and encoded %d frames. - %s",
- count, nZeroLengthFrames, mStszTableEntries->count(), mIsAudio? "audio": "video");
+ count, nZeroLengthFrames, mStszTableEntries->count(), trackName);
if (mIsAudio) {
- ALOGI("Audio track drift time: %lld us", mOwner->getDriftTimeUs());
+ ALOGI("Audio track drift time: %" PRId64 " us", mOwner->getDriftTimeUs());
}
if (err == ERROR_END_OF_STREAM) {
@@ -2491,11 +2538,11 @@ void MPEG4Writer::Track::sendTrackSummary(bool hasMultipleTracks) {
}
void MPEG4Writer::Track::trackProgressStatus(int64_t timeUs, status_t err) {
- ALOGV("trackProgressStatus: %lld us", timeUs);
+ ALOGV("trackProgressStatus: %" PRId64 " us", timeUs);
if (mTrackEveryTimeDurationUs > 0 &&
timeUs - mPreviousTrackTimeUs >= mTrackEveryTimeDurationUs) {
- ALOGV("Fire time tracking progress status at %lld us", timeUs);
+ ALOGV("Fire time tracking progress status at %" PRId64 " us", timeUs);
mOwner->trackProgressStatus(mTrackId, timeUs - mPreviousTrackTimeUs, err);
mPreviousTrackTimeUs = timeUs;
}
@@ -2529,13 +2576,13 @@ void MPEG4Writer::trackProgressStatus(
}
void MPEG4Writer::setDriftTimeUs(int64_t driftTimeUs) {
- ALOGV("setDriftTimeUs: %lld us", driftTimeUs);
+ ALOGV("setDriftTimeUs: %" PRId64 " us", driftTimeUs);
Mutex::Autolock autolock(mLock);
mDriftTimeUs = driftTimeUs;
}
int64_t MPEG4Writer::getDriftTimeUs() {
- ALOGV("getDriftTimeUs: %lld us", mDriftTimeUs);
+ ALOGV("getDriftTimeUs: %" PRId64 " us", mDriftTimeUs);
Mutex::Autolock autolock(mLock);
return mDriftTimeUs;
}
@@ -2991,7 +3038,7 @@ void MPEG4Writer::Track::writeCttsBox() {
return;
}
- ALOGV("ctts box has %d entries with range [%lld, %lld]",
+ ALOGV("ctts box has %d entries with range [%" PRId64 ", %" PRId64 "]",
mCttsTableEntries->count(), mMinCttsOffsetTimeUs, mMaxCttsOffsetTimeUs);
mOwner->beginBox("ctts");
diff --git a/media/libstagefright/MediaAdapter.cpp b/media/libstagefright/MediaAdapter.cpp
index 2484212..d680e0c 100644
--- a/media/libstagefright/MediaAdapter.cpp
+++ b/media/libstagefright/MediaAdapter.cpp
@@ -36,7 +36,7 @@ MediaAdapter::~MediaAdapter() {
CHECK(mCurrentMediaBuffer == NULL);
}
-status_t MediaAdapter::start(MetaData *params) {
+status_t MediaAdapter::start(MetaData * /* params */) {
Mutex::Autolock autoLock(mAdapterLock);
if (!mStarted) {
mStarted = true;
@@ -75,7 +75,7 @@ void MediaAdapter::signalBufferReturned(MediaBuffer *buffer) {
}
status_t MediaAdapter::read(
- MediaBuffer **buffer, const ReadOptions *options) {
+ MediaBuffer **buffer, const ReadOptions * /* options */) {
Mutex::Autolock autoLock(mAdapterLock);
if (!mStarted) {
ALOGV("Read before even started!");
diff --git a/media/libstagefright/MediaBuffer.cpp b/media/libstagefright/MediaBuffer.cpp
index 11b80bf..1f80a47 100644
--- a/media/libstagefright/MediaBuffer.cpp
+++ b/media/libstagefright/MediaBuffer.cpp
@@ -27,7 +27,6 @@
#include <media/stagefright/MetaData.h>
#include <ui/GraphicBuffer.h>
-#include <sys/atomics.h>
namespace android {
@@ -92,7 +91,7 @@ void MediaBuffer::release() {
return;
}
- int prevCount = __atomic_dec(&mRefCount);
+ int prevCount = __sync_fetch_and_sub(&mRefCount, 1);
if (prevCount == 1) {
if (mObserver == NULL) {
delete this;
@@ -112,7 +111,7 @@ void MediaBuffer::claim() {
}
void MediaBuffer::add_ref() {
- (void) __atomic_inc(&mRefCount);
+ (void) __sync_fetch_and_add(&mRefCount, 1);
}
void *MediaBuffer::data() const {
@@ -135,7 +134,7 @@ size_t MediaBuffer::range_length() const {
void MediaBuffer::set_range(size_t offset, size_t length) {
if ((mGraphicBuffer == NULL) && (offset + length > mSize)) {
- ALOGE("offset = %d, length = %d, mSize = %d", offset, length, mSize);
+ ALOGE("offset = %zu, length = %zu, mSize = %zu", offset, length, mSize);
}
CHECK((mGraphicBuffer != NULL) || (offset + length <= mSize));
diff --git a/media/libstagefright/MediaBufferGroup.cpp b/media/libstagefright/MediaBufferGroup.cpp
index 80aae51..6ac6d4a 100644
--- a/media/libstagefright/MediaBufferGroup.cpp
+++ b/media/libstagefright/MediaBufferGroup.cpp
@@ -55,7 +55,8 @@ void MediaBufferGroup::add_buffer(MediaBuffer *buffer) {
mLastBuffer = buffer;
}
-status_t MediaBufferGroup::acquire_buffer(MediaBuffer **out) {
+status_t MediaBufferGroup::acquire_buffer(
+ MediaBuffer **out, bool nonBlocking) {
Mutex::Autolock autoLock(mLock);
for (;;) {
@@ -70,6 +71,11 @@ status_t MediaBufferGroup::acquire_buffer(MediaBuffer **out) {
}
}
+ if (nonBlocking) {
+ *out = NULL;
+ return WOULD_BLOCK;
+ }
+
// All buffers are in use. Block until one of them is returned to us.
mCondition.wait(mLock);
}
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 8af1aaf..df47bd5 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -16,12 +16,13 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MediaCodec"
-#include <utils/Log.h>
-
-#include <media/stagefright/MediaCodec.h>
+#include <inttypes.h>
+#include "include/avc_utils.h"
#include "include/SoftwareRenderer.h"
+#include <binder/IBatteryStats.h>
+#include <binder/IServiceManager.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -31,45 +32,119 @@
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/ACodec.h>
#include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/NativeWindowWrapper.h>
-
-#include "include/avc_utils.h"
+#include <private/android_filesystem_config.h>
+#include <utils/Log.h>
+#include <utils/Singleton.h>
namespace android {
+struct MediaCodec::BatteryNotifier : public Singleton<BatteryNotifier> {
+ BatteryNotifier();
+
+ void noteStartVideo();
+ void noteStopVideo();
+ void noteStartAudio();
+ void noteStopAudio();
+
+private:
+ int32_t mVideoRefCount;
+ int32_t mAudioRefCount;
+ sp<IBatteryStats> mBatteryStatService;
+};
+
+ANDROID_SINGLETON_STATIC_INSTANCE(MediaCodec::BatteryNotifier)
+
+MediaCodec::BatteryNotifier::BatteryNotifier() :
+ mVideoRefCount(0),
+ mAudioRefCount(0) {
+ // get battery service
+ const sp<IServiceManager> sm(defaultServiceManager());
+ if (sm != NULL) {
+ const String16 name("batterystats");
+ mBatteryStatService = interface_cast<IBatteryStats>(sm->getService(name));
+ if (mBatteryStatService == NULL) {
+ ALOGE("batterystats service unavailable!");
+ }
+ }
+}
+
+void MediaCodec::BatteryNotifier::noteStartVideo() {
+ if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
+ mBatteryStatService->noteStartVideo(AID_MEDIA);
+ }
+ mVideoRefCount++;
+}
+
+void MediaCodec::BatteryNotifier::noteStopVideo() {
+ if (mVideoRefCount == 0) {
+ ALOGW("BatteryNotifier::noteStop(): video refcount is broken!");
+ return;
+ }
+
+ mVideoRefCount--;
+ if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
+ mBatteryStatService->noteStopVideo(AID_MEDIA);
+ }
+}
+
+void MediaCodec::BatteryNotifier::noteStartAudio() {
+ if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
+ mBatteryStatService->noteStartAudio(AID_MEDIA);
+ }
+ mAudioRefCount++;
+}
+
+void MediaCodec::BatteryNotifier::noteStopAudio() {
+ if (mAudioRefCount == 0) {
+ ALOGW("BatteryNotifier::noteStop(): audio refcount is broken!");
+ return;
+ }
+
+ mAudioRefCount--;
+ if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
+ mBatteryStatService->noteStopAudio(AID_MEDIA);
+ }
+}
// static
sp<MediaCodec> MediaCodec::CreateByType(
- const sp<ALooper> &looper, const char *mime, bool encoder) {
+ const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err) {
sp<MediaCodec> codec = new MediaCodec(looper);
- if (codec->init(mime, true /* nameIsType */, encoder) != OK) {
- return NULL;
- }
- return codec;
+ const status_t ret = codec->init(mime, true /* nameIsType */, encoder);
+ if (err != NULL) {
+ *err = ret;
+ }
+ return ret == OK ? codec : NULL; // NULL deallocates codec.
}
// static
sp<MediaCodec> MediaCodec::CreateByComponentName(
- const sp<ALooper> &looper, const char *name) {
+ const sp<ALooper> &looper, const char *name, status_t *err) {
sp<MediaCodec> codec = new MediaCodec(looper);
- if (codec->init(name, false /* nameIsType */, false /* encoder */) != OK) {
- return NULL;
- }
- return codec;
+ const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
+ if (err != NULL) {
+ *err = ret;
+ }
+ return ret == OK ? codec : NULL; // NULL deallocates codec.
}
MediaCodec::MediaCodec(const sp<ALooper> &looper)
: mState(UNINITIALIZED),
mLooper(looper),
- mCodec(new ACodec),
+ mCodec(NULL),
mReplyID(0),
mFlags(0),
+ mStickyError(OK),
mSoftRenderer(NULL),
+ mBatteryStatNotified(false),
+ mIsVideo(false),
mDequeueInputTimeoutGeneration(0),
mDequeueInputReplyID(0),
mDequeueOutputTimeoutGeneration(0),
@@ -97,29 +172,42 @@ status_t MediaCodec::PostAndAwaitResponse(
return err;
}
-status_t MediaCodec::init(const char *name, bool nameIsType, bool encoder) {
+// static
+void MediaCodec::PostReplyWithError(int32_t replyID, int32_t err) {
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+}
+
+status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
+ // save init parameters for reset
+ mInitName = name;
+ mInitNameIsType = nameIsType;
+ mInitIsEncoder = encoder;
+
// Current video decoders do not return from OMX_FillThisBuffer
// quickly, violating the OpenMAX specs, until that is remedied
// we need to invest in an extra looper to free the main event
// queue.
+ mCodec = new ACodec;
bool needDedicatedLooper = false;
- if (nameIsType && !strncasecmp(name, "video/", 6)) {
+ if (nameIsType && !strncasecmp(name.c_str(), "video/", 6)) {
needDedicatedLooper = true;
} else {
AString tmp = name;
if (tmp.endsWith(".secure")) {
tmp.erase(tmp.size() - 7, 7);
}
- const MediaCodecList *mcl = MediaCodecList::getInstance();
+ const sp<IMediaCodecList> mcl = MediaCodecList::getInstance();
ssize_t codecIdx = mcl->findCodecByName(tmp.c_str());
if (codecIdx >= 0) {
- Vector<AString> types;
- if (mcl->getSupportedTypes(codecIdx, &types) == OK) {
- for (int i = 0; i < types.size(); i++) {
- if (types[i].startsWith("video/")) {
- needDedicatedLooper = true;
- break;
- }
+ const sp<MediaCodecInfo> info = mcl->getCodecInfo(codecIdx);
+ Vector<AString> mimes;
+ info->getSupportedMimes(&mimes);
+ for (size_t i = 0; i < mimes.size(); i++) {
+ if (mimes[i].startsWith("video/")) {
+ needDedicatedLooper = true;
+ break;
}
}
}
@@ -153,6 +241,14 @@ status_t MediaCodec::init(const char *name, bool nameIsType, bool encoder) {
return PostAndAwaitResponse(msg, &response);
}
+status_t MediaCodec::setCallback(const sp<AMessage> &callback) {
+ sp<AMessage> msg = new AMessage(kWhatSetCallback, id());
+ msg->setMessage("callback", callback);
+
+ sp<AMessage> response;
+ return PostAndAwaitResponse(msg, &response);
+}
+
status_t MediaCodec::configure(
const sp<AMessage> &format,
const sp<Surface> &nativeWindow,
@@ -174,7 +270,20 @@ status_t MediaCodec::configure(
}
sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
+ status_t err = PostAndAwaitResponse(msg, &response);
+
+ if (err != OK && err != INVALID_OPERATION) {
+ // MediaCodec now set state to UNINITIALIZED upon any fatal error.
+ // To maintain backward-compatibility, do a reset() to put codec
+ // back into INITIALIZED state.
+ // But don't reset if the err is INVALID_OPERATION, which means
+ // the configure failure is due to wrong state.
+
+ ALOGE("configure failed with err 0x%08x, resetting...", err);
+ reset();
+ }
+
+ return err;
}
status_t MediaCodec::createInputSurface(
@@ -218,6 +327,41 @@ status_t MediaCodec::release() {
return PostAndAwaitResponse(msg, &response);
}
+status_t MediaCodec::reset() {
+ /* When external-facing MediaCodec object is created,
+ it is already initialized. Thus, reset is essentially
+ release() followed by init(), plus clearing the state */
+
+ status_t err = release();
+
+ // unregister handlers
+ if (mCodec != NULL) {
+ if (mCodecLooper != NULL) {
+ mCodecLooper->unregisterHandler(mCodec->id());
+ } else {
+ mLooper->unregisterHandler(mCodec->id());
+ }
+ mCodec = NULL;
+ }
+ mLooper->unregisterHandler(id());
+
+ mFlags = 0; // clear all flags
+ mStickyError = OK;
+
+ // reset state not reset by setState(UNINITIALIZED)
+ mReplyID = 0;
+ mDequeueInputReplyID = 0;
+ mDequeueOutputReplyID = 0;
+ mDequeueInputTimeoutGeneration = 0;
+ mDequeueOutputTimeoutGeneration = 0;
+ mHaveInputSurface = false;
+
+ if (err == OK) {
+ err = init(mInitName, mInitNameIsType, mInitIsEncoder);
+ }
+ return err;
+}
+
status_t MediaCodec::queueInputBuffer(
size_t index,
size_t offset,
@@ -323,6 +467,16 @@ status_t MediaCodec::renderOutputBufferAndRelease(size_t index) {
return PostAndAwaitResponse(msg, &response);
}
+status_t MediaCodec::renderOutputBufferAndRelease(size_t index, int64_t timestampNs) {
+ sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
+ msg->setSize("index", index);
+ msg->setInt32("render", true);
+ msg->setInt64("timestampNs", timestampNs);
+
+ sp<AMessage> response;
+ return PostAndAwaitResponse(msg, &response);
+}
+
status_t MediaCodec::releaseOutputBuffer(size_t index) {
sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
msg->setSize("index", index);
@@ -352,6 +506,20 @@ status_t MediaCodec::getOutputFormat(sp<AMessage> *format) const {
return OK;
}
+status_t MediaCodec::getInputFormat(sp<AMessage> *format) const {
+ sp<AMessage> msg = new AMessage(kWhatGetInputFormat, id());
+
+ sp<AMessage> response;
+ status_t err;
+ if ((err = PostAndAwaitResponse(msg, &response)) != OK) {
+ return err;
+ }
+
+ CHECK(response->findMessage("format", format));
+
+ return OK;
+}
+
status_t MediaCodec::getName(AString *name) const {
sp<AMessage> msg = new AMessage(kWhatGetName, id());
@@ -384,6 +552,55 @@ status_t MediaCodec::getOutputBuffers(Vector<sp<ABuffer> > *buffers) const {
return PostAndAwaitResponse(msg, &response);
}
+status_t MediaCodec::getOutputBuffer(size_t index, sp<ABuffer> *buffer) {
+ sp<AMessage> format;
+ return getBufferAndFormat(kPortIndexOutput, index, buffer, &format);
+}
+
+status_t MediaCodec::getOutputFormat(size_t index, sp<AMessage> *format) {
+ sp<ABuffer> buffer;
+ return getBufferAndFormat(kPortIndexOutput, index, &buffer, format);
+}
+
+status_t MediaCodec::getInputBuffer(size_t index, sp<ABuffer> *buffer) {
+ sp<AMessage> format;
+ return getBufferAndFormat(kPortIndexInput, index, buffer, &format);
+}
+
+bool MediaCodec::isExecuting() const {
+ return mState == STARTED || mState == FLUSHED;
+}
+
+status_t MediaCodec::getBufferAndFormat(
+ size_t portIndex, size_t index,
+ sp<ABuffer> *buffer, sp<AMessage> *format) {
+ // use mutex instead of a context switch
+
+ buffer->clear();
+ format->clear();
+ if (!isExecuting()) {
+ return INVALID_OPERATION;
+ }
+
+ // we do not want mPortBuffers to change during this section
+ // we also don't want mOwnedByClient to change during this
+ Mutex::Autolock al(mBufferLock);
+ Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
+ if (index < buffers->size()) {
+ const BufferInfo &info = buffers->itemAt(index);
+ if (info.mOwnedByClient) {
+ // by the time buffers array is initialized, crypto is set
+ if (portIndex == kPortIndexInput && mCrypto != NULL) {
+ *buffer = info.mEncryptedData;
+ } else {
+ *buffer = info.mData;
+ }
+ *format = info.mFormat;
+ }
+ }
+ return OK;
+}
+
status_t MediaCodec::flush() {
sp<AMessage> msg = new AMessage(kWhatFlush, id());
@@ -407,9 +624,7 @@ void MediaCodec::requestActivityNotification(const sp<AMessage> &notify) {
void MediaCodec::cancelPendingDequeueOperations() {
if (mFlags & kFlagDequeueInputPending) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
- response->postReply(mDequeueInputReplyID);
+ PostReplyWithError(mDequeueInputReplyID, INVALID_OPERATION);
++mDequeueInputTimeoutGeneration;
mDequeueInputReplyID = 0;
@@ -417,9 +632,7 @@ void MediaCodec::cancelPendingDequeueOperations() {
}
if (mFlags & kFlagDequeueOutputPending) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
- response->postReply(mDequeueOutputReplyID);
+ PostReplyWithError(mDequeueOutputReplyID, INVALID_OPERATION);
++mDequeueOutputTimeoutGeneration;
mDequeueOutputReplyID = 0;
@@ -428,14 +641,12 @@ void MediaCodec::cancelPendingDequeueOperations() {
}
bool MediaCodec::handleDequeueInputBuffer(uint32_t replyID, bool newRequest) {
- if (mState != STARTED
- || (mFlags & kFlagStickyError)
+ if (!isExecuting() || (mFlags & kFlagIsAsync)
|| (newRequest && (mFlags & kFlagDequeueInputPending))) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
-
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ return true;
+ } else if (mFlags & kFlagStickyError) {
+ PostReplyWithError(replyID, getStickyError());
return true;
}
@@ -456,10 +667,11 @@ bool MediaCodec::handleDequeueInputBuffer(uint32_t replyID, bool newRequest) {
bool MediaCodec::handleDequeueOutputBuffer(uint32_t replyID, bool newRequest) {
sp<AMessage> response = new AMessage;
- if (mState != STARTED
- || (mFlags & kFlagStickyError)
+ if (!isExecuting() || (mFlags & kFlagIsAsync)
|| (newRequest && (mFlags & kFlagDequeueOutputPending))) {
response->setInt32("err", INVALID_OPERATION);
+ } else if (mFlags & kFlagStickyError) {
+ response->setInt32("err", getStickyError());
} else if (mFlags & kFlagOutputBuffersChanged) {
response->setInt32("err", INFO_OUTPUT_BUFFERS_CHANGED);
mFlags &= ~kFlagOutputBuffersChanged;
@@ -516,22 +728,20 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->findInt32("what", &what));
switch (what) {
- case ACodec::kWhatError:
+ case CodecBase::kWhatError:
{
- int32_t omxError, internalError;
- CHECK(msg->findInt32("omx-error", &omxError));
- CHECK(msg->findInt32("err", &internalError));
-
- ALOGE("Codec reported an error. "
- "(omx error 0x%08x, internalError %d)",
- omxError, internalError);
+ int32_t err, actionCode;
+ CHECK(msg->findInt32("err", &err));
+ CHECK(msg->findInt32("actionCode", &actionCode));
- if (omxError == OMX_ErrorResourcesLost
- && internalError == DEAD_OBJECT) {
+ ALOGE("Codec reported err %#x, actionCode %d, while in state %d",
+ err, actionCode, mState);
+ if (err == DEAD_OBJECT) {
mFlags |= kFlagSawMediaServerDie;
+ mFlags &= ~kFlagIsComponentAllocated;
}
- bool sendErrorReponse = true;
+ bool sendErrorResponse = true;
switch (mState) {
case INITIALIZING:
@@ -542,13 +752,15 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
case CONFIGURING:
{
- setState(INITIALIZED);
+ setState(actionCode == ACTION_CODE_FATAL ?
+ UNINITIALIZED : INITIALIZED);
break;
}
case STARTING:
{
- setState(CONFIGURED);
+ setState(actionCode == ACTION_CODE_FATAL ?
+ UNINITIALIZED : CONFIGURED);
break;
}
@@ -558,7 +770,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
// Ignore the error, assuming we'll still get
// the shutdown complete notification.
- sendErrorReponse = false;
+ sendErrorResponse = false;
if (mFlags & kFlagSawMediaServerDie) {
// MediaServer died, there definitely won't
@@ -569,7 +781,9 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
// STOPPING->UNINITIALIZED, instead of the
// usual STOPPING->INITIALIZED state.
setState(UNINITIALIZED);
-
+ if (mState == RELEASING) {
+ mComponentName.clear();
+ }
(new AMessage)->postReply(mReplyID);
}
break;
@@ -577,44 +791,80 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
case FLUSHING:
{
- setState(STARTED);
+ if (actionCode == ACTION_CODE_FATAL) {
+ setState(UNINITIALIZED);
+ } else {
+ setState(
+ (mFlags & kFlagIsAsync) ? FLUSHED : STARTED);
+ }
break;
}
+ case FLUSHED:
case STARTED:
{
- sendErrorReponse = false;
+ sendErrorResponse = false;
- mFlags |= kFlagStickyError;
+ setStickyError(err);
postActivityNotificationIfPossible();
cancelPendingDequeueOperations();
+
+ if (mFlags & kFlagIsAsync) {
+ onError(err, actionCode);
+ }
+ switch (actionCode) {
+ case ACTION_CODE_TRANSIENT:
+ break;
+ case ACTION_CODE_RECOVERABLE:
+ setState(INITIALIZED);
+ break;
+ default:
+ setState(UNINITIALIZED);
+ break;
+ }
break;
}
default:
{
- sendErrorReponse = false;
+ sendErrorResponse = false;
- mFlags |= kFlagStickyError;
+ setStickyError(err);
postActivityNotificationIfPossible();
+
+ // actionCode in an uninitialized state is always fatal.
+ if (mState == UNINITIALIZED) {
+ actionCode = ACTION_CODE_FATAL;
+ }
+ if (mFlags & kFlagIsAsync) {
+ onError(err, actionCode);
+ }
+ switch (actionCode) {
+ case ACTION_CODE_TRANSIENT:
+ break;
+ case ACTION_CODE_RECOVERABLE:
+ setState(INITIALIZED);
+ break;
+ default:
+ setState(UNINITIALIZED);
+ break;
+ }
break;
}
}
- if (sendErrorReponse) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", UNKNOWN_ERROR);
-
- response->postReply(mReplyID);
+ if (sendErrorResponse) {
+ PostReplyWithError(mReplyID, err);
}
break;
}
- case ACodec::kWhatComponentAllocated:
+ case CodecBase::kWhatComponentAllocated:
{
CHECK_EQ(mState, INITIALIZING);
setState(INITIALIZED);
+ mFlags |= kFlagIsComponentAllocated;
CHECK(msg->findString("componentName", &mComponentName));
@@ -634,21 +884,24 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- case ACodec::kWhatComponentConfigured:
+ case CodecBase::kWhatComponentConfigured:
{
CHECK_EQ(mState, CONFIGURING);
- setState(CONFIGURED);
// reset input surface flag
mHaveInputSurface = false;
+ CHECK(msg->findMessage("input-format", &mInputFormat));
+ CHECK(msg->findMessage("output-format", &mOutputFormat));
+
+ setState(CONFIGURED);
(new AMessage)->postReply(mReplyID);
break;
}
- case ACodec::kWhatInputSurfaceCreated:
+ case CodecBase::kWhatInputSurfaceCreated:
{
- // response to ACodec::kWhatCreateInputSurface
+ // response to initiateCreateInputSurface()
status_t err = NO_ERROR;
sp<AMessage> response = new AMessage();
if (!msg->findInt32("err", &err)) {
@@ -664,9 +917,9 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- case ACodec::kWhatSignaledInputEOS:
+ case CodecBase::kWhatSignaledInputEOS:
{
- // response to ACodec::kWhatSignalEndOfInputStream
+ // response to signalEndOfInputStream()
sp<AMessage> response = new AMessage();
status_t err;
if (msg->findInt32("err", &err)) {
@@ -677,8 +930,9 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
}
- case ACodec::kWhatBuffersAllocated:
+ case CodecBase::kWhatBuffersAllocated:
{
+ Mutex::Autolock al(mBufferLock);
int32_t portIndex;
CHECK(msg->findInt32("portIndex", &portIndex));
@@ -695,8 +949,8 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
sp<RefBase> obj;
CHECK(msg->findObject("portDesc", &obj));
- sp<ACodec::PortDescription> portDesc =
- static_cast<ACodec::PortDescription *>(obj.get());
+ sp<CodecBase::PortDescription> portDesc =
+ static_cast<CodecBase::PortDescription *>(obj.get());
size_t numBuffers = portDesc->countBuffers();
@@ -729,40 +983,18 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- case ACodec::kWhatOutputFormatChanged:
+ case CodecBase::kWhatOutputFormatChanged:
{
ALOGV("codec output format changed");
- if ((mFlags & kFlagIsSoftwareCodec)
- && mNativeWindow != NULL) {
+ if (mSoftRenderer == NULL &&
+ mNativeWindow != NULL &&
+ (mFlags & kFlagIsSoftwareCodec)) {
AString mime;
CHECK(msg->findString("mime", &mime));
- if (!strncasecmp("video/", mime.c_str(), 6)) {
- delete mSoftRenderer;
- mSoftRenderer = NULL;
-
- int32_t width, height;
- CHECK(msg->findInt32("width", &width));
- CHECK(msg->findInt32("height", &height));
-
- int32_t cropLeft, cropTop, cropRight, cropBottom;
- CHECK(msg->findRect("crop",
- &cropLeft, &cropTop, &cropRight, &cropBottom));
-
- int32_t colorFormat;
- CHECK(msg->findInt32(
- "color-format", &colorFormat));
-
- sp<MetaData> meta = new MetaData;
- meta->setInt32(kKeyWidth, width);
- meta->setInt32(kKeyHeight, height);
- meta->setRect(kKeyCropRect,
- cropLeft, cropTop, cropRight, cropBottom);
- meta->setInt32(kKeyColorFormat, colorFormat);
-
- mSoftRenderer =
- new SoftwareRenderer(mNativeWindow, meta);
+ if (mime.startsWithIgnoreCase("video/")) {
+ mSoftRenderer = new SoftwareRenderer(mNativeWindow);
}
}
@@ -773,6 +1005,8 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
// collect codec specific data and amend the output
// format as necessary.
mFlags |= kFlagGatherCodecSpecificData;
+ } else if (mFlags & kFlagIsAsync) {
+ onOutputFormatChanged();
} else {
mFlags |= kFlagOutputFormatChanged;
postActivityNotificationIfPossible();
@@ -780,7 +1014,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- case ACodec::kWhatFillThisBuffer:
+ case CodecBase::kWhatFillThisBuffer:
{
/* size_t index = */updateBuffers(kPortIndexInput, msg);
@@ -807,7 +1041,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
ALOGE("queueCSDInputBuffer failed w/ error %d",
err);
- mFlags |= kFlagStickyError;
+ setStickyError(err);
postActivityNotificationIfPossible();
cancelPendingDequeueOperations();
@@ -815,7 +1049,11 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- if (mFlags & kFlagDequeueInputPending) {
+ if (mFlags & kFlagIsAsync) {
+ if (!mHaveInputSurface) {
+ onInputBufferAvailable();
+ }
+ } else if (mFlags & kFlagDequeueInputPending) {
CHECK(handleDequeueInputBuffer(mDequeueInputReplyID));
++mDequeueInputTimeoutGeneration;
@@ -827,7 +1065,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- case ACodec::kWhatDrainThisBuffer:
+ case CodecBase::kWhatDrainThisBuffer:
{
/* size_t index = */updateBuffers(kPortIndexOutput, msg);
@@ -862,10 +1100,16 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
}
mFlags &= ~kFlagGatherCodecSpecificData;
- mFlags |= kFlagOutputFormatChanged;
+ if (mFlags & kFlagIsAsync) {
+ onOutputFormatChanged();
+ } else {
+ mFlags |= kFlagOutputFormatChanged;
+ }
}
- if (mFlags & kFlagDequeueOutputPending) {
+ if (mFlags & kFlagIsAsync) {
+ onOutputBufferAvailable();
+ } else if (mFlags & kFlagDequeueOutputPending) {
CHECK(handleDequeueOutputBuffer(mDequeueOutputReplyID));
++mDequeueOutputTimeoutGeneration;
@@ -878,32 +1122,42 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- case ACodec::kWhatEOS:
+ case CodecBase::kWhatEOS:
{
// We already notify the client of this by using the
// corresponding flag in "onOutputBufferReady".
break;
}
- case ACodec::kWhatShutdownCompleted:
+ case CodecBase::kWhatShutdownCompleted:
{
if (mState == STOPPING) {
setState(INITIALIZED);
} else {
CHECK_EQ(mState, RELEASING);
setState(UNINITIALIZED);
+ mComponentName.clear();
}
+ mFlags &= ~kFlagIsComponentAllocated;
(new AMessage)->postReply(mReplyID);
break;
}
- case ACodec::kWhatFlushCompleted:
+ case CodecBase::kWhatFlushCompleted:
{
- CHECK_EQ(mState, FLUSHING);
- setState(STARTED);
+ if (mState != FLUSHING) {
+ ALOGW("received FlushCompleted message in state %d",
+ mState);
+ break;
+ }
- mCodec->signalResume();
+ if (mFlags & kFlagIsAsync) {
+ setState(FLUSHED);
+ } else {
+ setState(STARTED);
+ mCodec->signalResume();
+ }
(new AMessage)->postReply(mReplyID);
break;
@@ -921,10 +1175,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->senderAwaitsResponse(&replyID));
if (mState != UNINITIALIZED) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ PostReplyWithError(replyID, INVALID_OPERATION);
break;
}
@@ -954,16 +1205,45 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case kWhatSetCallback:
+ {
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ if (mState == UNINITIALIZED
+ || mState == INITIALIZING
+ || isExecuting()) {
+ // callback can't be set after codec is executing,
+ // or before it's initialized (as the callback
+ // will be cleared when it goes to INITIALIZED)
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ }
+
+ sp<AMessage> callback;
+ CHECK(msg->findMessage("callback", &callback));
+
+ mCallback = callback;
+
+ if (mCallback != NULL) {
+ ALOGI("MediaCodec will operate in async mode");
+ mFlags |= kFlagIsAsync;
+ } else {
+ mFlags &= ~kFlagIsAsync;
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->postReply(replyID);
+ break;
+ }
+
case kWhatConfigure:
{
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
if (mState != INITIALIZED) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ PostReplyWithError(replyID, INVALID_OPERATION);
break;
}
@@ -983,10 +1263,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
->getSurfaceTextureClient());
if (err != OK) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
-
- response->postReply(replyID);
+ PostReplyWithError(replyID, err);
break;
}
} else {
@@ -1024,10 +1301,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
// Must be configured, but can't have been started yet.
if (mState != CONFIGURED) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ PostReplyWithError(replyID, INVALID_OPERATION);
break;
}
@@ -1041,11 +1315,11 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if (mState != CONFIGURED) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ if (mState == FLUSHED) {
+ mCodec->signalResume();
+ PostReplyWithError(replyID, OK);
+ } else if (mState != CONFIGURED) {
+ PostReplyWithError(replyID, INVALID_OPERATION);
break;
}
@@ -1065,20 +1339,21 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if (mState != INITIALIZED
- && mState != CONFIGURED && mState != STARTED) {
- // We may be in "UNINITIALIZED" state already without the
+ if (!(mFlags & kFlagIsComponentAllocated) && mState != INITIALIZED
+ && mState != CONFIGURED && !isExecuting()) {
+ // We may be in "UNINITIALIZED" state already and
+ // also shutdown the encoder/decoder without the
// client being aware of this if media server died while
// we were being stopped. The client would assume that
// after stop() returned, it would be safe to call release()
// and it should be in this case, no harm to allow a release()
// if we're already uninitialized.
- // Similarly stopping a stopped MediaCodec should be benign.
sp<AMessage> response = new AMessage;
- response->setInt32(
- "err",
- mState == targetState ? OK : INVALID_OPERATION);
-
+ status_t err = mState == targetState ? OK : INVALID_OPERATION;
+ response->setInt32("err", err);
+ if (err == OK && targetState == UNINITIALIZED) {
+ mComponentName.clear();
+ }
response->postReply(replyID);
break;
}
@@ -1087,6 +1362,9 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
// It's dead, Jim. Don't expect initiateShutdown to yield
// any useful results now...
setState(UNINITIALIZED);
+ if (targetState == UNINITIALIZED) {
+ mComponentName.clear();
+ }
(new AMessage)->postReply(replyID);
break;
}
@@ -1106,11 +1384,15 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
+ if (mFlags & kFlagIsAsync) {
+ ALOGE("dequeueOutputBuffer can't be used in async mode");
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ }
+
if (mHaveInputSurface) {
ALOGE("dequeueInputBuffer can't be used with input surface");
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
- response->postReply(replyID);
+ PostReplyWithError(replyID, INVALID_OPERATION);
break;
}
@@ -1122,9 +1404,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->findInt64("timeoutUs", &timeoutUs));
if (timeoutUs == 0ll) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", -EAGAIN);
- response->postReply(replyID);
+ PostReplyWithError(replyID, -EAGAIN);
break;
}
@@ -1153,9 +1433,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(mFlags & kFlagDequeueInputPending);
- sp<AMessage> response = new AMessage;
- response->setInt32("err", -EAGAIN);
- response->postReply(mDequeueInputReplyID);
+ PostReplyWithError(mDequeueInputReplyID, -EAGAIN);
mFlags &= ~kFlagDequeueInputPending;
mDequeueInputReplyID = 0;
@@ -1167,19 +1445,17 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if (mState != STARTED || (mFlags & kFlagStickyError)) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ if (!isExecuting()) {
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ } else if (mFlags & kFlagStickyError) {
+ PostReplyWithError(replyID, getStickyError());
break;
}
status_t err = onQueueInputBuffer(msg);
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
+ PostReplyWithError(replyID, err);
break;
}
@@ -1188,6 +1464,12 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
+ if (mFlags & kFlagIsAsync) {
+ ALOGE("dequeueOutputBuffer can't be used in async mode");
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ }
+
if (handleDequeueOutputBuffer(replyID, true /* new request */)) {
break;
}
@@ -1196,9 +1478,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->findInt64("timeoutUs", &timeoutUs));
if (timeoutUs == 0ll) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", -EAGAIN);
- response->postReply(replyID);
+ PostReplyWithError(replyID, -EAGAIN);
break;
}
@@ -1227,9 +1507,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(mFlags & kFlagDequeueOutputPending);
- sp<AMessage> response = new AMessage;
- response->setInt32("err", -EAGAIN);
- response->postReply(mDequeueOutputReplyID);
+ PostReplyWithError(mDequeueOutputReplyID, -EAGAIN);
mFlags &= ~kFlagDequeueOutputPending;
mDequeueOutputReplyID = 0;
@@ -1241,19 +1519,17 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if (mState != STARTED || (mFlags & kFlagStickyError)) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ if (!isExecuting()) {
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ } else if (mFlags & kFlagStickyError) {
+ PostReplyWithError(replyID, getStickyError());
break;
}
status_t err = onReleaseOutputBuffer(msg);
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
+ PostReplyWithError(replyID, err);
break;
}
@@ -1262,11 +1538,11 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if (mState != STARTED || (mFlags & kFlagStickyError)) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ if (!isExecuting()) {
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ } else if (mFlags & kFlagStickyError) {
+ PostReplyWithError(replyID, getStickyError());
break;
}
@@ -1280,11 +1556,11 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if (mState != STARTED || (mFlags & kFlagStickyError)) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ if (!isExecuting() || (mFlags & kFlagIsAsync)) {
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ } else if (mFlags & kFlagStickyError) {
+ PostReplyWithError(replyID, getStickyError());
break;
}
@@ -1314,15 +1590,16 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if (mState != STARTED || (mFlags & kFlagStickyError)) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ if (!isExecuting()) {
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ } else if (mFlags & kFlagStickyError) {
+ PostReplyWithError(replyID, getStickyError());
break;
}
mReplyID = replyID;
+ // TODO: skip flushing if already FLUSHED
setState(FLUSHING);
mCodec->signalFlush();
@@ -1330,23 +1607,28 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case kWhatGetInputFormat:
case kWhatGetOutputFormat:
{
+ sp<AMessage> format =
+ (msg->what() == kWhatGetOutputFormat ? mOutputFormat : mInputFormat);
+
uint32_t replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if ((mState != STARTED && mState != FLUSHING)
- || (mFlags & kFlagStickyError)
- || mOutputFormat == NULL) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ if ((mState != CONFIGURED && mState != STARTING &&
+ mState != STARTED && mState != FLUSHING &&
+ mState != FLUSHED)
+ || format == NULL) {
+ PostReplyWithError(replyID, INVALID_OPERATION);
+ break;
+ } else if (mFlags & kFlagStickyError) {
+ PostReplyWithError(replyID, getStickyError());
break;
}
sp<AMessage> response = new AMessage;
- response->setMessage("format", mOutputFormat);
+ response->setMessage("format", format);
response->postReply(replyID);
break;
}
@@ -1372,10 +1654,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
CHECK(msg->senderAwaitsResponse(&replyID));
if (mComponentName.empty()) {
- sp<AMessage> response = new AMessage;
- response->setInt32("err", INVALID_OPERATION);
-
- response->postReply(replyID);
+ PostReplyWithError(replyID, INVALID_OPERATION);
break;
}
@@ -1395,10 +1674,7 @@ void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
status_t err = onSetParameters(params);
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
-
- response->postReply(replyID);
+ PostReplyWithError(replyID, err);
break;
}
@@ -1421,14 +1697,14 @@ void MediaCodec::extractCSD(const sp<AMessage> &format) {
++i;
}
- ALOGV("Found %u pieces of codec specific data.", mCSD.size());
+ ALOGV("Found %zu pieces of codec specific data.", mCSD.size());
}
status_t MediaCodec::queueCSDInputBuffer(size_t bufferIndex) {
CHECK(!mCSD.empty());
- BufferInfo *info =
- &mPortBuffers[kPortIndexInput].editItemAt(bufferIndex);
+ const BufferInfo *info =
+ &mPortBuffers[kPortIndexInput].itemAt(bufferIndex);
sp<ABuffer> csd = *mCSD.begin();
mCSD.erase(mCSD.begin());
@@ -1463,18 +1739,23 @@ void MediaCodec::setState(State newState) {
mCrypto.clear();
setNativeWindow(NULL);
+ mInputFormat.clear();
mOutputFormat.clear();
mFlags &= ~kFlagOutputFormatChanged;
mFlags &= ~kFlagOutputBuffersChanged;
mFlags &= ~kFlagStickyError;
mFlags &= ~kFlagIsEncoder;
mFlags &= ~kFlagGatherCodecSpecificData;
+ mFlags &= ~kFlagIsAsync;
+ mStickyError = OK;
mActivityNotify.clear();
+ mCallback.clear();
}
if (newState == UNINITIALIZED) {
- mComponentName.clear();
+ // return any straggling buffers, e.g. if we got here on an error
+ returnBuffersToCodec();
// The component is gone, mediaserver's probably back up already
// but should definitely be back up should we try to instantiate
@@ -1485,6 +1766,8 @@ void MediaCodec::setState(State newState) {
mState = newState;
cancelPendingDequeueOperations();
+
+ updateBatteryStat();
}
void MediaCodec::returnBuffersToCodec() {
@@ -1494,6 +1777,7 @@ void MediaCodec::returnBuffersToCodec() {
void MediaCodec::returnBuffersToCodecOnPort(int32_t portIndex) {
CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+ Mutex::Autolock al(mBufferLock);
Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
@@ -1520,8 +1804,8 @@ size_t MediaCodec::updateBuffers(
int32_t portIndex, const sp<AMessage> &msg) {
CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
- void *bufferID;
- CHECK(msg->findPointer("buffer-id", &bufferID));
+ uint32_t bufferID;
+ CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
@@ -1532,6 +1816,8 @@ size_t MediaCodec::updateBuffers(
CHECK(info->mNotify == NULL);
CHECK(msg->findMessage("reply", &info->mNotify));
+ info->mFormat =
+ (portIndex == kPortIndexInput) ? mInputFormat : mOutputFormat;
mAvailPortBuffers[portIndex].push_back(i);
return i;
@@ -1648,11 +1934,15 @@ status_t MediaCodec::onQueueInputBuffer(const sp<AMessage> &msg) {
info->mData->setRange(0, result);
}
+ // synchronization boundary for getBufferAndFormat
+ {
+ Mutex::Autolock al(mBufferLock);
+ info->mOwnedByClient = false;
+ }
reply->setBuffer("buffer", info->mData);
reply->post();
info->mNotify = NULL;
- info->mOwnedByClient = false;
return OK;
}
@@ -1666,7 +1956,7 @@ status_t MediaCodec::onReleaseOutputBuffer(const sp<AMessage> &msg) {
render = 0;
}
- if (mState != STARTED) {
+ if (!isExecuting()) {
return -EINVAL;
}
@@ -1680,18 +1970,40 @@ status_t MediaCodec::onReleaseOutputBuffer(const sp<AMessage> &msg) {
return -EACCES;
}
- if (render && (info->mData == NULL || info->mData->size() != 0)) {
+ // synchronization boundary for getBufferAndFormat
+ {
+ Mutex::Autolock al(mBufferLock);
+ info->mOwnedByClient = false;
+ }
+
+ if (render && info->mData != NULL && info->mData->size() != 0) {
info->mNotify->setInt32("render", true);
+ int64_t timestampNs = 0;
+ if (msg->findInt64("timestampNs", &timestampNs)) {
+ info->mNotify->setInt64("timestampNs", timestampNs);
+ } else {
+ // TODO: it seems like we should use the timestamp
+ // in the (media)buffer as it potentially came from
+ // an input surface, but we did not propagate it prior to
+ // API 20. Perhaps check for target SDK version.
+#if 0
+ if (info->mData->meta()->findInt64("timeUs", &timestampNs)) {
+ ALOGV("using buffer PTS of %" PRId64, timestampNs);
+ timestampNs *= 1000;
+ }
+#endif
+ }
+
if (mSoftRenderer != NULL) {
mSoftRenderer->render(
- info->mData->data(), info->mData->size(), NULL);
+ info->mData->data(), info->mData->size(),
+ timestampNs, NULL, info->mFormat);
}
}
info->mNotify->post();
info->mNotify = NULL;
- info->mOwnedByClient = false;
return OK;
}
@@ -1710,7 +2022,22 @@ ssize_t MediaCodec::dequeuePortBuffer(int32_t portIndex) {
BufferInfo *info = &mPortBuffers[portIndex].editItemAt(index);
CHECK(!info->mOwnedByClient);
- info->mOwnedByClient = true;
+ {
+ Mutex::Autolock al(mBufferLock);
+ info->mOwnedByClient = true;
+
+ // set image-data
+ if (info->mFormat != NULL) {
+ sp<ABuffer> imageData;
+ if (info->mFormat->findBuffer("image-data", &imageData)) {
+ info->mData->meta()->setBuffer("image-data", imageData);
+ }
+ int32_t left, top, right, bottom;
+ if (info->mFormat->findRect("crop", &left, &top, &right, &bottom)) {
+ info->mData->meta()->setRect("crop-rect", left, top, right, bottom);
+ }
+ }
+ }
return index;
}
@@ -1748,16 +2075,100 @@ status_t MediaCodec::setNativeWindow(
return OK;
}
+void MediaCodec::onInputBufferAvailable() {
+ int32_t index;
+ while ((index = dequeuePortBuffer(kPortIndexInput)) >= 0) {
+ sp<AMessage> msg = mCallback->dup();
+ msg->setInt32("callbackID", CB_INPUT_AVAILABLE);
+ msg->setInt32("index", index);
+ msg->post();
+ }
+}
+
+void MediaCodec::onOutputBufferAvailable() {
+ int32_t index;
+ while ((index = dequeuePortBuffer(kPortIndexOutput)) >= 0) {
+ const sp<ABuffer> &buffer =
+ mPortBuffers[kPortIndexOutput].itemAt(index).mData;
+ sp<AMessage> msg = mCallback->dup();
+ msg->setInt32("callbackID", CB_OUTPUT_AVAILABLE);
+ msg->setInt32("index", index);
+ msg->setSize("offset", buffer->offset());
+ msg->setSize("size", buffer->size());
+
+ int64_t timeUs;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+ msg->setInt64("timeUs", timeUs);
+
+ int32_t omxFlags;
+ CHECK(buffer->meta()->findInt32("omxFlags", &omxFlags));
+
+ uint32_t flags = 0;
+ if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) {
+ flags |= BUFFER_FLAG_SYNCFRAME;
+ }
+ if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
+ flags |= BUFFER_FLAG_CODECCONFIG;
+ }
+ if (omxFlags & OMX_BUFFERFLAG_EOS) {
+ flags |= BUFFER_FLAG_EOS;
+ }
+
+ msg->setInt32("flags", flags);
+
+ msg->post();
+ }
+}
+
+void MediaCodec::onError(status_t err, int32_t actionCode, const char *detail) {
+ if (mCallback != NULL) {
+ sp<AMessage> msg = mCallback->dup();
+ msg->setInt32("callbackID", CB_ERROR);
+ msg->setInt32("err", err);
+ msg->setInt32("actionCode", actionCode);
+
+ if (detail != NULL) {
+ msg->setString("detail", detail);
+ }
+
+ msg->post();
+ }
+}
+
+void MediaCodec::onOutputFormatChanged() {
+ if (mCallback != NULL) {
+ sp<AMessage> msg = mCallback->dup();
+ msg->setInt32("callbackID", CB_OUTPUT_FORMAT_CHANGED);
+ msg->setMessage("format", mOutputFormat);
+ msg->post();
+ }
+}
+
+
void MediaCodec::postActivityNotificationIfPossible() {
if (mActivityNotify == NULL) {
return;
}
- if ((mFlags & (kFlagStickyError
+ bool isErrorOrOutputChanged =
+ (mFlags & (kFlagStickyError
| kFlagOutputBuffersChanged
- | kFlagOutputFormatChanged))
+ | kFlagOutputFormatChanged));
+
+ if (isErrorOrOutputChanged
|| !mAvailPortBuffers[kPortIndexInput].empty()
|| !mAvailPortBuffers[kPortIndexOutput].empty()) {
+ mActivityNotify->setInt32("input-buffers",
+ mAvailPortBuffers[kPortIndexInput].size());
+
+ if (isErrorOrOutputChanged) {
+ // we want consumer to dequeue as many times as it can
+ mActivityNotify->setInt32("output-buffers", INT32_MAX);
+ } else {
+ mActivityNotify->setInt32("output-buffers",
+ mAvailPortBuffers[kPortIndexOutput].size());
+ }
mActivityNotify->post();
mActivityNotify.clear();
}
@@ -1818,4 +2229,34 @@ status_t MediaCodec::amendOutputFormatWithCodecSpecificData(
return OK;
}
+void MediaCodec::updateBatteryStat() {
+ if (mState == CONFIGURED && !mBatteryStatNotified) {
+ AString mime;
+ CHECK(mOutputFormat != NULL &&
+ mOutputFormat->findString("mime", &mime));
+
+ mIsVideo = mime.startsWithIgnoreCase("video/");
+
+ BatteryNotifier& notifier(BatteryNotifier::getInstance());
+
+ if (mIsVideo) {
+ notifier.noteStartVideo();
+ } else {
+ notifier.noteStartAudio();
+ }
+
+ mBatteryStatNotified = true;
+ } else if (mState == UNINITIALIZED && mBatteryStatNotified) {
+ BatteryNotifier& notifier(BatteryNotifier::getInstance());
+
+ if (mIsVideo) {
+ notifier.noteStopVideo();
+ } else {
+ notifier.noteStopAudio();
+ }
+
+ mBatteryStatNotified = false;
+ }
+}
+
} // namespace android
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index b74b2e2..5b8be46 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -18,12 +18,19 @@
#define LOG_TAG "MediaCodecList"
#include <utils/Log.h>
-#include <media/stagefright/MediaCodecList.h>
+#include <binder/IServiceManager.h>
+
+#include <media/IMediaCodecList.h>
+#include <media/IMediaPlayerService.h>
+#include <media/MediaCodecInfo.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/OMXCodec.h>
+
#include <utils/threads.h>
#include <libexpat/expat.h>
@@ -32,30 +39,129 @@ namespace android {
static Mutex sInitMutex;
+static MediaCodecList *gCodecList = NULL;
+
// static
-MediaCodecList *MediaCodecList::sCodecList;
+sp<IMediaCodecList> MediaCodecList::sCodecList;
// static
-const MediaCodecList *MediaCodecList::getInstance() {
+sp<IMediaCodecList> MediaCodecList::getLocalInstance() {
Mutex::Autolock autoLock(sInitMutex);
- if (sCodecList == NULL) {
- sCodecList = new MediaCodecList;
+ if (gCodecList == NULL) {
+ gCodecList = new MediaCodecList;
+ if (gCodecList->initCheck() == OK) {
+ sCodecList = gCodecList;
+ }
}
- return sCodecList->initCheck() == OK ? sCodecList : NULL;
+ return sCodecList;
+}
+
+static Mutex sRemoteInitMutex;
+
+sp<IMediaCodecList> MediaCodecList::sRemoteList;
+
+// static
+sp<IMediaCodecList> MediaCodecList::getInstance() {
+ Mutex::Autolock _l(sRemoteInitMutex);
+ if (sRemoteList == NULL) {
+ sp<IBinder> binder =
+ defaultServiceManager()->getService(String16("media.player"));
+ sp<IMediaPlayerService> service =
+ interface_cast<IMediaPlayerService>(binder);
+ if (service.get() != NULL) {
+ sRemoteList = service->getCodecList();
+ }
+
+ if (sRemoteList == NULL) {
+ // if failed to get remote list, create local list
+ sRemoteList = getLocalInstance();
+ }
+ }
+ return sRemoteList;
}
MediaCodecList::MediaCodecList()
: mInitCheck(NO_INIT) {
- FILE *file = fopen("/etc/media_codecs.xml", "r");
+ parseTopLevelXMLFile("/etc/media_codecs.xml");
+}
- if (file == NULL) {
- ALOGW("unable to open media codecs configuration xml file.");
+void MediaCodecList::parseTopLevelXMLFile(const char *codecs_xml) {
+ // get href_base
+ char *href_base_end = strrchr(codecs_xml, '/');
+ if (href_base_end != NULL) {
+ mHrefBase = AString(codecs_xml, href_base_end - codecs_xml + 1);
+ }
+
+ mInitCheck = OK; // keeping this here for safety
+ mCurrentSection = SECTION_TOPLEVEL;
+ mDepth = 0;
+
+ OMXClient client;
+ mInitCheck = client.connect();
+ if (mInitCheck != OK) {
+ return;
+ }
+ mOMX = client.interface();
+ parseXMLFile(codecs_xml);
+ mOMX.clear();
+
+ if (mInitCheck != OK) {
+ mCodecInfos.clear();
return;
}
- parseXMLFile(file);
+ for (size_t i = mCodecInfos.size(); i-- > 0;) {
+ const MediaCodecInfo &info = *mCodecInfos.itemAt(i).get();
+
+ if (info.mCaps.size() == 0) {
+ // No types supported by this component???
+ ALOGW("Component %s does not support any type of media?",
+ info.mName.c_str());
+
+ mCodecInfos.removeAt(i);
+#if LOG_NDEBUG == 0
+ } else {
+ for (size_t type_ix = 0; type_ix < info.mCaps.size(); ++type_ix) {
+ AString mime = info.mCaps.keyAt(type_ix);
+ const sp<MediaCodecInfo::Capabilities> &caps = info.mCaps.valueAt(type_ix);
+
+ ALOGV("%s codec info for %s: %s", info.mName.c_str(), mime.c_str(),
+ caps->getDetails()->debugString().c_str());
+ ALOGV(" flags=%d", caps->getFlags());
+ {
+ Vector<uint32_t> colorFormats;
+ caps->getSupportedColorFormats(&colorFormats);
+ AString nice;
+ for (size_t ix = 0; ix < colorFormats.size(); ix++) {
+ if (ix > 0) {
+ nice.append(", ");
+ }
+ nice.append(colorFormats.itemAt(ix));
+ }
+ ALOGV(" colors=[%s]", nice.c_str());
+ }
+ {
+ Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+ caps->getSupportedProfileLevels(&profileLevels);
+ AString nice;
+ for (size_t ix = 0; ix < profileLevels.size(); ix++) {
+ if (ix > 0) {
+ nice.append(", ");
+ }
+ const MediaCodecInfo::ProfileLevel &pl =
+ profileLevels.itemAt(ix);
+ nice.append(pl.mProfile);
+ nice.append("/");
+ nice.append(pl.mLevel);
+ }
+ ALOGV(" levels=[%s]", nice.c_str());
+ }
+ }
+#endif
+ }
+ }
#if 0
for (size_t i = 0; i < mCodecInfos.size(); ++i) {
@@ -75,9 +181,6 @@ MediaCodecList::MediaCodecList()
ALOGI("%s", line.c_str());
}
#endif
-
- fclose(file);
- file = NULL;
}
MediaCodecList::~MediaCodecList() {
@@ -87,10 +190,14 @@ status_t MediaCodecList::initCheck() const {
return mInitCheck;
}
-void MediaCodecList::parseXMLFile(FILE *file) {
- mInitCheck = OK;
- mCurrentSection = SECTION_TOPLEVEL;
- mDepth = 0;
+void MediaCodecList::parseXMLFile(const char *path) {
+ FILE *file = fopen(path, "r");
+
+ if (file == NULL) {
+ ALOGW("unable to open media codecs configuration xml file: %s", path);
+ mInitCheck = NAME_NOT_FOUND;
+ return;
+ }
XML_Parser parser = ::XML_ParserCreate(NULL);
CHECK(parser != NULL);
@@ -103,7 +210,7 @@ void MediaCodecList::parseXMLFile(FILE *file) {
while (mInitCheck == OK) {
void *buff = ::XML_GetBuffer(parser, BUFF_SIZE);
if (buff == NULL) {
- ALOGE("failed to in call to XML_GetBuffer()");
+ ALOGE("failed in call to XML_GetBuffer()");
mInitCheck = UNKNOWN_ERROR;
break;
}
@@ -115,8 +222,9 @@ void MediaCodecList::parseXMLFile(FILE *file) {
break;
}
- if (::XML_ParseBuffer(parser, bytes_read, bytes_read == 0)
- != XML_STATUS_OK) {
+ XML_Status status = ::XML_ParseBuffer(parser, bytes_read, bytes_read == 0);
+ if (status != XML_STATUS_OK) {
+ ALOGE("malformed (%s)", ::XML_ErrorString(::XML_GetErrorCode(parser)));
mInitCheck = ERROR_MALFORMED;
break;
}
@@ -128,25 +236,8 @@ void MediaCodecList::parseXMLFile(FILE *file) {
::XML_ParserFree(parser);
- if (mInitCheck == OK) {
- for (size_t i = mCodecInfos.size(); i-- > 0;) {
- CodecInfo *info = &mCodecInfos.editItemAt(i);
-
- if (info->mTypes == 0) {
- // No types supported by this component???
-
- ALOGW("Component %s does not support any type of media?",
- info->mName.c_str());
-
- mCodecInfos.removeAt(i);
- }
- }
- }
-
- if (mInitCheck != OK) {
- mCodecInfos.clear();
- mCodecQuirks.clear();
- }
+ fclose(file);
+ file = NULL;
}
// static
@@ -160,12 +251,65 @@ void MediaCodecList::EndElementHandlerWrapper(void *me, const char *name) {
static_cast<MediaCodecList *>(me)->endElementHandler(name);
}
+status_t MediaCodecList::includeXMLFile(const char **attrs) {
+ const char *href = NULL;
+ size_t i = 0;
+ while (attrs[i] != NULL) {
+ if (!strcmp(attrs[i], "href")) {
+ if (attrs[i + 1] == NULL) {
+ return -EINVAL;
+ }
+ href = attrs[i + 1];
+ ++i;
+ } else {
+ return -EINVAL;
+ }
+ ++i;
+ }
+
+ // For security reasons and for simplicity, file names can only contain
+ // [a-zA-Z0-9_.] and must start with media_codecs_ and end with .xml
+ for (i = 0; href[i] != '\0'; i++) {
+ if (href[i] == '.' || href[i] == '_' ||
+ (href[i] >= '0' && href[i] <= '9') ||
+ (href[i] >= 'A' && href[i] <= 'Z') ||
+ (href[i] >= 'a' && href[i] <= 'z')) {
+ continue;
+ }
+ ALOGE("invalid include file name: %s", href);
+ return -EINVAL;
+ }
+
+ AString filename = href;
+ if (!filename.startsWith("media_codecs_") ||
+ !filename.endsWith(".xml")) {
+ ALOGE("invalid include file name: %s", href);
+ return -EINVAL;
+ }
+ filename.insert(mHrefBase, 0);
+
+ parseXMLFile(filename.c_str());
+ return mInitCheck;
+}
+
void MediaCodecList::startElementHandler(
const char *name, const char **attrs) {
if (mInitCheck != OK) {
return;
}
+ bool inType = true;
+
+ if (!strcmp(name, "Include")) {
+ mInitCheck = includeXMLFile(attrs);
+ if (mInitCheck == OK) {
+ mPastSections.push(mCurrentSection);
+ mCurrentSection = SECTION_INCLUDE;
+ }
+ ++mDepth;
+ return;
+ }
+
switch (mCurrentSection) {
case SECTION_TOPLEVEL:
{
@@ -206,6 +350,25 @@ void MediaCodecList::startElementHandler(
mInitCheck = addQuirk(attrs);
} else if (!strcmp(name, "Type")) {
mInitCheck = addTypeFromAttributes(attrs);
+ mCurrentSection =
+ (mCurrentSection == SECTION_DECODER
+ ? SECTION_DECODER_TYPE : SECTION_ENCODER_TYPE);
+ }
+ }
+ inType = false;
+ // fall through
+
+ case SECTION_DECODER_TYPE:
+ case SECTION_ENCODER_TYPE:
+ {
+ // ignore limits and features specified outside of type
+ bool outside = !inType && !mCurrentInfo->mHasSoleMime;
+ if (outside && (!strcmp(name, "Limit") || !strcmp(name, "Feature"))) {
+ ALOGW("ignoring %s specified outside of a Type", name);
+ } else if (!strcmp(name, "Limit")) {
+ mInitCheck = addLimit(attrs);
+ } else if (!strcmp(name, "Feature")) {
+ mInitCheck = addFeature(attrs);
}
break;
}
@@ -239,10 +402,25 @@ void MediaCodecList::endElementHandler(const char *name) {
break;
}
+ case SECTION_DECODER_TYPE:
+ case SECTION_ENCODER_TYPE:
+ {
+ if (!strcmp(name, "Type")) {
+ mCurrentSection =
+ (mCurrentSection == SECTION_DECODER_TYPE
+ ? SECTION_DECODER : SECTION_ENCODER);
+
+ mCurrentInfo->complete();
+ }
+ break;
+ }
+
case SECTION_DECODER:
{
if (!strcmp(name, "MediaCodec")) {
mCurrentSection = SECTION_DECODERS;
+ mCurrentInfo->complete();
+ mCurrentInfo = NULL;
}
break;
}
@@ -251,6 +429,17 @@ void MediaCodecList::endElementHandler(const char *name) {
{
if (!strcmp(name, "MediaCodec")) {
mCurrentSection = SECTION_ENCODERS;
+ mCurrentInfo->complete();
+ mCurrentInfo = NULL;
+ }
+ break;
+ }
+
+ case SECTION_INCLUDE:
+ {
+ if (!strcmp(name, "Include") && mPastSections.size() > 0) {
+ mCurrentSection = mPastSections.top();
+ mPastSections.pop();
}
break;
}
@@ -292,23 +481,37 @@ status_t MediaCodecList::addMediaCodecFromAttributes(
return -EINVAL;
}
- addMediaCodec(encoder, name, type);
-
+ mCurrentInfo = new MediaCodecInfo(name, encoder, type);
+ // The next step involves trying to load the codec, which may
+ // fail. Only list the codec if this succeeds.
+ // However, keep mCurrentInfo object around until parsing
+ // of full codec info is completed.
+ if (initializeCapabilities(type) == OK) {
+ mCodecInfos.push_back(mCurrentInfo);
+ }
return OK;
}
-void MediaCodecList::addMediaCodec(
- bool encoder, const char *name, const char *type) {
- mCodecInfos.push();
- CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
- info->mName = name;
- info->mIsEncoder = encoder;
- info->mTypes = 0;
- info->mQuirks = 0;
+status_t MediaCodecList::initializeCapabilities(const char *type) {
+ if (type == NULL) {
+ return OK;
+ }
- if (type != NULL) {
- addType(type);
+ ALOGV("initializeCapabilities %s:%s",
+ mCurrentInfo->mName.c_str(), type);
+
+ CodecCapabilities caps;
+ status_t err = QueryCodec(
+ mOMX,
+ mCurrentInfo->mName.c_str(),
+ type,
+ mCurrentInfo->mIsEncoder,
+ &caps);
+ if (err != OK) {
+ return err;
}
+
+ return mCurrentInfo->initializeCapabilities(caps);
}
status_t MediaCodecList::addQuirk(const char **attrs) {
@@ -333,24 +536,7 @@ status_t MediaCodecList::addQuirk(const char **attrs) {
return -EINVAL;
}
- uint32_t bit;
- ssize_t index = mCodecQuirks.indexOfKey(name);
- if (index < 0) {
- bit = mCodecQuirks.size();
-
- if (bit == 32) {
- ALOGW("Too many distinct quirk names in configuration.");
- return OK;
- }
-
- mCodecQuirks.add(name, bit);
- } else {
- bit = mCodecQuirks.valueAt(index);
- }
-
- CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
- info->mQuirks |= 1ul << bit;
-
+ mCurrentInfo->addQuirk(name);
return OK;
}
@@ -376,172 +562,291 @@ status_t MediaCodecList::addTypeFromAttributes(const char **attrs) {
return -EINVAL;
}
- addType(name);
-
- return OK;
-}
-
-void MediaCodecList::addType(const char *name) {
- uint32_t bit;
- ssize_t index = mTypes.indexOfKey(name);
- if (index < 0) {
- bit = mTypes.size();
-
- if (bit == 32) {
- ALOGW("Too many distinct type names in configuration.");
- return;
- }
-
- mTypes.add(name, bit);
- } else {
- bit = mTypes.valueAt(index);
+ status_t ret = mCurrentInfo->addMime(name);
+ if (ret != OK) {
+ return ret;
}
- CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
- info->mTypes |= 1ul << bit;
+ // The next step involves trying to load the codec, which may
+ // fail. Handle this gracefully (by not reporting such mime).
+ if (initializeCapabilities(name) != OK) {
+ mCurrentInfo->removeMime(name);
+ }
+ return OK;
}
+// legacy method for non-advanced codecs
ssize_t MediaCodecList::findCodecByType(
const char *type, bool encoder, size_t startIndex) const {
- ssize_t typeIndex = mTypes.indexOfKey(type);
-
- if (typeIndex < 0) {
- return -ENOENT;
- }
-
- uint32_t typeMask = 1ul << mTypes.valueAt(typeIndex);
+ static const char *advancedFeatures[] = {
+ "feature-secure-playback",
+ "feature-tunneled-playback",
+ };
- while (startIndex < mCodecInfos.size()) {
- const CodecInfo &info = mCodecInfos.itemAt(startIndex);
+ size_t numCodecs = mCodecInfos.size();
+ for (; startIndex < numCodecs; ++startIndex) {
+ const MediaCodecInfo &info = *mCodecInfos.itemAt(startIndex).get();
- if (info.mIsEncoder == encoder && (info.mTypes & typeMask)) {
- return startIndex;
+ if (info.isEncoder() != encoder) {
+ continue;
+ }
+ sp<MediaCodecInfo::Capabilities> capabilities = info.getCapabilitiesFor(type);
+ if (capabilities == NULL) {
+ continue;
+ }
+ const sp<AMessage> &details = capabilities->getDetails();
+
+ int32_t required;
+ bool isAdvanced = false;
+ for (size_t ix = 0; ix < ARRAY_SIZE(advancedFeatures); ix++) {
+ if (details->findInt32(advancedFeatures[ix], &required) &&
+ required != 0) {
+ isAdvanced = true;
+ break;
+ }
}
- ++startIndex;
- }
-
- return -ENOENT;
-}
-
-ssize_t MediaCodecList::findCodecByName(const char *name) const {
- for (size_t i = 0; i < mCodecInfos.size(); ++i) {
- const CodecInfo &info = mCodecInfos.itemAt(i);
-
- if (info.mName == name) {
- return i;
+ if (!isAdvanced) {
+ return startIndex;
}
}
return -ENOENT;
}
-size_t MediaCodecList::countCodecs() const {
- return mCodecInfos.size();
+static status_t limitFoundMissingAttr(AString name, const char *attr, bool found = true) {
+ ALOGE("limit '%s' with %s'%s' attribute", name.c_str(),
+ (found ? "" : "no "), attr);
+ return -EINVAL;
}
-const char *MediaCodecList::getCodecName(size_t index) const {
- if (index >= mCodecInfos.size()) {
- return NULL;
- }
-
- const CodecInfo &info = mCodecInfos.itemAt(index);
- return info.mName.c_str();
+static status_t limitError(AString name, const char *msg) {
+ ALOGE("limit '%s' %s", name.c_str(), msg);
+ return -EINVAL;
}
-bool MediaCodecList::isEncoder(size_t index) const {
- if (index >= mCodecInfos.size()) {
- return NULL;
- }
-
- const CodecInfo &info = mCodecInfos.itemAt(index);
- return info.mIsEncoder;
+static status_t limitInvalidAttr(AString name, const char *attr, AString value) {
+ ALOGE("limit '%s' with invalid '%s' attribute (%s)", name.c_str(),
+ attr, value.c_str());
+ return -EINVAL;
}
-bool MediaCodecList::codecHasQuirk(
- size_t index, const char *quirkName) const {
- if (index >= mCodecInfos.size()) {
- return NULL;
- }
+status_t MediaCodecList::addLimit(const char **attrs) {
+ sp<AMessage> msg = new AMessage();
- const CodecInfo &info = mCodecInfos.itemAt(index);
+ size_t i = 0;
+ while (attrs[i] != NULL) {
+ if (attrs[i + 1] == NULL) {
+ return -EINVAL;
+ }
- if (info.mQuirks != 0) {
- ssize_t index = mCodecQuirks.indexOfKey(quirkName);
- if (index >= 0 && info.mQuirks & (1ul << mCodecQuirks.valueAt(index))) {
- return true;
+ // attributes with values
+ if (!strcmp(attrs[i], "name")
+ || !strcmp(attrs[i], "default")
+ || !strcmp(attrs[i], "in")
+ || !strcmp(attrs[i], "max")
+ || !strcmp(attrs[i], "min")
+ || !strcmp(attrs[i], "range")
+ || !strcmp(attrs[i], "ranges")
+ || !strcmp(attrs[i], "scale")
+ || !strcmp(attrs[i], "value")) {
+ msg->setString(attrs[i], attrs[i + 1]);
+ ++i;
+ } else {
+ return -EINVAL;
}
+ ++i;
}
- return false;
-}
+ AString name;
+ if (!msg->findString("name", &name)) {
+ ALOGE("limit with no 'name' attribute");
+ return -EINVAL;
+ }
-status_t MediaCodecList::getSupportedTypes(
- size_t index, Vector<AString> *types) const {
- types->clear();
+ // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio: range
+ // quality: range + default + [scale]
+ // complexity: range + default
+ bool found;
+
+ if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
+ || name == "blocks-per-second" || name == "complexity"
+ || name == "frame-rate" || name == "quality" || name == "size") {
+ AString min, max;
+ if (msg->findString("min", &min) && msg->findString("max", &max)) {
+ min.append("-");
+ min.append(max);
+ if (msg->contains("range") || msg->contains("value")) {
+ return limitError(name, "has 'min' and 'max' as well as 'range' or "
+ "'value' attributes");
+ }
+ msg->setString("range", min);
+ } else if (msg->contains("min") || msg->contains("max")) {
+ return limitError(name, "has only 'min' or 'max' attribute");
+ } else if (msg->findString("value", &max)) {
+ min = max;
+ min.append("-");
+ min.append(max);
+ if (msg->contains("range")) {
+ return limitError(name, "has both 'range' and 'value' attributes");
+ }
+ msg->setString("range", min);
+ }
- if (index >= mCodecInfos.size()) {
- return -ERANGE;
- }
+ AString range, scale = "linear", def, in_;
+ if (!msg->findString("range", &range)) {
+ return limitError(name, "with no 'range', 'value' or 'min'/'max' attributes");
+ }
- const CodecInfo &info = mCodecInfos.itemAt(index);
+ if ((name == "quality" || name == "complexity") ^
+ (found = msg->findString("default", &def))) {
+ return limitFoundMissingAttr(name, "default", found);
+ }
+ if (name != "quality" && msg->findString("scale", &scale)) {
+ return limitFoundMissingAttr(name, "scale");
+ }
+ if ((name == "aspect-ratio") ^ (found = msg->findString("in", &in_))) {
+ return limitFoundMissingAttr(name, "in", found);
+ }
- for (size_t i = 0; i < mTypes.size(); ++i) {
- uint32_t typeMask = 1ul << mTypes.valueAt(i);
+ if (name == "aspect-ratio") {
+ if (!(in_ == "pixels") && !(in_ == "blocks")) {
+ return limitInvalidAttr(name, "in", in_);
+ }
+ in_.erase(5, 1); // (pixel|block)-aspect-ratio
+ in_.append("-");
+ in_.append(name);
+ name = in_;
+ }
+ if (name == "quality") {
+ mCurrentInfo->addDetail("quality-scale", scale);
+ }
+ if (name == "quality" || name == "complexity") {
+ AString tag = name;
+ tag.append("-default");
+ mCurrentInfo->addDetail(tag, def);
+ }
+ AString tag = name;
+ tag.append("-range");
+ mCurrentInfo->addDetail(tag, range);
+ } else {
+ AString max, value, ranges;
+ if (msg->contains("default")) {
+ return limitFoundMissingAttr(name, "default");
+ } else if (msg->contains("in")) {
+ return limitFoundMissingAttr(name, "in");
+ } else if ((name == "channel-count") ^
+ (found = msg->findString("max", &max))) {
+ return limitFoundMissingAttr(name, "max", found);
+ } else if (msg->contains("min")) {
+ return limitFoundMissingAttr(name, "min");
+ } else if (msg->contains("range")) {
+ return limitFoundMissingAttr(name, "range");
+ } else if ((name == "sample-rate") ^
+ (found = msg->findString("ranges", &ranges))) {
+ return limitFoundMissingAttr(name, "ranges", found);
+ } else if (msg->contains("scale")) {
+ return limitFoundMissingAttr(name, "scale");
+ } else if ((name == "alignment" || name == "block-size") ^
+ (found = msg->findString("value", &value))) {
+ return limitFoundMissingAttr(name, "value", found);
+ }
- if (info.mTypes & typeMask) {
- types->push(mTypes.keyAt(i));
+ if (max.size()) {
+ AString tag = "max-";
+ tag.append(name);
+ mCurrentInfo->addDetail(tag, max);
+ } else if (value.size()) {
+ mCurrentInfo->addDetail(name, value);
+ } else if (ranges.size()) {
+ AString tag = name;
+ tag.append("-ranges");
+ mCurrentInfo->addDetail(tag, ranges);
+ } else {
+ ALOGW("Ignoring unrecognized limit '%s'", name.c_str());
}
}
-
return OK;
}
-status_t MediaCodecList::getCodecCapabilities(
- size_t index, const char *type,
- Vector<ProfileLevel> *profileLevels,
- Vector<uint32_t> *colorFormats,
- uint32_t *flags) const {
- profileLevels->clear();
- colorFormats->clear();
-
- if (index >= mCodecInfos.size()) {
- return -ERANGE;
+static bool parseBoolean(const char *s) {
+ if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
+ return true;
}
+ char *end;
+ unsigned long res = strtoul(s, &end, 10);
+ return *s != '\0' && *end == '\0' && res > 0;
+}
- const CodecInfo &info = mCodecInfos.itemAt(index);
+status_t MediaCodecList::addFeature(const char **attrs) {
+ size_t i = 0;
+ const char *name = NULL;
+ int32_t optional = -1;
+ int32_t required = -1;
+ const char *value = NULL;
- OMXClient client;
- status_t err = client.connect();
- if (err != OK) {
- return err;
- }
+ while (attrs[i] != NULL) {
+ if (attrs[i + 1] == NULL) {
+ return -EINVAL;
+ }
- CodecCapabilities caps;
- err = QueryCodec(
- client.interface(),
- info.mName.c_str(), type, info.mIsEncoder, &caps);
+ // attributes with values
+ if (!strcmp(attrs[i], "name")) {
+ name = attrs[i + 1];
+ ++i;
+ } else if (!strcmp(attrs[i], "optional") || !strcmp(attrs[i], "required")) {
+ int value = (int)parseBoolean(attrs[i + 1]);
+ if (!strcmp(attrs[i], "optional")) {
+ optional = value;
+ } else {
+ required = value;
+ }
+ ++i;
+ } else if (!strcmp(attrs[i], "value")) {
+ value = attrs[i + 1];
+ ++i;
+ } else {
+ return -EINVAL;
+ }
+ ++i;
+ }
+ if (name == NULL) {
+ ALOGE("feature with no 'name' attribute");
+ return -EINVAL;
+ }
- if (err != OK) {
- return err;
+ if (optional == required && optional != -1) {
+ ALOGE("feature '%s' is both/neither optional and required", name);
+ return -EINVAL;
}
- for (size_t i = 0; i < caps.mProfileLevels.size(); ++i) {
- const CodecProfileLevel &src = caps.mProfileLevels.itemAt(i);
+ if ((optional != -1 || required != -1) && (value != NULL)) {
+ ALOGE("feature '%s' has both a value and optional/required attribute", name);
+ return -EINVAL;
+ }
- ProfileLevel profileLevel;
- profileLevel.mProfile = src.mProfile;
- profileLevel.mLevel = src.mLevel;
- profileLevels->push(profileLevel);
+ if (value != NULL) {
+ mCurrentInfo->addFeature(name, value);
+ } else {
+ mCurrentInfo->addFeature(name, (required == 1) || (optional == 0));
}
+ return OK;
+}
- for (size_t i = 0; i < caps.mColorFormats.size(); ++i) {
- colorFormats->push(caps.mColorFormats.itemAt(i));
+ssize_t MediaCodecList::findCodecByName(const char *name) const {
+ for (size_t i = 0; i < mCodecInfos.size(); ++i) {
+ const MediaCodecInfo &info = *mCodecInfos.itemAt(i).get();
+
+ if (info.mName == name) {
+ return i;
+ }
}
- *flags = caps.mFlags;
+ return -ENOENT;
+}
- return OK;
+size_t MediaCodecList::countCodecs() const {
+ return mCodecInfos.size();
}
} // namespace android
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
new file mode 100644
index 0000000..0fecda8
--- /dev/null
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -0,0 +1,890 @@
+/*
+ * Copyright 2014, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecSource"
+#define DEBUG_DRIFT_TIME 0
+
+#include <inttypes.h>
+
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaCodecSource.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+struct MediaCodecSource::Puller : public AHandler {
+ Puller(const sp<MediaSource> &source);
+
+ status_t start(const sp<MetaData> &meta, const sp<AMessage> &notify);
+ void stop();
+
+ void pause();
+ void resume();
+
+protected:
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+ virtual ~Puller();
+
+private:
+ enum {
+ kWhatStart = 'msta',
+ kWhatStop,
+ kWhatPull,
+ kWhatPause,
+ kWhatResume,
+ };
+
+ sp<MediaSource> mSource;
+ sp<AMessage> mNotify;
+ sp<ALooper> mLooper;
+ int32_t mPullGeneration;
+ bool mIsAudio;
+ bool mPaused;
+ bool mReachedEOS;
+
+ status_t postSynchronouslyAndReturnError(const sp<AMessage> &msg);
+ void schedulePull();
+ void handleEOS();
+
+ DISALLOW_EVIL_CONSTRUCTORS(Puller);
+};
+
+MediaCodecSource::Puller::Puller(const sp<MediaSource> &source)
+ : mSource(source),
+ mLooper(new ALooper()),
+ mPullGeneration(0),
+ mIsAudio(false),
+ mPaused(false),
+ mReachedEOS(false) {
+ sp<MetaData> meta = source->getFormat();
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ mIsAudio = !strncasecmp(mime, "audio/", 6);
+
+ mLooper->setName("pull_looper");
+}
+
+MediaCodecSource::Puller::~Puller() {
+ mLooper->unregisterHandler(id());
+ mLooper->stop();
+}
+
+status_t MediaCodecSource::Puller::postSynchronouslyAndReturnError(
+ const sp<AMessage> &msg) {
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (!response->findInt32("err", &err)) {
+ err = OK;
+ }
+
+ return err;
+}
+
+status_t MediaCodecSource::Puller::start(const sp<MetaData> &meta,
+ const sp<AMessage> &notify) {
+ ALOGV("puller (%s) start", mIsAudio ? "audio" : "video");
+ mLooper->start(
+ false /* runOnCallingThread */,
+ false /* canCallJava */,
+ PRIORITY_AUDIO);
+ mLooper->registerHandler(this);
+ mNotify = notify;
+
+ sp<AMessage> msg = new AMessage(kWhatStart, id());
+ msg->setObject("meta", meta);
+ return postSynchronouslyAndReturnError(msg);
+}
+
+void MediaCodecSource::Puller::stop() {
+ // Stop source from caller's thread instead of puller's looper.
+ // mSource->stop() is thread-safe, doing it outside the puller's
+ // looper allows us to at least stop if source gets stuck.
+ // If source gets stuck in read(), the looper would never
+ // be able to process the stop(), which could lead to ANR.
+
+ ALOGV("source (%s) stopping", mIsAudio ? "audio" : "video");
+ mSource->stop();
+ ALOGV("source (%s) stopped", mIsAudio ? "audio" : "video");
+
+ (new AMessage(kWhatStop, id()))->post();
+}
+
+void MediaCodecSource::Puller::pause() {
+ (new AMessage(kWhatPause, id()))->post();
+}
+
+void MediaCodecSource::Puller::resume() {
+ (new AMessage(kWhatResume, id()))->post();
+}
+
+void MediaCodecSource::Puller::schedulePull() {
+ sp<AMessage> msg = new AMessage(kWhatPull, id());
+ msg->setInt32("generation", mPullGeneration);
+ msg->post();
+}
+
+void MediaCodecSource::Puller::handleEOS() {
+ if (!mReachedEOS) {
+ ALOGV("puller (%s) posting EOS", mIsAudio ? "audio" : "video");
+ mReachedEOS = true;
+ sp<AMessage> notify = mNotify->dup();
+ notify->setPointer("accessUnit", NULL);
+ notify->post();
+ }
+}
+
+void MediaCodecSource::Puller::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatStart:
+ {
+ sp<RefBase> obj;
+ CHECK(msg->findObject("meta", &obj));
+
+ mReachedEOS = false;
+
+ status_t err = mSource->start(static_cast<MetaData *>(obj.get()));
+
+ if (err == OK) {
+ schedulePull();
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatStop:
+ {
+ ++mPullGeneration;
+
+ handleEOS();
+ break;
+ }
+
+ case kWhatPull:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+
+ if (generation != mPullGeneration) {
+ break;
+ }
+
+ MediaBuffer *mbuf;
+ status_t err = mSource->read(&mbuf);
+
+ if (mPaused) {
+ if (err == OK) {
+ mbuf->release();
+ mbuf = NULL;
+ }
+
+ msg->post();
+ break;
+ }
+
+ if (err != OK) {
+ if (err == ERROR_END_OF_STREAM) {
+ ALOGV("stream ended, mbuf %p", mbuf);
+ } else {
+ ALOGE("error %d reading stream.", err);
+ }
+ handleEOS();
+ } else {
+ sp<AMessage> notify = mNotify->dup();
+
+ notify->setPointer("accessUnit", mbuf);
+ notify->post();
+
+ msg->post();
+ }
+ break;
+ }
+
+ case kWhatPause:
+ {
+ mPaused = true;
+ break;
+ }
+
+ case kWhatResume:
+ {
+ mPaused = false;
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+// static
+sp<MediaCodecSource> MediaCodecSource::Create(
+ const sp<ALooper> &looper,
+ const sp<AMessage> &format,
+ const sp<MediaSource> &source,
+ uint32_t flags) {
+ sp<MediaCodecSource> mediaSource =
+ new MediaCodecSource(looper, format, source, flags);
+
+ if (mediaSource->init() == OK) {
+ return mediaSource;
+ }
+ return NULL;
+}
+
+status_t MediaCodecSource::start(MetaData* params) {
+ sp<AMessage> msg = new AMessage(kWhatStart, mReflector->id());
+ msg->setObject("meta", params);
+ return postSynchronouslyAndReturnError(msg);
+}
+
+status_t MediaCodecSource::stop() {
+ sp<AMessage> msg = new AMessage(kWhatStop, mReflector->id());
+ status_t err = postSynchronouslyAndReturnError(msg);
+
+ // mPuller->stop() needs to be done outside MediaCodecSource's looper,
+ // as it contains a synchronous call to stop the underlying MediaSource,
+ // which often waits for all outstanding MediaBuffers to return, but
+ // MediaBuffers are only returned when MediaCodecSource looper gets
+ // to process them.
+
+ if (mPuller != NULL) {
+ ALOGI("puller (%s) stopping", mIsVideo ? "video" : "audio");
+ mPuller->stop();
+ ALOGI("puller (%s) stopped", mIsVideo ? "video" : "audio");
+ }
+
+ return err;
+}
+
+status_t MediaCodecSource::pause() {
+ (new AMessage(kWhatPause, mReflector->id()))->post();
+ return OK;
+}
+
+sp<IGraphicBufferProducer> MediaCodecSource::getGraphicBufferProducer() {
+ CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+ return mGraphicBufferProducer;
+}
+
+// MediaSource interface: blocks until an encoded buffer is available or the
+// encoder reaches EOS/error. On success the caller takes ownership of
+// *buffer (returned to us via signalBufferReturned). After EOS, returns
+// mErrorCode (ERROR_END_OF_STREAM or the failure that ended encoding).
+status_t MediaCodecSource::read(
+ MediaBuffer** buffer, const ReadOptions* /* options */) {
+ Mutex::Autolock autolock(mOutputBufferLock);
+
+ *buffer = NULL;
+ while (mOutputBufferQueue.size() == 0 && !mEncoderReachedEOS) {
+ mOutputBufferCond.wait(mOutputBufferLock);
+ }
+ if (!mEncoderReachedEOS) {
+ *buffer = *mOutputBufferQueue.begin();
+ mOutputBufferQueue.erase(mOutputBufferQueue.begin());
+ return OK;
+ }
+ return mErrorCode;
+}
+
+// MediaBufferObserver callback: a buffer handed out by read() has been
+// consumed. We detach ourselves as observer and drop the reference added
+// in doMoreWork(), destroying the buffer.
+void MediaCodecSource::signalBufferReturned(MediaBuffer *buffer) {
+ buffer->setObserver(0);
+ buffer->release();
+}
+
+// Constructor: records configuration only; the encoder itself is created
+// later in init()/initEncoder() (called from Create()).
+MediaCodecSource::MediaCodecSource(
+ const sp<ALooper> &looper,
+ const sp<AMessage> &outputFormat,
+ const sp<MediaSource> &source,
+ uint32_t flags)
+ : mLooper(looper),
+ mOutputFormat(outputFormat),
+ mMeta(new MetaData),
+ mFlags(flags),
+ mIsVideo(false),
+ mStarted(false),
+ mStopping(false),
+ mDoMoreWorkPending(false),
+ mFirstSampleTimeUs(-1ll),
+ mEncoderReachedEOS(false),
+ mErrorCode(OK) {
+ CHECK(mLooper != NULL);
+
+ AString mime;
+ CHECK(mOutputFormat->findString("mime", &mime));
+
+ // "video/..." mime selects video timestamp/EOS handling throughout.
+ if (!strncasecmp("video/", mime.c_str(), 6)) {
+ mIsVideo = true;
+ }
+
+ // A Puller thread feeds us buffers only when input does not arrive
+ // through a Surface; in surface mode "source" is unused.
+ if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+ mPuller = new Puller(source);
+ }
+}
+
+// Destructor: tears down the encoder (no-op if already released) and
+// detaches from both loopers.
+// NOTE(review): assumes initEncoder() ran far enough to create
+// mCodecLooper and mReflector -- true for objects built via Create(),
+// which is the only construction path visible here.
+MediaCodecSource::~MediaCodecSource() {
+ releaseEncoder();
+
+ mCodecLooper->stop();
+ mLooper->unregisterHandler(mReflector->id());
+}
+
+// One-time initialization: creates the encoder and, on failure, releases
+// any partially-created encoder state before returning the error.
+status_t MediaCodecSource::init() {
+ status_t err = initEncoder();
+
+ if (err != OK) {
+ releaseEncoder();
+ }
+
+ return err;
+}
+
+// Creates, configures and starts the MediaCodec encoder:
+//  - registers our handler on the caller-supplied looper,
+//  - spins up a dedicated looper for the codec itself,
+//  - configures metadata-input / suspended-surface modes from mFlags,
+//  - creates the codec by output MIME, configures it for encoding,
+//  - derives mMeta (MediaSource format) from the codec's output format,
+//  - creates the input surface when in surface-input mode,
+//  - starts the codec and caches its input/output buffer arrays.
+// Any error is returned as-is; caller (init) does the cleanup.
+status_t MediaCodecSource::initEncoder() {
+ mReflector = new AHandlerReflector<MediaCodecSource>(this);
+ mLooper->registerHandler(mReflector);
+
+ mCodecLooper = new ALooper;
+ mCodecLooper->setName("codec_looper");
+ mCodecLooper->start();
+
+ if (mFlags & FLAG_USE_METADATA_INPUT) {
+ mOutputFormat->setInt32("store-metadata-in-buffers", 1);
+ }
+
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ // Start with input frames dropped; onStart() resumes the stream.
+ mOutputFormat->setInt32("create-input-buffers-suspended", 1);
+ }
+
+ AString outputMIME;
+ CHECK(mOutputFormat->findString("mime", &outputMIME));
+
+ mEncoder = MediaCodec::CreateByType(
+ mCodecLooper, outputMIME.c_str(), true /* encoder */);
+
+ if (mEncoder == NULL) {
+ return NO_INIT;
+ }
+
+ ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
+
+ status_t err = mEncoder->configure(
+ mOutputFormat,
+ NULL /* nativeWindow */,
+ NULL /* crypto */,
+ MediaCodec::CONFIGURE_FLAG_ENCODE);
+
+ if (err != OK) {
+ return err;
+ }
+
+ // Re-read the (possibly codec-amended) output format and expose it
+ // through getFormat()'s MetaData.
+ mEncoder->getOutputFormat(&mOutputFormat);
+ convertMessageToMetaData(mOutputFormat, mMeta);
+
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ CHECK(mIsVideo);
+
+ err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ err = mEncoder->start();
+
+ if (err != OK) {
+ return err;
+ }
+
+ err = mEncoder->getInputBuffers(&mEncoderInputBuffers);
+
+ if (err != OK) {
+ return err;
+ }
+
+ err = mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+
+ if (err != OK) {
+ return err;
+ }
+
+ mEncoderReachedEOS = false;
+ mErrorCode = OK;
+
+ return OK;
+}
+
+// Releases the codec and drops all queued-but-unprocessed input buffers.
+// Safe to call repeatedly; a no-op once mEncoder is cleared.
+void MediaCodecSource::releaseEncoder() {
+ if (mEncoder == NULL) {
+ return;
+ }
+
+ mEncoder->release();
+ mEncoder.clear();
+
+ // Input buffers the puller delivered but we never fed to the codec.
+ while (!mInputBufferQueue.empty()) {
+ MediaBuffer *mbuf = *mInputBufferQueue.begin();
+ mInputBufferQueue.erase(mInputBufferQueue.begin());
+ if (mbuf != NULL) {
+ mbuf->release();
+ }
+ }
+
+ // Detach MediaBuffers attached to codec input ABuffers (video path).
+ // NOTE(review): presumably setMediaBufferBase(NULL) releases/unhooks
+ // the previously attached MediaBuffer inside ABuffer -- confirm it
+ // does not leak the attached buffer.
+ for (size_t i = 0; i < mEncoderInputBuffers.size(); ++i) {
+ sp<ABuffer> accessUnit = mEncoderInputBuffers.itemAt(i);
+ accessUnit->setMediaBufferBase(NULL);
+ }
+
+ mEncoderInputBuffers.clear();
+ mEncoderOutputBuffers.clear();
+}
+
+// Posts "msg" to our looper and blocks for the reply. Returns the posting
+// error if delivery failed, otherwise the handler-supplied "err" value
+// (defaulting to OK when the reply carries none).
+status_t MediaCodecSource::postSynchronouslyAndReturnError(
+ const sp<AMessage> &msg) {
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (!response->findInt32("err", &err)) {
+ err = OK;
+ }
+
+ return err;
+}
+
+// Marks the encoder stream as finished: flushes unread output buffers,
+// records the terminal status for read(), wakes any blocked reader, tears
+// down the encoder, and -- if a stop() is in flight -- replies to every
+// waiting stop() caller. Runs on the looper thread.
+// NOTE(review): callers elsewhere invoke signalEOS() with no argument, so
+// the declaration presumably defaults err to ERROR_END_OF_STREAM --
+// confirm against the header.
+void MediaCodecSource::signalEOS(status_t err) {
+ if (!mEncoderReachedEOS) {
+ ALOGV("encoder (%s) reached EOS", mIsVideo ? "video" : "audio");
+ {
+ Mutex::Autolock autoLock(mOutputBufferLock);
+ // release all unread media buffers
+ for (List<MediaBuffer*>::iterator it = mOutputBufferQueue.begin();
+ it != mOutputBufferQueue.end(); it++) {
+ (*it)->release();
+ }
+ mOutputBufferQueue.clear();
+ mEncoderReachedEOS = true;
+ mErrorCode = err;
+ mOutputBufferCond.signal();
+ }
+
+ releaseEncoder();
+ }
+ if (mStopping && mEncoderReachedEOS) {
+ ALOGI("encoder (%s) stopped", mIsVideo ? "video" : "audio");
+ // posting reply to everyone that's waiting
+ List<uint32_t>::iterator it;
+ for (it = mStopReplyIDQueue.begin();
+ it != mStopReplyIDQueue.end(); it++) {
+ (new AMessage)->postReply(*it);
+ }
+ mStopReplyIDQueue.clear();
+ mStopping = false;
+ }
+}
+
+// Surface-input only (asserted): tells the codec to drop incoming input
+// frames, effectively pausing capture without stopping the codec.
+void MediaCodecSource::suspend() {
+ CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+ if (mEncoder != NULL) {
+ sp<AMessage> params = new AMessage;
+ params->setInt32("drop-input-frames", true);
+ mEncoder->setParameters(params);
+ }
+}
+
+// Surface-input only (asserted): re-enables input frames. When
+// skipFramesBeforeUs > 0, also asks the codec to discard frames with
+// earlier timestamps (used to align with the recording start time).
+void MediaCodecSource::resume(int64_t skipFramesBeforeUs) {
+ CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
+ if (mEncoder != NULL) {
+ sp<AMessage> params = new AMessage;
+ params->setInt32("drop-input-frames", false);
+ if (skipFramesBeforeUs > 0) {
+ params->setInt64("skip-frames-before", skipFramesBeforeUs);
+ }
+ mEncoder->setParameters(params);
+ }
+}
+
+// Arms a single outstanding activity notification from the codec; when it
+// fires, kWhatEncoderActivity drives doMoreWork(). mDoMoreWorkPending
+// guards against double-arming (it is cleared when the message arrives).
+void MediaCodecSource::scheduleDoMoreWork() {
+ if (mDoMoreWorkPending) {
+ return;
+ }
+
+ mDoMoreWorkPending = true;
+
+ if (mEncoderActivityNotify == NULL) {
+ mEncoderActivityNotify = new AMessage(
+ kWhatEncoderActivity, mReflector->id());
+ }
+ mEncoder->requestActivityNotification(mEncoderActivityNotify);
+}
+
+// Moves pending puller buffers into free codec input slots, pairing each
+// queued MediaBuffer with an available input index. A NULL MediaBuffer in
+// the queue encodes end-of-stream and is submitted as an empty EOS buffer.
+// Returns the first queueInputBuffer error, else OK.
+status_t MediaCodecSource::feedEncoderInputBuffers() {
+ while (!mInputBufferQueue.empty()
+ && !mAvailEncoderInputIndices.empty()) {
+ MediaBuffer* mbuf = *mInputBufferQueue.begin();
+ mInputBufferQueue.erase(mInputBufferQueue.begin());
+
+ size_t bufferIndex = *mAvailEncoderInputIndices.begin();
+ mAvailEncoderInputIndices.erase(mAvailEncoderInputIndices.begin());
+
+ int64_t timeUs = 0ll;
+ uint32_t flags = 0;
+ size_t size = 0;
+
+ if (mbuf != NULL) {
+ CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+
+ // push decoding time for video, or drift time for audio
+ if (mIsVideo) {
+ // Matched up again with the output buffer in doMoreWork().
+ mDecodingTimeQueue.push_back(timeUs);
+ } else {
+#if DEBUG_DRIFT_TIME
+ if (mFirstSampleTimeUs < 0ll) {
+ mFirstSampleTimeUs = timeUs;
+ }
+
+ int64_t driftTimeUs = 0;
+ if (mbuf->meta_data()->findInt64(kKeyDriftTime, &driftTimeUs)
+ && driftTimeUs) {
+ driftTimeUs = timeUs - mFirstSampleTimeUs - driftTimeUs;
+ }
+ mDriftTimeQueue.push_back(driftTimeUs);
+#endif // DEBUG_DRIFT_TIME
+ }
+
+ size = mbuf->size();
+
+ memcpy(mEncoderInputBuffers.itemAt(bufferIndex)->data(),
+ mbuf->data(), size);
+
+ if (mIsVideo) {
+ // video encoder will release MediaBuffer when done
+ // with underlying data.
+ mEncoderInputBuffers.itemAt(bufferIndex)->setMediaBufferBase(
+ mbuf);
+ } else {
+ mbuf->release();
+ }
+ } else {
+ flags = MediaCodec::BUFFER_FLAG_EOS;
+ }
+
+ status_t err = mEncoder->queueInputBuffer(
+ bufferIndex, 0, size, timeUs, flags);
+
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ return OK;
+}
+
+// Pumps the codec: dequeues up to numInput free input slots (buffer-input
+// mode only) and feeds them from the input queue, then drains up to
+// numOutput encoded buffers into mOutputBufferQueue for read().
+// Per output buffer:
+//  - format / output-buffers-changed infos are absorbed here,
+//  - video buffers get kKeyDecodingTime (from mDecodingTimeQueue, or the
+//    presentation time in surface mode -- see TODO below),
+//  - codec-config buffers are tagged kKeyIsCodecConfig instead of a time,
+//  - sync frames are tagged kKeyIsSyncFrame.
+// Returns OK when drained (-EAGAIN is absorbed), ERROR_END_OF_STREAM on
+// the EOS buffer, or the first hard codec error.
+status_t MediaCodecSource::doMoreWork(int32_t numInput, int32_t numOutput) {
+ status_t err = OK;
+
+ if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+ while (numInput-- > 0) {
+ size_t bufferIndex;
+ err = mEncoder->dequeueInputBuffer(&bufferIndex);
+
+ if (err != OK) {
+ break;
+ }
+
+ mAvailEncoderInputIndices.push_back(bufferIndex);
+ }
+
+ feedEncoderInputBuffers();
+ }
+
+ while (numOutput-- > 0) {
+ size_t bufferIndex;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ uint32_t flags;
+ err = mEncoder->dequeueOutputBuffer(
+ &bufferIndex, &offset, &size, &timeUs, &flags);
+
+ if (err != OK) {
+ if (err == INFO_FORMAT_CHANGED) {
+ continue;
+ } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+ mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+ continue;
+ }
+
+ if (err == -EAGAIN) {
+ err = OK;
+ }
+ break;
+ }
+ if (!(flags & MediaCodec::BUFFER_FLAG_EOS)) {
+ sp<ABuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
+
+ // Copy into a standalone MediaBuffer so the codec buffer can be
+ // returned immediately below.
+ MediaBuffer *mbuf = new MediaBuffer(outbuf->size());
+ memcpy(mbuf->data(), outbuf->data(), outbuf->size());
+
+ if (!(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) {
+ if (mIsVideo) {
+ int64_t decodingTimeUs;
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ // GraphicBufferSource is supposed to discard samples
+ // queued before start, and offset timeUs by start time
+ CHECK_GE(timeUs, 0ll);
+ // TODO:
+ // Decoding time for surface source is unavailable,
+ // use presentation time for now. May need to move
+ // this logic into MediaCodec.
+ decodingTimeUs = timeUs;
+ } else {
+ CHECK(!mDecodingTimeQueue.empty());
+ decodingTimeUs = *(mDecodingTimeQueue.begin());
+ mDecodingTimeQueue.erase(mDecodingTimeQueue.begin());
+ }
+ mbuf->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
+
+ ALOGV("[video] time %" PRId64 " us (%.2f secs), dts/pts diff %" PRId64,
+ timeUs, timeUs / 1E6, decodingTimeUs - timeUs);
+ } else {
+ int64_t driftTimeUs = 0;
+#if DEBUG_DRIFT_TIME
+ CHECK(!mDriftTimeQueue.empty());
+ driftTimeUs = *(mDriftTimeQueue.begin());
+ mDriftTimeQueue.erase(mDriftTimeQueue.begin());
+ mbuf->meta_data()->setInt64(kKeyDriftTime, driftTimeUs);
+#endif // DEBUG_DRIFT_TIME
+ ALOGV("[audio] time %" PRId64 " us (%.2f secs), drift %" PRId64,
+ timeUs, timeUs / 1E6, driftTimeUs);
+ }
+ mbuf->meta_data()->setInt64(kKeyTime, timeUs);
+ } else {
+ mbuf->meta_data()->setInt32(kKeyIsCodecConfig, true);
+ }
+ if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
+ mbuf->meta_data()->setInt32(kKeyIsSyncFrame, true);
+ }
+ // Reference is dropped in signalBufferReturned() once the
+ // consumer is done with the buffer.
+ mbuf->setObserver(this);
+ mbuf->add_ref();
+
+ {
+ Mutex::Autolock autoLock(mOutputBufferLock);
+ mOutputBufferQueue.push_back(mbuf);
+ mOutputBufferCond.signal();
+ }
+ }
+
+ mEncoder->releaseOutputBuffer(bufferIndex);
+
+ if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+ err = ERROR_END_OF_STREAM;
+ break;
+ }
+ }
+
+ return err;
+}
+
+// Looper-thread handler for kWhatStart. A second start() while running
+// acts as resume (un-suspend surface input, or resume the puller). First
+// start either resumes the suspended input surface (optionally skipping
+// frames before the caller-provided kKeyTime start time) or kicks off the
+// puller with a kWhatPullerNotify callback target.
+status_t MediaCodecSource::onStart(MetaData *params) {
+ if (mStopping) {
+ ALOGE("Failed to start while we're stopping");
+ return INVALID_OPERATION;
+ }
+
+ if (mStarted) {
+ ALOGI("MediaCodecSource (%s) resuming", mIsVideo ? "video" : "audio");
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ resume();
+ } else {
+ CHECK(mPuller != NULL);
+ mPuller->resume();
+ }
+ return OK;
+ }
+
+ ALOGI("MediaCodecSource (%s) starting", mIsVideo ? "video" : "audio");
+
+ status_t err = OK;
+
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ int64_t startTimeUs;
+ if (!params || !params->findInt64(kKeyTime, &startTimeUs)) {
+ startTimeUs = -1ll;
+ }
+ resume(startTimeUs);
+ scheduleDoMoreWork();
+ } else {
+ CHECK(mPuller != NULL);
+ sp<AMessage> notify = new AMessage(
+ kWhatPullerNotify, mReflector->id());
+ err = mPuller->start(params, notify);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ ALOGI("MediaCodecSource (%s) started", mIsVideo ? "video" : "audio");
+
+ mStarted = true;
+ return OK;
+}
+
+// Central looper-thread dispatcher for puller, codec-activity and control
+// messages. All encoder state is only touched on this thread.
+// BUGFIX: kWhatPause previously tested "mFlags && FLAG_USE_SURFACE_INPUT"
+// (logical AND), which is true whenever ANY flag is set; with e.g. only
+// FLAG_USE_METADATA_INPUT set it would call suspend(), whose
+// CHECK(mFlags & FLAG_USE_SURFACE_INPUT) aborts. Must be bitwise AND.
+void MediaCodecSource::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatPullerNotify:
+ {
+ MediaBuffer *mbuf;
+ CHECK(msg->findPointer("accessUnit", (void**)&mbuf));
+
+ // NULL access unit == source EOS.
+ if (mbuf == NULL) {
+ ALOGV("puller (%s) reached EOS",
+ mIsVideo ? "video" : "audio");
+ signalEOS();
+ }
+
+ // signalEOS() above releases the encoder, so this also catches
+ // buffers arriving after shutdown.
+ if (mEncoder == NULL) {
+ ALOGV("got msg '%s' after encoder shutdown.",
+ msg->debugString().c_str());
+
+ if (mbuf != NULL) {
+ mbuf->release();
+ }
+
+ break;
+ }
+
+ mInputBufferQueue.push_back(mbuf);
+
+ feedEncoderInputBuffers();
+ scheduleDoMoreWork();
+
+ break;
+ }
+ case kWhatEncoderActivity:
+ {
+ mDoMoreWorkPending = false;
+
+ if (mEncoder == NULL) {
+ break;
+ }
+
+ int32_t numInput, numOutput;
+
+ // Absent counts mean "no limit" -- drain as much as possible.
+ if (!msg->findInt32("input-buffers", &numInput)) {
+ numInput = INT32_MAX;
+ }
+ if (!msg->findInt32("output-buffers", &numOutput)) {
+ numOutput = INT32_MAX;
+ }
+
+ status_t err = doMoreWork(numInput, numOutput);
+
+ if (err == OK) {
+ scheduleDoMoreWork();
+ } else {
+ // reached EOS, or error
+ signalEOS(err);
+ }
+
+ break;
+ }
+ case kWhatStart:
+ {
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ sp<RefBase> obj;
+ CHECK(msg->findObject("meta", &obj));
+ MetaData *params = static_cast<MetaData *>(obj.get());
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", onStart(params));
+ response->postReply(replyID);
+ break;
+ }
+ case kWhatStop:
+ {
+ ALOGI("encoder (%s) stopping", mIsVideo ? "video" : "audio");
+
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ if (mEncoderReachedEOS) {
+ // if we already reached EOS, reply and return now
+ ALOGI("encoder (%s) already stopped",
+ mIsVideo ? "video" : "audio");
+ (new AMessage)->postReply(replyID);
+ break;
+ }
+
+ // Replies for all queued stop() callers are posted by
+ // signalEOS() once the encoder is fully stopped.
+ mStopReplyIDQueue.push_back(replyID);
+ if (mStopping) {
+ // nothing to do if we're already stopping, reply will be posted
+ // to all when we're stopped.
+ break;
+ }
+
+ mStopping = true;
+
+ // if using surface, signal source EOS and wait for EOS to come back.
+ // otherwise, release encoder and post EOS if haven't done already
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ mEncoder->signalEndOfInputStream();
+ } else {
+ signalEOS();
+ }
+ break;
+ }
+ case kWhatPause:
+ {
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ suspend();
+ } else {
+ CHECK(mPuller != NULL);
+ mPuller->pause();
+ }
+ break;
+ }
+ default:
+ TRESPASS();
+ }
+}
+
+} // namespace android
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 340cba7..d48dd84 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -23,6 +23,7 @@ const char *MEDIA_MIMETYPE_IMAGE_JPEG = "image/jpeg";
const char *MEDIA_MIMETYPE_VIDEO_VP8 = "video/x-vnd.on2.vp8";
const char *MEDIA_MIMETYPE_VIDEO_VP9 = "video/x-vnd.on2.vp9";
const char *MEDIA_MIMETYPE_VIDEO_AVC = "video/avc";
+const char *MEDIA_MIMETYPE_VIDEO_HEVC = "video/hevc";
const char *MEDIA_MIMETYPE_VIDEO_MPEG4 = "video/mp4v-es";
const char *MEDIA_MIMETYPE_VIDEO_H263 = "video/3gpp";
const char *MEDIA_MIMETYPE_VIDEO_MPEG2 = "video/mpeg2";
@@ -36,6 +37,7 @@ const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II = "audio/mpeg-L2";
const char *MEDIA_MIMETYPE_AUDIO_AAC = "audio/mp4a-latm";
const char *MEDIA_MIMETYPE_AUDIO_QCELP = "audio/qcelp";
const char *MEDIA_MIMETYPE_AUDIO_VORBIS = "audio/vorbis";
+const char *MEDIA_MIMETYPE_AUDIO_OPUS = "audio/opus";
const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW = "audio/g711-alaw";
const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW = "audio/g711-mlaw";
const char *MEDIA_MIMETYPE_AUDIO_RAW = "audio/raw";
@@ -56,5 +58,7 @@ const char *MEDIA_MIMETYPE_CONTAINER_WVM = "video/wvm";
const char *MEDIA_MIMETYPE_TEXT_3GPP = "text/3gpp-tt";
const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
+const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
+const char *MEDIA_MIMETYPE_TEXT_CEA_608 = "text/cea-608";
} // namespace android
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index d87e910..c7c6f34 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -16,6 +16,9 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MediaMuxer"
+
+#include "webm/WebmWriter.h"
+
#include <utils/Log.h>
#include <media/stagefright/MediaMuxer.h>
@@ -36,19 +39,30 @@
namespace android {
MediaMuxer::MediaMuxer(const char *path, OutputFormat format)
- : mState(UNINITIALIZED) {
+ : mFormat(format),
+ mState(UNINITIALIZED) {
if (format == OUTPUT_FORMAT_MPEG_4) {
mWriter = new MPEG4Writer(path);
+ } else if (format == OUTPUT_FORMAT_WEBM) {
+ mWriter = new WebmWriter(path);
+ }
+
+ if (mWriter != NULL) {
mFileMeta = new MetaData;
mState = INITIALIZED;
}
-
}
MediaMuxer::MediaMuxer(int fd, OutputFormat format)
- : mState(UNINITIALIZED) {
+ : mFormat(format),
+ mState(UNINITIALIZED) {
if (format == OUTPUT_FORMAT_MPEG_4) {
mWriter = new MPEG4Writer(fd);
+ } else if (format == OUTPUT_FORMAT_WEBM) {
+ mWriter = new WebmWriter(fd);
+ }
+
+ if (mWriter != NULL) {
mFileMeta = new MetaData;
mState = INITIALIZED;
}
@@ -109,8 +123,13 @@ status_t MediaMuxer::setLocation(int latitude, int longitude) {
ALOGE("setLocation() must be called before start().");
return INVALID_OPERATION;
}
+ if (mFormat != OUTPUT_FORMAT_MPEG_4) {
+ ALOGE("setLocation() is only supported for .mp4 output.");
+ return INVALID_OPERATION;
+ }
+
ALOGV("Setting location: latitude = %d, longitude = %d", latitude, longitude);
- return mWriter->setGeoData(latitude, longitude);
+ return static_cast<MPEG4Writer*>(mWriter.get())->setGeoData(latitude, longitude);
}
status_t MediaMuxer::start() {
@@ -157,7 +176,7 @@ status_t MediaMuxer::writeSampleData(const sp<ABuffer> &buffer, size_t trackInde
}
if (trackIndex >= mTrackList.size()) {
- ALOGE("WriteSampleData() get an invalid index %d", trackIndex);
+ ALOGE("WriteSampleData() get an invalid index %zu", trackIndex);
return -EINVAL;
}
diff --git a/media/libstagefright/MediaSource.cpp b/media/libstagefright/MediaSource.cpp
index fd0e79c..576471a 100644
--- a/media/libstagefright/MediaSource.cpp
+++ b/media/libstagefright/MediaSource.cpp
@@ -32,6 +32,19 @@ void MediaSource::ReadOptions::reset() {
mOptions = 0;
mSeekTimeUs = 0;
mLatenessUs = 0;
+ mNonBlocking = false;
+}
+
+// Request that a read return immediately instead of waiting when no data
+// is available yet.
+void MediaSource::ReadOptions::setNonBlocking() {
+ mNonBlocking = true;
+}
+
+// Restore the default blocking-read behavior.
+void MediaSource::ReadOptions::clearNonBlocking() {
+ mNonBlocking = false;
+}
+
+// True if the caller asked for non-blocking reads via setNonBlocking().
+bool MediaSource::ReadOptions::getNonBlocking() const {
+ return mNonBlocking;
+}
void MediaSource::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp
index 1daead7..74234a6 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/MetaData.cpp
@@ -16,6 +16,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MetaData"
+#include <inttypes.h>
#include <utils/Log.h>
#include <stdlib.h>
@@ -307,7 +308,7 @@ String8 MetaData::typed_data::asString() const {
const void *data = storage();
switch(mType) {
case TYPE_NONE:
- out = String8::format("no type, size %d)", mSize);
+ out = String8::format("no type, size %zu)", mSize);
break;
case TYPE_C_STRING:
out = String8::format("(char*) %s", (const char *)data);
@@ -316,7 +317,7 @@ String8 MetaData::typed_data::asString() const {
out = String8::format("(int32_t) %d", *(int32_t *)data);
break;
case TYPE_INT64:
- out = String8::format("(int64_t) %lld", *(int64_t *)data);
+ out = String8::format("(int64_t) %" PRId64, *(int64_t *)data);
break;
case TYPE_FLOAT:
out = String8::format("(float) %f", *(float *)data);
@@ -333,7 +334,7 @@ String8 MetaData::typed_data::asString() const {
}
default:
- out = String8::format("(unknown type %d, size %d)", mType, mSize);
+ out = String8::format("(unknown type %d, size %zu)", mType, mSize);
if (mSize <= 48) { // if it's less than three lines of hex data, dump it
AString foo;
hexdump(data, mSize, 0, &foo);
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 05e599b..bd0a41d 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <inttypes.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "NuCachedSource2"
#include <utils/Log.h>
@@ -135,7 +137,7 @@ size_t PageCache::releaseFromStart(size_t maxBytes) {
}
void PageCache::copy(size_t from, void *data, size_t size) {
- ALOGV("copy from %d size %d", from, size);
+ ALOGV("copy from %zu size %zu", from, size);
if (size == 0) {
return;
@@ -189,6 +191,7 @@ NuCachedSource2::NuCachedSource2(
mFinalStatus(OK),
mLastAccessPos(0),
mFetching(true),
+ mDisconnecting(false),
mLastFetchTimeUs(-1),
mNumRetriesLeft(kMaxNumRetries),
mHighwaterThresholdBytes(kDefaultHighWaterThreshold),
@@ -213,7 +216,14 @@ NuCachedSource2::NuCachedSource2(
mLooper->setName("NuCachedSource2");
mLooper->registerHandler(mReflector);
- mLooper->start();
+
+ // Since it may not be obvious why our looper thread needs to be
+ // able to call into java since it doesn't appear to do so at all...
+ // IMediaHTTPConnection may be (and most likely is) implemented in JAVA
+ // and a local JAVA IBinder will call directly into JNI methods.
+ // So whenever we call DataSource::readAt it may end up in a call to
+ // IMediaHTTPConnection::readAt and therefore call back into JAVA.
+ mLooper->start(false /* runOnCallingThread */, true /* canCallJava */);
Mutex::Autolock autoLock(mLock);
(new AMessage(kWhatFetchMore, mReflector->id()))->post();
@@ -235,6 +245,27 @@ status_t NuCachedSource2::getEstimatedBandwidthKbps(int32_t *kbps) {
return ERROR_UNSUPPORTED;
}
+// Aborts an HTTP-backed source: marks the cache as disconnecting (so
+// in-flight fetches report EOS), wakes any blocked readAt(), then tears
+// down the underlying HTTP connection. No-op for non-HTTP sources.
+void NuCachedSource2::disconnect() {
+ if (mSource->flags() & kIsHTTPBasedSource) {
+ ALOGV("disconnecting HTTPBasedSource");
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ // set mDisconnecting to true, if a fetch returns after
+ // this, the source will be marked as EOS.
+ mDisconnecting = true;
+
+ // explicitly signal mCondition so that the pending readAt()
+ // will immediately return
+ mCondition.signal();
+ }
+
+ // explicitly disconnect from the source, to allow any
+ // pending reads to return more promptly
+ static_cast<HTTPBase *>(mSource.get())->disconnect();
+ }
+}
+
status_t NuCachedSource2::setCacheStatCollectFreq(int32_t freqMs) {
if (mSource->flags() & kIsHTTPBasedSource) {
HTTPBase *source = static_cast<HTTPBase *>(mSource.get());
@@ -298,7 +329,11 @@ void NuCachedSource2::fetchInternal() {
Mutex::Autolock autoLock(mLock);
- if (err == ERROR_UNSUPPORTED || err == -EPIPE) {
+ if (mDisconnecting) {
+ mNumRetriesLeft = 0;
+ mFinalStatus = ERROR_END_OF_STREAM;
+ return;
+ } else if (err == ERROR_UNSUPPORTED || err == -EPIPE) {
// These are errors that are not likely to go away even if we
// retry, i.e. the server doesn't support range requests or similar.
mNumRetriesLeft = 0;
@@ -318,7 +353,14 @@ void NuCachedSource2::fetchInternal() {
Mutex::Autolock autoLock(mLock);
- if (n < 0) {
+ if (n == 0 || mDisconnecting) {
+ ALOGI("ERROR_END_OF_STREAM");
+
+ mNumRetriesLeft = 0;
+ mFinalStatus = ERROR_END_OF_STREAM;
+
+ mCache->releasePage(page);
+ } else if (n < 0) {
mFinalStatus = n;
if (n == ERROR_UNSUPPORTED || n == -EPIPE) {
// These are errors that are not likely to go away even if we
@@ -326,14 +368,7 @@ void NuCachedSource2::fetchInternal() {
mNumRetriesLeft = 0;
}
- ALOGE("source returned error %ld, %d retries left", n, mNumRetriesLeft);
- mCache->releasePage(page);
- } else if (n == 0) {
- ALOGI("ERROR_END_OF_STREAM");
-
- mNumRetriesLeft = 0;
- mFinalStatus = ERROR_END_OF_STREAM;
-
+ ALOGE("source returned error %zd, %d retries left", n, mNumRetriesLeft);
mCache->releasePage(page);
} else {
if (mFinalStatus != OK) {
@@ -421,6 +456,10 @@ void NuCachedSource2::onRead(const sp<AMessage> &msg) {
}
Mutex::Autolock autoLock(mLock);
+ if (mDisconnecting) {
+ mCondition.signal();
+ return;
+ }
CHECK(mAsyncResult == NULL);
@@ -457,16 +496,19 @@ void NuCachedSource2::restartPrefetcherIfNecessary_l(
size_t actualBytes = mCache->releaseFromStart(maxBytes);
mCacheOffset += actualBytes;
- ALOGI("restarting prefetcher, totalSize = %d", mCache->totalSize());
+ ALOGI("restarting prefetcher, totalSize = %zu", mCache->totalSize());
mFetching = true;
}
ssize_t NuCachedSource2::readAt(off64_t offset, void *data, size_t size) {
Mutex::Autolock autoSerializer(mSerializer);
- ALOGV("readAt offset %lld, size %d", offset, size);
+ ALOGV("readAt offset %lld, size %zu", offset, size);
Mutex::Autolock autoLock(mLock);
+ if (mDisconnecting) {
+ return ERROR_END_OF_STREAM;
+ }
// If the request can be completely satisfied from the cache, do so.
@@ -488,10 +530,15 @@ ssize_t NuCachedSource2::readAt(off64_t offset, void *data, size_t size) {
CHECK(mAsyncResult == NULL);
msg->post();
- while (mAsyncResult == NULL) {
+ while (mAsyncResult == NULL && !mDisconnecting) {
mCondition.wait(mLock);
}
+ if (mDisconnecting) {
+ mAsyncResult.clear();
+ return ERROR_END_OF_STREAM;
+ }
+
int32_t result;
CHECK(mAsyncResult->findInt32("result", &result));
@@ -532,7 +579,7 @@ size_t NuCachedSource2::approxDataRemaining_l(status_t *finalStatus) const {
ssize_t NuCachedSource2::readInternal(off64_t offset, void *data, size_t size) {
CHECK_LE(size, (size_t)mHighwaterThresholdBytes);
- ALOGV("readInternal offset %lld size %d", offset, size);
+ ALOGV("readInternal offset %lld size %zu", offset, size);
Mutex::Autolock autoLock(mLock);
@@ -641,7 +688,7 @@ void NuCachedSource2::updateCacheParamsFromString(const char *s) {
ssize_t lowwaterMarkKb, highwaterMarkKb;
int keepAliveSecs;
- if (sscanf(s, "%ld/%ld/%d",
+ if (sscanf(s, "%zd/%zd/%d",
&lowwaterMarkKb, &highwaterMarkKb, &keepAliveSecs) != 3) {
ALOGE("Failed to parse cache parameters from '%s'.", s);
return;
@@ -672,7 +719,7 @@ void NuCachedSource2::updateCacheParamsFromString(const char *s) {
mKeepAliveIntervalUs = kDefaultKeepAliveIntervalUs;
}
- ALOGV("lowwater = %d bytes, highwater = %d bytes, keepalive = %lld us",
+ ALOGV("lowwater = %zu bytes, highwater = %zu bytes, keepalive = %" PRId64 " us",
mLowwaterThresholdBytes,
mHighwaterThresholdBytes,
mKeepAliveIntervalUs);
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 7bc7da2..f24cf3a 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -58,7 +58,9 @@ NuMediaExtractor::~NuMediaExtractor() {
}
status_t NuMediaExtractor::setDataSource(
- const char *path, const KeyedVector<String8, String8> *headers) {
+ const sp<IMediaHTTPService> &httpService,
+ const char *path,
+ const KeyedVector<String8, String8> *headers) {
Mutex::Autolock autoLock(mLock);
if (mImpl != NULL) {
@@ -66,7 +68,7 @@ status_t NuMediaExtractor::setDataSource(
}
sp<DataSource> dataSource =
- DataSource::CreateFromURI(path, headers);
+ DataSource::CreateFromURI(httpService, path, headers);
if (dataSource == NULL) {
return -ENOENT;
@@ -387,7 +389,7 @@ ssize_t NuMediaExtractor::fetchTrackSamples(
info->mFinalResult = err;
if (info->mFinalResult != ERROR_END_OF_STREAM) {
- ALOGW("read on track %d failed with error %d",
+ ALOGW("read on track %zu failed with error %d",
info->mTrackIndex, err);
}
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 9f9352d..ca031aa 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -16,6 +16,11 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "OMXClient"
+
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
#include <utils/Log.h>
#include <binder/IServiceManager.h>
@@ -73,6 +78,10 @@ struct MuxOMX : public IOMX {
node_id node, OMX_U32 port_index, OMX_BOOL enable,
OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight);
+ virtual status_t configureVideoTunnelMode(
+ node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
+
virtual status_t enableGraphicBuffers(
node_id node, OMX_U32 port_index, OMX_BOOL enable);
@@ -141,7 +150,7 @@ private:
const sp<IOMX> &getOMX(node_id node) const;
const sp<IOMX> &getOMX_l(node_id node) const;
- static bool IsSoftwareComponent(const char *name);
+ static bool CanLiveLocally(const char *name);
DISALLOW_EVIL_CONSTRUCTORS(MuxOMX);
};
@@ -164,8 +173,15 @@ bool MuxOMX::isLocalNode_l(node_id node) const {
}
// static
-bool MuxOMX::IsSoftwareComponent(const char *name) {
+bool MuxOMX::CanLiveLocally(const char *name) {
+#ifdef __LP64__
+ (void)name; // disable unused parameter warning
+ // 64 bit processes always run OMX remote on MediaServer
+ return false;
+#else
+ // 32 bit processes run only OMX.google.* components locally
return !strncasecmp(name, "OMX.google.", 11);
+#endif
}
const sp<IOMX> &MuxOMX::getOMX(node_id node) const {
@@ -197,7 +213,7 @@ status_t MuxOMX::allocateNode(
sp<IOMX> omx;
- if (IsSoftwareComponent(name)) {
+ if (CanLiveLocally(name)) {
if (mLocalOMX == NULL) {
mLocalOMX = new OMX;
}
@@ -279,6 +295,13 @@ status_t MuxOMX::prepareForAdaptivePlayback(
node, port_index, enable, maxFrameWidth, maxFrameHeight);
}
+// Forwards the tunnel-mode configuration to whichever IOMX instance
+// (local or mediaserver-side) owns this node.
+status_t MuxOMX::configureVideoTunnelMode(
+ node_id node, OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
+ return getOMX(node)->configureVideoTunnelMode(
+ node, portIndex, enable, audioHwSync, sidebandHandle);
+}
+
status_t MuxOMX::enableGraphicBuffers(
node_id node, OMX_U32 port_index, OMX_BOOL enable) {
return getOMX(node)->enableGraphicBuffers(node, port_index, enable);
@@ -382,7 +405,7 @@ status_t OMXClient::connect() {
mOMX = service->getOMX();
CHECK(mOMX.get() != NULL);
- if (!mOMX->livesLocally(NULL /* node */, getpid())) {
+ if (!mOMX->livesLocally(0 /* node */, getpid())) {
ALOGI("Using client-side OMX mux.");
mOMX = new MuxOMX(mOMX);
}
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 625922f..a8806c8 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -14,8 +14,15 @@
* limitations under the License.
*/
+#include <inttypes.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "OMXCodec"
+
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
#include <utils/Log.h>
#include "include/AACEncoder.h"
@@ -28,6 +35,7 @@
#include <HardwareAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/IMediaPlayerService.h>
+#include <media/stagefright/ACodec.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDefs.h>
@@ -94,6 +102,7 @@ static sp<MediaSource> InstantiateSoftwareEncoder(
#define CODEC_LOGI(x, ...) ALOGI("[%s] "x, mComponentName, ##__VA_ARGS__)
#define CODEC_LOGV(x, ...) ALOGV("[%s] "x, mComponentName, ##__VA_ARGS__)
+#define CODEC_LOGW(x, ...) ALOGW("[%s] "x, mComponentName, ##__VA_ARGS__)
#define CODEC_LOGE(x, ...) ALOGE("[%s] "x, mComponentName, ##__VA_ARGS__)
struct OMXCodecObserver : public BnOMXObserver {
@@ -127,6 +136,7 @@ private:
template<class T>
static void InitOMXParams(T *params) {
+ COMPILE_TIME_ASSERT_FUNCTION_SCOPE(sizeof(OMX_PTR) == 4); // check OMX_PTR is 4 bytes.
params->nSize = sizeof(T);
params->nVersion.s.nVersionMajor = 1;
params->nVersion.s.nVersionMinor = 0;
@@ -188,7 +198,7 @@ void OMXCodec::findMatchingCodecs(
Vector<CodecNameAndQuirks> *matchingCodecs) {
matchingCodecs->clear();
- const MediaCodecList *list = MediaCodecList::getInstance();
+ const sp<IMediaCodecList> list = MediaCodecList::getInstance();
if (list == NULL) {
return;
}
@@ -204,7 +214,9 @@ void OMXCodec::findMatchingCodecs(
index = matchIndex + 1;
- const char *componentName = list->getCodecName(matchIndex);
+ const sp<MediaCodecInfo> info = list->getCodecInfo(matchIndex);
+ CHECK(info != NULL);
+ const char *componentName = info->getCodecName();
// If a specific codec is requested, skip the non-matching ones.
if (matchComponentName && strcmp(componentName, matchComponentName)) {
@@ -222,7 +234,7 @@ void OMXCodec::findMatchingCodecs(
ssize_t index = matchingCodecs->add();
CodecNameAndQuirks *entry = &matchingCodecs->editItemAt(index);
entry->mName = String8(componentName);
- entry->mQuirks = getComponentQuirks(list, matchIndex);
+ entry->mQuirks = getComponentQuirks(info);
ALOGV("matching '%s' quirks 0x%08x",
entry->mName.string(), entry->mQuirks);
@@ -236,18 +248,15 @@ void OMXCodec::findMatchingCodecs(
// static
uint32_t OMXCodec::getComponentQuirks(
- const MediaCodecList *list, size_t index) {
+ const sp<MediaCodecInfo> &info) {
uint32_t quirks = 0;
- if (list->codecHasQuirk(
- index, "requires-allocate-on-input-ports")) {
+ if (info->hasQuirk("requires-allocate-on-input-ports")) {
quirks |= kRequiresAllocateBufferOnInputPorts;
}
- if (list->codecHasQuirk(
- index, "requires-allocate-on-output-ports")) {
+ if (info->hasQuirk("requires-allocate-on-output-ports")) {
quirks |= kRequiresAllocateBufferOnOutputPorts;
}
- if (list->codecHasQuirk(
- index, "output-buffers-are-unreadable")) {
+ if (info->hasQuirk("output-buffers-are-unreadable")) {
quirks |= kOutputBuffersAreUnreadable;
}
@@ -256,8 +265,7 @@ uint32_t OMXCodec::getComponentQuirks(
// static
bool OMXCodec::findCodecQuirks(const char *componentName, uint32_t *quirks) {
- const MediaCodecList *list = MediaCodecList::getInstance();
-
+ const sp<IMediaCodecList> list = MediaCodecList::getInstance();
if (list == NULL) {
return false;
}
@@ -268,7 +276,9 @@ bool OMXCodec::findCodecQuirks(const char *componentName, uint32_t *quirks) {
return false;
}
- *quirks = getComponentQuirks(list, index);
+ const sp<MediaCodecInfo> info = list->getCodecInfo(index);
+ CHECK(info != NULL);
+ *quirks = getComponentQuirks(info);
return true;
}
@@ -372,6 +382,57 @@ sp<MediaSource> OMXCodec::Create(
return NULL;
}
+status_t OMXCodec::parseHEVCCodecSpecificData(
+ const void *data, size_t size,
+ unsigned *profile, unsigned *level) {
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ // verify minimum size and configurationVersion == 1.
+ if (size < 7 || ptr[0] != 1) {
+ return ERROR_MALFORMED;
+ }
+
+ *profile = (ptr[1] & 31);
+ *level = ptr[12];
+
+ ptr += 22;
+ size -= 22;
+
+ size_t numofArrays = (char)ptr[0];
+ ptr += 1;
+ size -= 1;
+ size_t j = 0, i = 0;
+ for (i = 0; i < numofArrays; i++) {
+ ptr += 1;
+ size -= 1;
+
+ // Num of nals
+ size_t numofNals = U16_AT(ptr);
+ ptr += 2;
+ size -= 2;
+
+ for (j = 0;j < numofNals;j++) {
+ if (size < 2) {
+ return ERROR_MALFORMED;
+ }
+
+ size_t length = U16_AT(ptr);
+
+ ptr += 2;
+ size -= 2;
+
+ if (size < length) {
+ return ERROR_MALFORMED;
+ }
+ addCodecSpecificData(ptr, length);
+
+ ptr += length;
+ size -= length;
+ }
+ }
+ return OK;
+}
+
status_t OMXCodec::parseAVCCodecSpecificData(
const void *data, size_t size,
unsigned *profile, unsigned *level) {
@@ -484,11 +545,32 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
CODEC_LOGI(
"AVC profile = %u (%s), level = %u",
profile, AVCProfileToString(profile), level);
+ } else if (meta->findData(kKeyHVCC, &type, &data, &size)) {
+ // Parse the HEVCDecoderConfigurationRecord
+
+ unsigned profile, level;
+ status_t err;
+ if ((err = parseHEVCCodecSpecificData(
+ data, size, &profile, &level)) != OK) {
+ ALOGE("Malformed HEVC codec specific data.");
+ return err;
+ }
+
+ CODEC_LOGI(
+ "HEVC profile = %u , level = %u",
+ profile, level);
} else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
addCodecSpecificData(data, size);
CHECK(meta->findData(kKeyVorbisBooks, &type, &data, &size));
addCodecSpecificData(data, size);
+ } else if (meta->findData(kKeyOpusHeader, &type, &data, &size)) {
+ addCodecSpecificData(data, size);
+
+ CHECK(meta->findData(kKeyOpusCodecDelay, &type, &data, &size));
+ addCodecSpecificData(data, size);
+ CHECK(meta->findData(kKeyOpusSeekPreRoll, &type, &data, &size));
+ addCodecSpecificData(data, size);
}
}
@@ -679,7 +761,7 @@ status_t OMXCodec::setVideoPortFormatType(
// CHECK_EQ(format.nIndex, index);
#if 1
- CODEC_LOGV("portIndex: %ld, index: %ld, eCompressionFormat=%d eColorFormat=%d",
+ CODEC_LOGV("portIndex: %u, index: %u, eCompressionFormat=%d eColorFormat=%d",
portIndex,
index, format.eCompressionFormat, format.eColorFormat);
#endif
@@ -781,7 +863,7 @@ status_t OMXCodec::isColorFormatSupported(
portFormat.nIndex = index;
if (index >= kMaxColorFormatSupported) {
- CODEC_LOGE("More than %ld color formats are supported???", index);
+ CODEC_LOGE("More than %u color formats are supported???", index);
break;
}
}
@@ -806,6 +888,8 @@ void OMXCodec::setVideoInputFormat(
OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
compressionFormat = OMX_VIDEO_CodingAVC;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
+ compressionFormat = OMX_VIDEO_CodingHEVC;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
compressionFormat = OMX_VIDEO_CodingMPEG4;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
@@ -910,7 +994,6 @@ static OMX_U32 setPFramesSpacing(int32_t iFramesInterval, int32_t frameRate) {
return 0;
}
OMX_U32 ret = frameRate * iFramesInterval - 1;
- CHECK(ret > 1);
return ret;
}
@@ -1201,6 +1284,8 @@ status_t OMXCodec::setVideoOutputFormat(
compressionFormat = OMX_VIDEO_CodingAVC;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
compressionFormat = OMX_VIDEO_CodingMPEG4;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
+ compressionFormat = OMX_VIDEO_CodingHEVC;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
compressionFormat = OMX_VIDEO_CodingH263;
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VP8, mime)) {
@@ -1387,12 +1472,16 @@ void OMXCodec::setComponentRole(
"audio_decoder.aac", "audio_encoder.aac" },
{ MEDIA_MIMETYPE_AUDIO_VORBIS,
"audio_decoder.vorbis", "audio_encoder.vorbis" },
+ { MEDIA_MIMETYPE_AUDIO_OPUS,
+ "audio_decoder.opus", "audio_encoder.opus" },
{ MEDIA_MIMETYPE_AUDIO_G711_MLAW,
"audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
{ MEDIA_MIMETYPE_AUDIO_G711_ALAW,
"audio_decoder.g711alaw", "audio_encoder.g711alaw" },
{ MEDIA_MIMETYPE_VIDEO_AVC,
"video_decoder.avc", "video_encoder.avc" },
+ { MEDIA_MIMETYPE_VIDEO_HEVC,
+ "video_decoder.hevc", "video_encoder.hevc" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4,
"video_decoder.mpeg4", "video_encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_H263,
@@ -1462,7 +1551,7 @@ OMXCodec::~OMXCodec() {
status_t err = mOMX->freeNode(mNode);
CHECK_EQ(err, (status_t)OK);
- mNode = NULL;
+ mNode = 0;
setState(DEAD);
clearCodecSpecificData();
@@ -1615,15 +1704,15 @@ status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
info.mMediaBuffer = NULL;
if (portIndex == kPortIndexOutput) {
- if (!(mOMXLivesLocally
- && (mQuirks & kRequiresAllocateBufferOnOutputPorts)
- && (mQuirks & kDefersOutputBufferAllocation))) {
- // If the node does not fill in the buffer ptr at this time,
- // we will defer creating the MediaBuffer until receiving
- // the first FILL_BUFFER_DONE notification instead.
- info.mMediaBuffer = new MediaBuffer(info.mData, info.mSize);
- info.mMediaBuffer->setObserver(this);
- }
+ // Fail deferred MediaBuffer creation until FILL_BUFFER_DONE;
+ // this legacy mode is no longer supported.
+ LOG_ALWAYS_FATAL_IF((mOMXLivesLocally
+ && (mQuirks & kRequiresAllocateBufferOnOutputPorts)
+ && (mQuirks & kDefersOutputBufferAllocation)),
+ "allocateBuffersOnPort cannot defer buffer allocation");
+
+ info.mMediaBuffer = new MediaBuffer(info.mData, info.mSize);
+ info.mMediaBuffer->setObserver(this);
}
mPortBuffers[portIndex].push(info);
@@ -1794,21 +1883,42 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
strerror(-err), -err);
return err;
}
-
- // XXX: Is this the right logic to use? It's not clear to me what the OMX
- // buffer counts refer to - how do they account for the renderer holding on
- // to buffers?
- if (def.nBufferCountActual < def.nBufferCountMin + minUndequeuedBufs) {
- OMX_U32 newBufferCount = def.nBufferCountMin + minUndequeuedBufs;
+ // FIXME: assume that surface is controlled by app (native window
+ // returns the number for the case when surface is not controlled by app)
+ // FIXME2: This means that minUndeqeueudBufs can be 1 larger than reported
+ // For now, try to allocate 1 more buffer, but don't fail if unsuccessful
+
+ // Use conservative allocation while also trying to reduce starvation
+ //
+ // 1. allocate at least nBufferCountMin + minUndequeuedBuffers - that is the
+ // minimum needed for the consumer to be able to work
+ // 2. try to allocate two (2) additional buffers to reduce starvation from
+ // the consumer
+ // plus an extra buffer to account for incorrect minUndequeuedBufs
+ CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
+ def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
+
+ for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
+ OMX_U32 newBufferCount =
+ def.nBufferCountMin + minUndequeuedBufs + extraBuffers;
def.nBufferCountActual = newBufferCount;
err = mOMX->setParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
- if (err != OK) {
- CODEC_LOGE("setting nBufferCountActual to %lu failed: %d",
- newBufferCount, err);
+
+ if (err == OK) {
+ minUndequeuedBufs += extraBuffers;
+ break;
+ }
+
+ CODEC_LOGW("setting nBufferCountActual to %u failed: %d",
+ newBufferCount, err);
+ /* exit condition */
+ if (extraBuffers == 0) {
return err;
}
}
+ CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
+ def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
err = native_window_set_buffer_count(
mNativeWindow.get(), def.nBufferCountActual);
@@ -1818,7 +1928,7 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
return err;
}
- CODEC_LOGV("allocating %lu buffers from a native window of size %lu on "
+ CODEC_LOGV("allocating %u buffers from a native window of size %u on "
"output port", def.nBufferCountActual, def.nBufferSize);
// Dequeue buffers and send them to OMX
@@ -1851,7 +1961,7 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
mPortBuffers[kPortIndexOutput].editItemAt(i).mBuffer = bufferId;
- CODEC_LOGV("registered graphic buffer with ID %p (pointer = %p)",
+ CODEC_LOGV("registered graphic buffer with ID %u (pointer = %p)",
bufferId, graphicBuffer.get());
}
@@ -1878,7 +1988,7 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
status_t OMXCodec::cancelBufferToNativeWindow(BufferInfo *info) {
CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
- CODEC_LOGV("Calling cancelBuffer on buffer %p", info->mBuffer);
+ CODEC_LOGV("Calling cancelBuffer on buffer %u", info->mBuffer);
int err = mNativeWindow->cancelBuffer(
mNativeWindow.get(), info->mMediaBuffer->graphicBuffer().get(), -1);
if (err != 0) {
@@ -2116,7 +2226,7 @@ void OMXCodec::on_message(const omx_message &msg) {
{
IOMX::buffer_id buffer = msg.u.extended_buffer_data.buffer;
- CODEC_LOGV("EMPTY_BUFFER_DONE(buffer: %p)", buffer);
+ CODEC_LOGV("EMPTY_BUFFER_DONE(buffer: %u)", buffer);
Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
size_t i = 0;
@@ -2126,7 +2236,7 @@ void OMXCodec::on_message(const omx_message &msg) {
CHECK(i < buffers->size());
if ((*buffers)[i].mStatus != OWNED_BY_COMPONENT) {
- ALOGW("We already own input buffer %p, yet received "
+ ALOGW("We already own input buffer %u, yet received "
"an EMPTY_BUFFER_DONE.", buffer);
}
@@ -2140,7 +2250,7 @@ void OMXCodec::on_message(const omx_message &msg) {
}
if (mPortStatus[kPortIndexInput] == DISABLING) {
- CODEC_LOGV("Port is disabled, freeing buffer %p", buffer);
+ CODEC_LOGV("Port is disabled, freeing buffer %u", buffer);
status_t err = freeBuffer(kPortIndexInput, i);
CHECK_EQ(err, (status_t)OK);
@@ -2162,7 +2272,7 @@ void OMXCodec::on_message(const omx_message &msg) {
IOMX::buffer_id buffer = msg.u.extended_buffer_data.buffer;
OMX_U32 flags = msg.u.extended_buffer_data.flags;
- CODEC_LOGV("FILL_BUFFER_DONE(buffer: %p, size: %ld, flags: 0x%08lx, timestamp: %lld us (%.2f secs))",
+ CODEC_LOGV("FILL_BUFFER_DONE(buffer: %u, size: %u, flags: 0x%08x, timestamp: %lld us (%.2f secs))",
buffer,
msg.u.extended_buffer_data.range_length,
flags,
@@ -2179,14 +2289,14 @@ void OMXCodec::on_message(const omx_message &msg) {
BufferInfo *info = &buffers->editItemAt(i);
if (info->mStatus != OWNED_BY_COMPONENT) {
- ALOGW("We already own output buffer %p, yet received "
+ ALOGW("We already own output buffer %u, yet received "
"a FILL_BUFFER_DONE.", buffer);
}
info->mStatus = OWNED_BY_US;
if (mPortStatus[kPortIndexOutput] == DISABLING) {
- CODEC_LOGV("Port is disabled, freeing buffer %p", buffer);
+ CODEC_LOGV("Port is disabled, freeing buffer %u", buffer);
status_t err = freeBuffer(kPortIndexOutput, i);
CHECK_EQ(err, (status_t)OK);
@@ -2201,22 +2311,6 @@ void OMXCodec::on_message(const omx_message &msg) {
} else if (mPortStatus[kPortIndexOutput] != SHUTTING_DOWN) {
CHECK_EQ((int)mPortStatus[kPortIndexOutput], (int)ENABLED);
- if (info->mMediaBuffer == NULL) {
- CHECK(mOMXLivesLocally);
- CHECK(mQuirks & kRequiresAllocateBufferOnOutputPorts);
- CHECK(mQuirks & kDefersOutputBufferAllocation);
-
- // The qcom video decoders on Nexus don't actually allocate
- // output buffer memory on a call to OMX_AllocateBuffer
- // the "pBuffer" member of the OMX_BUFFERHEADERTYPE
- // structure is only filled in later.
-
- info->mMediaBuffer = new MediaBuffer(
- msg.u.extended_buffer_data.data_ptr,
- info->mSize);
- info->mMediaBuffer->setObserver(this);
- }
-
MediaBuffer *buffer = info->mMediaBuffer;
bool isGraphicBuffer = buffer->graphicBuffer() != NULL;
@@ -2251,11 +2345,7 @@ void OMXCodec::on_message(const omx_message &msg) {
buffer->meta_data()->setInt32(kKeyIsUnreadable, true);
}
- buffer->meta_data()->setPointer(
- kKeyPlatformPrivate,
- msg.u.extended_buffer_data.platform_private);
-
- buffer->meta_data()->setPointer(
+ buffer->meta_data()->setInt32(
kKeyBufferID,
msg.u.extended_buffer_data.buffer);
@@ -2397,7 +2487,7 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
case OMX_EventError:
{
- CODEC_LOGE("ERROR(0x%08lx, %ld)", data1, data2);
+ CODEC_LOGE("OMX_EventError(0x%08x, %u)", data1, data2);
setState(ERROR);
break;
@@ -2405,16 +2495,10 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
case OMX_EventPortSettingsChanged:
{
- CODEC_LOGV("OMX_EventPortSettingsChanged(port=%ld, data2=0x%08lx)",
+ CODEC_LOGV("OMX_EventPortSettingsChanged(port=%u, data2=0x%08x)",
data1, data2);
if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
- // There is no need to check whether mFilledBuffers is empty or not
- // when the OMX_EventPortSettingsChanged is not meant for reallocating
- // the output buffers.
- if (data1 == kPortIndexOutput) {
- CHECK(mFilledBuffers.empty());
- }
onPortSettingsChanged(data1);
} else if (data1 == kPortIndexOutput &&
(data2 == OMX_IndexConfigCommonOutputCrop ||
@@ -2445,7 +2529,7 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
// The scale is in 16.16 format.
// scale 1.0 = 0x010000. When there is no
// need to change the display, skip it.
- ALOGV("Get OMX_IndexConfigScale: 0x%lx/0x%lx",
+ ALOGV("Get OMX_IndexConfigScale: 0x%x/0x%x",
scale.xWidth, scale.xHeight);
if (scale.xWidth != 0x010000) {
@@ -2479,7 +2563,7 @@ void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
default:
{
- CODEC_LOGV("EVENT(%d, %ld, %ld)", event, data1, data2);
+ CODEC_LOGV("EVENT(%d, %u, %u)", event, data1, data2);
break;
}
}
@@ -2496,7 +2580,7 @@ void OMXCodec::onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data) {
case OMX_CommandPortDisable:
{
OMX_U32 portIndex = data;
- CODEC_LOGV("PORT_DISABLED(%ld)", portIndex);
+ CODEC_LOGV("PORT_DISABLED(%u)", portIndex);
CHECK(mState == EXECUTING || mState == RECONFIGURING);
CHECK_EQ((int)mPortStatus[portIndex], (int)DISABLING);
@@ -2520,7 +2604,7 @@ void OMXCodec::onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data) {
status_t err = enablePortAsync(portIndex);
if (err != OK) {
- CODEC_LOGE("enablePortAsync(%ld) failed (err = %d)", portIndex, err);
+ CODEC_LOGE("enablePortAsync(%u) failed (err = %d)", portIndex, err);
setState(ERROR);
} else {
err = allocateBuffersOnPort(portIndex);
@@ -2541,7 +2625,7 @@ void OMXCodec::onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data) {
case OMX_CommandPortEnable:
{
OMX_U32 portIndex = data;
- CODEC_LOGV("PORT_ENABLED(%ld)", portIndex);
+ CODEC_LOGV("PORT_ENABLED(%u)", portIndex);
CHECK(mState == EXECUTING || mState == RECONFIGURING);
CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLING);
@@ -2562,7 +2646,7 @@ void OMXCodec::onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data) {
{
OMX_U32 portIndex = data;
- CODEC_LOGV("FLUSH_DONE(%ld)", portIndex);
+ CODEC_LOGV("FLUSH_DONE(%u)", portIndex);
CHECK_EQ((int)mPortStatus[portIndex], (int)SHUTTING_DOWN);
mPortStatus[portIndex] = ENABLED;
@@ -2808,7 +2892,7 @@ status_t OMXCodec::freeBuffer(OMX_U32 portIndex, size_t bufIndex) {
void OMXCodec::onPortSettingsChanged(OMX_U32 portIndex) {
CODEC_LOGV("PORT_SETTINGS_CHANGED(%ld)", portIndex);
- CHECK_EQ((int)mState, (int)EXECUTING);
+ CHECK(mState == EXECUTING || mState == EXECUTING_TO_IDLE);
CHECK_EQ(portIndex, (OMX_U32)kPortIndexOutput);
CHECK(!mOutputPortSettingsChangedPending);
@@ -2990,7 +3074,8 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) {
size_t size = specific->mSize;
- if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mMIME)
+ if ((!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mMIME) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mMIME))
&& !(mQuirks & kWantsNALFragments)) {
static const uint8_t kNALStartCode[4] =
{ 0x00, 0x00, 0x00, 0x01 };
@@ -3880,7 +3965,7 @@ status_t OMXCodec::read(
return UNKNOWN_ERROR;
}
- CODEC_LOGV("seeking to %lld us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);
+ CODEC_LOGV("seeking to %" PRId64 " us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);
mSignalledEOS = false;
@@ -4125,6 +4210,7 @@ static const char *audioCodingTypeString(OMX_AUDIO_CODINGTYPE type) {
"OMX_AUDIO_CodingMP3",
"OMX_AUDIO_CodingSBC",
"OMX_AUDIO_CodingVORBIS",
+ "OMX_AUDIO_CodingOPUS",
"OMX_AUDIO_CodingWMA",
"OMX_AUDIO_CodingRA",
"OMX_AUDIO_CodingMIDI",
@@ -4219,9 +4305,9 @@ void OMXCodec::dumpPortStatus(OMX_U32 portIndex) {
CHECK((portIndex == kPortIndexInput && def.eDir == OMX_DirInput)
|| (portIndex == kPortIndexOutput && def.eDir == OMX_DirOutput));
- printf(" nBufferCountActual = %ld\n", def.nBufferCountActual);
- printf(" nBufferCountMin = %ld\n", def.nBufferCountMin);
- printf(" nBufferSize = %ld\n", def.nBufferSize);
+ printf(" nBufferCountActual = %" PRIu32 "\n", def.nBufferCountActual);
+ printf(" nBufferCountMin = %" PRIu32 "\n", def.nBufferCountMin);
+ printf(" nBufferSize = %" PRIu32 "\n", def.nBufferSize);
switch (def.eDomain) {
case OMX_PortDomainImage:
@@ -4230,9 +4316,9 @@ void OMXCodec::dumpPortStatus(OMX_U32 portIndex) {
printf("\n");
printf(" // Image\n");
- printf(" nFrameWidth = %ld\n", imageDef->nFrameWidth);
- printf(" nFrameHeight = %ld\n", imageDef->nFrameHeight);
- printf(" nStride = %ld\n", imageDef->nStride);
+ printf(" nFrameWidth = %" PRIu32 "\n", imageDef->nFrameWidth);
+ printf(" nFrameHeight = %" PRIu32 "\n", imageDef->nFrameHeight);
+ printf(" nStride = %" PRIu32 "\n", imageDef->nStride);
printf(" eCompressionFormat = %s\n",
imageCompressionFormatString(imageDef->eCompressionFormat));
@@ -4249,9 +4335,9 @@ void OMXCodec::dumpPortStatus(OMX_U32 portIndex) {
printf("\n");
printf(" // Video\n");
- printf(" nFrameWidth = %ld\n", videoDef->nFrameWidth);
- printf(" nFrameHeight = %ld\n", videoDef->nFrameHeight);
- printf(" nStride = %ld\n", videoDef->nStride);
+ printf(" nFrameWidth = %" PRIu32 "\n", videoDef->nFrameWidth);
+ printf(" nFrameHeight = %" PRIu32 "\n", videoDef->nFrameHeight);
+ printf(" nStride = %" PRIu32 "\n", videoDef->nStride);
printf(" eCompressionFormat = %s\n",
videoCompressionFormatString(videoDef->eCompressionFormat));
@@ -4280,10 +4366,10 @@ void OMXCodec::dumpPortStatus(OMX_U32 portIndex) {
mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
CHECK_EQ(err, (status_t)OK);
- printf(" nSamplingRate = %ld\n", params.nSamplingRate);
- printf(" nChannels = %ld\n", params.nChannels);
+ printf(" nSamplingRate = %" PRIu32 "\n", params.nSamplingRate);
+ printf(" nChannels = %" PRIu32 "\n", params.nChannels);
printf(" bInterleaved = %d\n", params.bInterleaved);
- printf(" nBitPerSample = %ld\n", params.nBitPerSample);
+ printf(" nBitPerSample = %" PRIu32 "\n", params.nBitPerSample);
printf(" eNumData = %s\n",
params.eNumData == OMX_NumericalDataSigned
@@ -4299,7 +4385,7 @@ void OMXCodec::dumpPortStatus(OMX_U32 portIndex) {
mNode, OMX_IndexParamAudioAmr, &amr, sizeof(amr));
CHECK_EQ(err, (status_t)OK);
- printf(" nChannels = %ld\n", amr.nChannels);
+ printf(" nChannels = %" PRIu32 "\n", amr.nChannels);
printf(" eAMRBandMode = %s\n",
amrBandModeString(amr.eAMRBandMode));
printf(" eAMRFrameFormat = %s\n",
@@ -4654,6 +4740,8 @@ status_t QueryCodec(
}
// Color format query
+ // return colors in the order reported by the OMX component
+ // prefix "flexible" standard ones with the flexible equivalent
OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
InitOMXParams(&portFormat);
portFormat.nPortIndex = !isEncoder ? 1 : 0;
@@ -4664,6 +4752,21 @@ status_t QueryCodec(
if (err != OK) {
break;
}
+
+ OMX_U32 flexibleEquivalent;
+ if (ACodec::isFlexibleColorFormat(
+ omx, node, portFormat.eColorFormat, &flexibleEquivalent)) {
+ bool marked = false;
+ for (size_t i = 0; i < caps->mColorFormats.size(); i++) {
+ if (caps->mColorFormats.itemAt(i) == flexibleEquivalent) {
+ marked = true;
+ break;
+ }
+ }
+ if (!marked) {
+ caps->mColorFormats.push(flexibleEquivalent);
+ }
+ }
caps->mColorFormats.push(portFormat.eColorFormat);
}
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 5e79e78..821bd81 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -151,7 +151,7 @@ sp<MetaData> OggSource::getFormat() {
return mExtractor->mImpl->getFormat();
}
-status_t OggSource::start(MetaData *params) {
+status_t OggSource::start(MetaData * /* params */) {
if (mStarted) {
return INVALID_OPERATION;
}
@@ -320,25 +320,29 @@ status_t MyVorbisExtractor::seekToTime(int64_t timeUs) {
}
size_t left = 0;
- size_t right = mTableOfContents.size();
- while (left < right) {
- size_t center = left / 2 + right / 2 + (left & right & 1);
+ size_t right_plus_one = mTableOfContents.size();
+ while (left < right_plus_one) {
+ size_t center = left + (right_plus_one - left) / 2;
const TOCEntry &entry = mTableOfContents.itemAt(center);
if (timeUs < entry.mTimeUs) {
- right = center;
+ right_plus_one = center;
} else if (timeUs > entry.mTimeUs) {
left = center + 1;
} else {
- left = right = center;
+ left = center;
break;
}
}
+ if (left == mTableOfContents.size()) {
+ --left;
+ }
+
const TOCEntry &entry = mTableOfContents.itemAt(left);
- ALOGV("seeking to entry %d / %d at offset %lld",
+ ALOGV("seeking to entry %zu / %zu at offset %lld",
left, mTableOfContents.size(), entry.mPageOffset);
return seekToOffset(entry.mPageOffset);
@@ -381,7 +385,7 @@ ssize_t MyVorbisExtractor::readPage(off64_t offset, Page *page) {
ssize_t n;
if ((n = mSource->readAt(offset, header, sizeof(header)))
< (ssize_t)sizeof(header)) {
- ALOGV("failed to read %d bytes at offset 0x%016llx, got %ld bytes",
+ ALOGV("failed to read %zu bytes at offset 0x%016llx, got %zd bytes",
sizeof(header), offset, n);
if (n < 0) {
@@ -505,7 +509,7 @@ status_t MyVorbisExtractor::readNextPacket(MediaBuffer **out) {
packetSize);
if (n < (ssize_t)packetSize) {
- ALOGV("failed to read %d bytes at 0x%016llx, got %ld bytes",
+ ALOGV("failed to read %zu bytes at 0x%016llx, got %zd bytes",
packetSize, dataOffset, n);
return ERROR_IO;
}
@@ -546,7 +550,7 @@ status_t MyVorbisExtractor::readNextPacket(MediaBuffer **out) {
buffer = NULL;
}
- ALOGV("readPage returned %ld", n);
+ ALOGV("readPage returned %zd", n);
return n < 0 ? n : (status_t)ERROR_END_OF_STREAM;
}
@@ -590,7 +594,7 @@ status_t MyVorbisExtractor::init() {
if ((err = readNextPacket(&packet)) != OK) {
return err;
}
- ALOGV("read packet of size %d\n", packet->range_length());
+ ALOGV("read packet of size %zu\n", packet->range_length());
err = verifyHeader(packet, 1);
packet->release();
packet = NULL;
@@ -601,7 +605,7 @@ status_t MyVorbisExtractor::init() {
if ((err = readNextPacket(&packet)) != OK) {
return err;
}
- ALOGV("read packet of size %d\n", packet->range_length());
+ ALOGV("read packet of size %zu\n", packet->range_length());
err = verifyHeader(packet, 3);
packet->release();
packet = NULL;
@@ -612,7 +616,7 @@ status_t MyVorbisExtractor::init() {
if ((err = readNextPacket(&packet)) != OK) {
return err;
}
- ALOGV("read packet of size %d\n", packet->range_length());
+ ALOGV("read packet of size %zu\n", packet->range_length());
err = verifyHeader(packet, 5);
packet->release();
packet = NULL;
@@ -903,7 +907,7 @@ static void extractAlbumArt(
return;
}
- ALOGV("got flac of size %d", flacSize);
+ ALOGV("got flac of size %zu", flacSize);
uint32_t picType;
uint32_t typeLen;
@@ -953,7 +957,7 @@ static void extractAlbumArt(
goto exit;
}
- ALOGV("got image data, %d trailing bytes",
+ ALOGV("got image data, %zu trailing bytes",
flacSize - 32 - typeLen - descLen - dataLen);
fileMeta->setData(
@@ -998,7 +1002,7 @@ sp<MediaSource> OggExtractor::getTrack(size_t index) {
}
sp<MetaData> OggExtractor::getTrackMetaData(
- size_t index, uint32_t flags) {
+ size_t index, uint32_t /* flags */) {
if (index >= 1) {
return NULL;
}
diff --git a/media/libstagefright/SampleIterator.cpp b/media/libstagefright/SampleIterator.cpp
index eae721b..2748349 100644
--- a/media/libstagefright/SampleIterator.cpp
+++ b/media/libstagefright/SampleIterator.cpp
@@ -133,7 +133,8 @@ status_t SampleIterator::seekTo(uint32_t sampleIndex) {
}
status_t err;
- if ((err = findSampleTime(sampleIndex, &mCurrentSampleTime)) != OK) {
+ if ((err = findSampleTimeAndDuration(
+ sampleIndex, &mCurrentSampleTime, &mCurrentSampleDuration)) != OK) {
ALOGE("findSampleTime return error");
return err;
}
@@ -285,8 +286,8 @@ status_t SampleIterator::getSampleSizeDirect(
return OK;
}
-status_t SampleIterator::findSampleTime(
- uint32_t sampleIndex, uint32_t *time) {
+status_t SampleIterator::findSampleTimeAndDuration(
+ uint32_t sampleIndex, uint32_t *time, uint32_t *duration) {
if (sampleIndex >= mTable->mNumSampleSizes) {
return ERROR_OUT_OF_RANGE;
}
@@ -309,6 +310,8 @@ status_t SampleIterator::findSampleTime(
*time += mTable->getCompositionTimeOffset(sampleIndex);
+ *duration = mTTSDuration;
+
return OK;
}
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 8dfa365..bdd6d56 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -534,83 +534,72 @@ void SampleTable::buildSampleEntriesTable() {
}
status_t SampleTable::findSampleAtTime(
- uint32_t req_time, uint32_t *sample_index, uint32_t flags) {
+ uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+ uint32_t *sample_index, uint32_t flags) {
buildSampleEntriesTable();
uint32_t left = 0;
- uint32_t right = mNumSampleSizes;
- while (left < right) {
- uint32_t center = (left + right) / 2;
- uint32_t centerTime = mSampleTimeEntries[center].mCompositionTime;
+ uint32_t right_plus_one = mNumSampleSizes;
+ while (left < right_plus_one) {
+ uint32_t center = left + (right_plus_one - left) / 2;
+ uint64_t centerTime =
+ getSampleTime(center, scale_num, scale_den);
if (req_time < centerTime) {
- right = center;
+ right_plus_one = center;
} else if (req_time > centerTime) {
left = center + 1;
} else {
- left = center;
- break;
+ *sample_index = mSampleTimeEntries[center].mSampleIndex;
+ return OK;
}
}
- if (left == mNumSampleSizes) {
+ uint32_t closestIndex = left;
+
+ if (closestIndex == mNumSampleSizes) {
if (flags == kFlagAfter) {
return ERROR_OUT_OF_RANGE;
}
-
- --left;
+ flags = kFlagBefore;
+ } else if (closestIndex == 0) {
+ if (flags == kFlagBefore) {
+ // normally we should return out of range, but that is
+ // treated as end-of-stream. instead return first sample
+ //
+ // return ERROR_OUT_OF_RANGE;
+ }
+ flags = kFlagAfter;
}
- uint32_t closestIndex = left;
-
switch (flags) {
case kFlagBefore:
{
- while (closestIndex > 0
- && mSampleTimeEntries[closestIndex].mCompositionTime
- > req_time) {
- --closestIndex;
- }
+ --closestIndex;
break;
}
case kFlagAfter:
{
- while (closestIndex + 1 < mNumSampleSizes
- && mSampleTimeEntries[closestIndex].mCompositionTime
- < req_time) {
- ++closestIndex;
- }
+ // nothing to do
break;
}
default:
{
CHECK(flags == kFlagClosest);
-
- if (closestIndex > 0) {
- // Check left neighbour and pick closest.
- uint32_t absdiff1 =
- abs_difference(
- mSampleTimeEntries[closestIndex].mCompositionTime,
- req_time);
-
- uint32_t absdiff2 =
- abs_difference(
- mSampleTimeEntries[closestIndex - 1].mCompositionTime,
- req_time);
-
- if (absdiff1 > absdiff2) {
- closestIndex = closestIndex - 1;
- }
+ // pick closest based on timestamp. use abs_difference for safety
+ if (abs_difference(
+ getSampleTime(closestIndex, scale_num, scale_den), req_time) >
+ abs_difference(
+ req_time, getSampleTime(closestIndex - 1, scale_num, scale_den))) {
+ --closestIndex;
}
-
break;
}
}
*sample_index = mSampleTimeEntries[closestIndex].mSampleIndex;
-
return OK;
}
@@ -632,109 +621,85 @@ status_t SampleTable::findSyncSampleNear(
}
uint32_t left = 0;
- uint32_t right = mNumSyncSamples;
- while (left < right) {
- uint32_t center = left + (right - left) / 2;
+ uint32_t right_plus_one = mNumSyncSamples;
+ while (left < right_plus_one) {
+ uint32_t center = left + (right_plus_one - left) / 2;
uint32_t x = mSyncSamples[center];
if (start_sample_index < x) {
- right = center;
+ right_plus_one = center;
} else if (start_sample_index > x) {
left = center + 1;
} else {
- left = center;
- break;
+ *sample_index = x;
+ return OK;
}
}
+
if (left == mNumSyncSamples) {
if (flags == kFlagAfter) {
ALOGE("tried to find a sync frame after the last one: %d", left);
return ERROR_OUT_OF_RANGE;
}
- left = left - 1;
+ flags = kFlagBefore;
}
+ else if (left == 0) {
+ if (flags == kFlagBefore) {
+ ALOGE("tried to find a sync frame before the first one: %d", left);
- // Now ssi[left] is the sync sample index just before (or at)
- // start_sample_index.
- // Also start_sample_index < ssi[left + 1], if left + 1 < mNumSyncSamples.
-
- uint32_t x = mSyncSamples[left];
-
- if (left + 1 < mNumSyncSamples) {
- uint32_t y = mSyncSamples[left + 1];
-
- // our sample lies between sync samples x and y.
-
- status_t err = mSampleIterator->seekTo(start_sample_index);
- if (err != OK) {
- return err;
- }
-
- uint32_t sample_time = mSampleIterator->getSampleTime();
-
- err = mSampleIterator->seekTo(x);
- if (err != OK) {
- return err;
- }
- uint32_t x_time = mSampleIterator->getSampleTime();
-
- err = mSampleIterator->seekTo(y);
- if (err != OK) {
- return err;
- }
-
- uint32_t y_time = mSampleIterator->getSampleTime();
-
- if (abs_difference(x_time, sample_time)
- > abs_difference(y_time, sample_time)) {
- // Pick the sync sample closest (timewise) to the start-sample.
- x = y;
- ++left;
+ // normally we should return out of range, but that is
+ // treated as end-of-stream. instead seek to first sync
+ //
+ // return ERROR_OUT_OF_RANGE;
}
+ flags = kFlagAfter;
}
+ // Now ssi[left - 1] <(=) start_sample_index <= ssi[left]
switch (flags) {
case kFlagBefore:
{
- if (x > start_sample_index) {
- CHECK(left > 0);
-
- x = mSyncSamples[left - 1];
-
- if (x > start_sample_index) {
- // The table of sync sample indices was not sorted
- // properly.
- return ERROR_MALFORMED;
- }
- }
+ --left;
break;
}
-
case kFlagAfter:
{
- if (x < start_sample_index) {
- if (left + 1 >= mNumSyncSamples) {
- return ERROR_OUT_OF_RANGE;
- }
-
- x = mSyncSamples[left + 1];
-
- if (x < start_sample_index) {
- // The table of sync sample indices was not sorted
- // properly.
- return ERROR_MALFORMED;
- }
- }
-
+ // nothing to do
break;
}
-
default:
+ {
+ // this route is not used, but implement it nonetheless
+ CHECK(flags == kFlagClosest);
+
+ status_t err = mSampleIterator->seekTo(start_sample_index);
+ if (err != OK) {
+ return err;
+ }
+ uint32_t sample_time = mSampleIterator->getSampleTime();
+
+ err = mSampleIterator->seekTo(mSyncSamples[left]);
+ if (err != OK) {
+ return err;
+ }
+ uint32_t upper_time = mSampleIterator->getSampleTime();
+
+ err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
+ if (err != OK) {
+ return err;
+ }
+ uint32_t lower_time = mSampleIterator->getSampleTime();
+
+ // use abs_difference for safety
+ if (abs_difference(upper_time, sample_time) >
+ abs_difference(sample_time, lower_time)) {
+ --left;
+ }
break;
+ }
}
- *sample_index = x;
-
+ *sample_index = mSyncSamples[left];
return OK;
}
@@ -792,7 +757,8 @@ status_t SampleTable::getMetaDataForSample(
off64_t *offset,
size_t *size,
uint32_t *compositionTime,
- bool *isSyncSample) {
+ bool *isSyncSample,
+ uint32_t *sampleDuration) {
Mutex::Autolock autoLock(mLock);
status_t err;
@@ -834,6 +800,10 @@ status_t SampleTable::getMetaDataForSample(
}
}
+ if (sampleDuration) {
+ *sampleDuration = mSampleIterator->getSampleDuration();
+ }
+
return OK;
}
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index 773854f..e2e6d79 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -25,7 +25,7 @@
namespace android {
SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut) {
- mFrontPadding = skip;
+ mFrontPadding = mSkip = skip;
mBackPadding = cut;
mWriteHead = 0;
mReadHead = 0;
@@ -94,6 +94,7 @@ void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
void SkipCutBuffer::clear() {
mWriteHead = mReadHead = 0;
+ mFrontPadding = mSkip;
}
void SkipCutBuffer::write(const char *src, size_t num) {
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index af8186c..4449d57 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -24,6 +24,7 @@
#include <media/stagefright/StagefrightMediaScanner.h>
+#include <media/IMediaHTTPService.h>
#include <media/mediametadataretriever.h>
#include <private/media/VideoFrame.h>
@@ -117,7 +118,7 @@ MediaScanResult StagefrightMediaScanner::processFile(
}
MediaScanResult StagefrightMediaScanner::processFileInternal(
- const char *path, const char *mimeType,
+ const char *path, const char * /* mimeType */,
MediaScannerClient &client) {
const char *extension = strrchr(path, '.');
@@ -147,7 +148,7 @@ MediaScanResult StagefrightMediaScanner::processFileInternal(
status_t status;
if (fd < 0) {
// couldn't open it locally, maybe the media server can?
- status = mRetriever->setDataSource(path);
+ status = mRetriever->setDataSource(NULL /* httpService */, path);
} else {
status = mRetriever->setDataSource(fd, 0, 0x7ffffffffffffffL);
close(fd);
@@ -202,7 +203,7 @@ MediaScanResult StagefrightMediaScanner::processFileInternal(
return MEDIA_SCAN_RESULT_OK;
}
-char *StagefrightMediaScanner::extractAlbumArt(int fd) {
+MediaAlbumArt *StagefrightMediaScanner::extractAlbumArt(int fd) {
ALOGV("extractAlbumArt %d", fd);
off64_t size = lseek64(fd, 0, SEEK_END);
@@ -214,15 +215,9 @@ char *StagefrightMediaScanner::extractAlbumArt(int fd) {
sp<MediaMetadataRetriever> mRetriever(new MediaMetadataRetriever);
if (mRetriever->setDataSource(fd, 0, size) == OK) {
sp<IMemory> mem = mRetriever->extractAlbumArt();
-
if (mem != NULL) {
MediaAlbumArt *art = static_cast<MediaAlbumArt *>(mem->pointer());
-
- char *data = (char *)malloc(art->mSize + 4);
- *(int32_t *)data = art->mSize;
- memcpy(&data[4], &art[1], art->mSize);
-
- return data;
+ return art->clone();
}
}
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 19af4fb..101fc8a 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -16,10 +16,14 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "StagefrightMetadataRetriever"
+
+#include <inttypes.h>
+
#include <utils/Log.h>
#include "include/StagefrightMetadataRetriever.h"
+#include <media/IMediaHTTPService.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/ColorConverter.h>
#include <media/stagefright/DataSource.h>
@@ -28,6 +32,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/OMXCodec.h>
#include <media/stagefright/MediaDefs.h>
+#include <CharacterEncodingDetector.h>
namespace android {
@@ -50,7 +55,9 @@ StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
}
status_t StagefrightMetadataRetriever::setDataSource(
- const char *uri, const KeyedVector<String8, String8> *headers) {
+ const sp<IMediaHTTPService> &httpService,
+ const char *uri,
+ const KeyedVector<String8, String8> *headers) {
ALOGV("setDataSource(%s)", uri);
mParsedMetaData = false;
@@ -58,7 +65,7 @@ status_t StagefrightMetadataRetriever::setDataSource(
delete mAlbumArt;
mAlbumArt = NULL;
- mSource = DataSource::CreateFromURI(uri, headers);
+ mSource = DataSource::CreateFromURI(httpService, uri, headers);
if (mSource == NULL) {
ALOGE("Unable to create data source for '%s'.", uri);
@@ -83,7 +90,7 @@ status_t StagefrightMetadataRetriever::setDataSource(
int fd, int64_t offset, int64_t length) {
fd = dup(fd);
- ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
+ ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
mParsedMetaData = false;
mMetaData.clear();
@@ -238,7 +245,7 @@ static VideoFrame *extractVideoFrameWithCodecFlags(
const char *mime;
CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
- ALOGV("thumbNailTime = %lld us, timeUs = %lld us, mime = %s",
+ ALOGV("thumbNailTime = %" PRId64 " us, timeUs = %" PRId64 " us, mime = %s",
thumbNailTime, timeUs, mime);
}
}
@@ -321,7 +328,7 @@ static VideoFrame *extractVideoFrameWithCodecFlags(
VideoFrame *StagefrightMetadataRetriever::getFrameAtTime(
int64_t timeUs, int option) {
- ALOGV("getFrameAtTime: %lld us option: %d", timeUs, option);
+ ALOGV("getFrameAtTime: %" PRId64 " us option: %d", timeUs, option);
if (mExtractor.get() == NULL) {
ALOGV("no extractor.");
@@ -374,10 +381,7 @@ VideoFrame *StagefrightMetadataRetriever::getFrameAtTime(
size_t dataSize;
if (fileMeta->findData(kKeyAlbumArt, &type, &data, &dataSize)
&& mAlbumArt == NULL) {
- mAlbumArt = new MediaAlbumArt;
- mAlbumArt->mSize = dataSize;
- mAlbumArt->mData = new uint8_t[dataSize];
- memcpy(mAlbumArt->mData, data, dataSize);
+ mAlbumArt = MediaAlbumArt::fromData(dataSize, data);
}
VideoFrame *frame =
@@ -410,7 +414,7 @@ MediaAlbumArt *StagefrightMetadataRetriever::extractAlbumArt() {
}
if (mAlbumArt) {
- return new MediaAlbumArt(*mAlbumArt);
+ return mAlbumArt->clone();
}
return NULL;
@@ -447,48 +451,71 @@ void StagefrightMetadataRetriever::parseMetaData() {
struct Map {
int from;
int to;
+ const char *name;
};
static const Map kMap[] = {
- { kKeyMIMEType, METADATA_KEY_MIMETYPE },
- { kKeyCDTrackNumber, METADATA_KEY_CD_TRACK_NUMBER },
- { kKeyDiscNumber, METADATA_KEY_DISC_NUMBER },
- { kKeyAlbum, METADATA_KEY_ALBUM },
- { kKeyArtist, METADATA_KEY_ARTIST },
- { kKeyAlbumArtist, METADATA_KEY_ALBUMARTIST },
- { kKeyAuthor, METADATA_KEY_AUTHOR },
- { kKeyComposer, METADATA_KEY_COMPOSER },
- { kKeyDate, METADATA_KEY_DATE },
- { kKeyGenre, METADATA_KEY_GENRE },
- { kKeyTitle, METADATA_KEY_TITLE },
- { kKeyYear, METADATA_KEY_YEAR },
- { kKeyWriter, METADATA_KEY_WRITER },
- { kKeyCompilation, METADATA_KEY_COMPILATION },
- { kKeyLocation, METADATA_KEY_LOCATION },
+ { kKeyMIMEType, METADATA_KEY_MIMETYPE, NULL },
+ { kKeyCDTrackNumber, METADATA_KEY_CD_TRACK_NUMBER, "tracknumber" },
+ { kKeyDiscNumber, METADATA_KEY_DISC_NUMBER, "discnumber" },
+ { kKeyAlbum, METADATA_KEY_ALBUM, "album" },
+ { kKeyArtist, METADATA_KEY_ARTIST, "artist" },
+ { kKeyAlbumArtist, METADATA_KEY_ALBUMARTIST, "albumartist" },
+ { kKeyAuthor, METADATA_KEY_AUTHOR, NULL },
+ { kKeyComposer, METADATA_KEY_COMPOSER, "composer" },
+ { kKeyDate, METADATA_KEY_DATE, NULL },
+ { kKeyGenre, METADATA_KEY_GENRE, "genre" },
+ { kKeyTitle, METADATA_KEY_TITLE, "title" },
+ { kKeyYear, METADATA_KEY_YEAR, "year" },
+ { kKeyWriter, METADATA_KEY_WRITER, "writer" },
+ { kKeyCompilation, METADATA_KEY_COMPILATION, "compilation" },
+ { kKeyLocation, METADATA_KEY_LOCATION, NULL },
};
+
static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
+ CharacterEncodingDetector *detector = new CharacterEncodingDetector();
+
for (size_t i = 0; i < kNumMapEntries; ++i) {
const char *value;
if (meta->findCString(kMap[i].from, &value)) {
- mMetaData.add(kMap[i].to, String8(value));
+ if (kMap[i].name) {
+ // add to charset detector
+ detector->addTag(kMap[i].name, value);
+ } else {
+ // directly add to output list
+ mMetaData.add(kMap[i].to, String8(value));
+ }
+ }
+ }
+
+ detector->detectAndConvert();
+ int size = detector->size();
+ if (size) {
+ for (int i = 0; i < size; i++) {
+ const char *name;
+ const char *value;
+ detector->getTag(i, &name, &value);
+ for (size_t j = 0; j < kNumMapEntries; ++j) {
+ if (kMap[j].name && !strcmp(kMap[j].name, name)) {
+ mMetaData.add(kMap[j].to, String8(value));
+ }
+ }
}
}
+ delete detector;
const void *data;
uint32_t type;
size_t dataSize;
if (meta->findData(kKeyAlbumArt, &type, &data, &dataSize)
&& mAlbumArt == NULL) {
- mAlbumArt = new MediaAlbumArt;
- mAlbumArt->mSize = dataSize;
- mAlbumArt->mData = new uint8_t[dataSize];
- memcpy(mAlbumArt->mData, data, dataSize);
+ mAlbumArt = MediaAlbumArt::fromData(dataSize, data);
}
size_t numTracks = mExtractor->countTracks();
char tmp[32];
- sprintf(tmp, "%d", numTracks);
+ sprintf(tmp, "%zu", numTracks);
mMetaData.add(METADATA_KEY_NUM_TRACKS, String8(tmp));
@@ -545,7 +572,7 @@ void StagefrightMetadataRetriever::parseMetaData() {
}
// The duration value is a string representing the duration in ms.
- sprintf(tmp, "%lld", (maxDurationUs + 500) / 1000);
+ sprintf(tmp, "%" PRId64, (maxDurationUs + 500) / 1000);
mMetaData.add(METADATA_KEY_DURATION, String8(tmp));
if (hasAudio) {
@@ -573,7 +600,7 @@ void StagefrightMetadataRetriever::parseMetaData() {
if (mSource->getSize(&sourceSize) == OK) {
int64_t avgBitRate = (int64_t)(sourceSize * 8E6 / maxDurationUs);
- sprintf(tmp, "%lld", avgBitRate);
+ sprintf(tmp, "%" PRId64, avgBitRate);
mMetaData.add(METADATA_KEY_BITRATE, String8(tmp));
}
}
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 6b934d4..4e1c65c 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -16,6 +16,8 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "SurfaceMediaSource"
+#include <inttypes.h>
+
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/SurfaceMediaSource.h>
#include <media/stagefright/MediaDefs.h>
@@ -54,9 +56,9 @@ SurfaceMediaSource::SurfaceMediaSource(uint32_t bufferWidth, uint32_t bufferHeig
ALOGE("Invalid dimensions %dx%d", bufferWidth, bufferHeight);
}
- mBufferQueue = new BufferQueue();
- mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
- mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+ mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
+ mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
GRALLOC_USAGE_HW_TEXTURE);
sp<ISurfaceComposer> composer(ComposerService::getComposerService());
@@ -68,7 +70,7 @@ SurfaceMediaSource::SurfaceMediaSource(uint32_t bufferWidth, uint32_t bufferHeig
wp<ConsumerListener> listener = static_cast<ConsumerListener*>(this);
sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
- status_t err = mBufferQueue->consumerConnect(proxy, false);
+ status_t err = mConsumer->consumerConnect(proxy, false);
if (err != NO_ERROR) {
ALOGE("SurfaceMediaSource: error connecting to BufferQueue: %s (%d)",
strerror(-err), err);
@@ -99,13 +101,16 @@ void SurfaceMediaSource::dump(String8& result) const
dump(result, "", buffer, 1024);
}
-void SurfaceMediaSource::dump(String8& result, const char* prefix,
- char* buffer, size_t SIZE) const
+void SurfaceMediaSource::dump(
+ String8& result,
+ const char* /* prefix */,
+ char* buffer,
+ size_t /* SIZE */) const
{
Mutex::Autolock lock(mMutex);
result.append(buffer);
- mBufferQueue->dump(result, "");
+ mConsumer->dump(result, "");
}
status_t SurfaceMediaSource::setFrameRate(int32_t fps)
@@ -163,7 +168,7 @@ status_t SurfaceMediaSource::start(MetaData *params)
CHECK_GT(mMaxAcquiredBufferCount, 1);
status_t err =
- mBufferQueue->setMaxAcquiredBufferCount(mMaxAcquiredBufferCount);
+ mConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBufferCount);
if (err != OK) {
return err;
@@ -176,7 +181,7 @@ status_t SurfaceMediaSource::start(MetaData *params)
}
status_t SurfaceMediaSource::setMaxAcquiredBufferCount(size_t count) {
- ALOGV("setMaxAcquiredBufferCount(%d)", count);
+ ALOGV("setMaxAcquiredBufferCount(%zu)", count);
Mutex::Autolock lock(mMutex);
CHECK_GT(count, 1);
@@ -202,8 +207,11 @@ status_t SurfaceMediaSource::stop()
return OK;
}
+ mStarted = false;
+ mFrameAvailableCondition.signal();
+
while (mNumPendingBuffers > 0) {
- ALOGI("Still waiting for %d buffers to be returned.",
+ ALOGI("Still waiting for %zu buffers to be returned.",
mNumPendingBuffers);
#if DEBUG_PENDING_BUFFERS
@@ -215,11 +223,9 @@ status_t SurfaceMediaSource::stop()
mMediaBuffersAvailableCondition.wait(mMutex);
}
- mStarted = false;
- mFrameAvailableCondition.signal();
mMediaBuffersAvailableCondition.signal();
- return mBufferQueue->consumerDisconnect();
+ return mConsumer->consumerDisconnect();
}
sp<MetaData> SurfaceMediaSource::getFormat()
@@ -265,13 +271,12 @@ static void passMetadataBuffer(MediaBuffer **buffer,
memcpy(data, &type, 4);
memcpy(data + 4, &bufferHandle, sizeof(buffer_handle_t));
- ALOGV("handle = %p, , offset = %d, length = %d",
+ ALOGV("handle = %p, , offset = %zu, length = %zu",
bufferHandle, (*buffer)->range_length(), (*buffer)->range_offset());
}
-status_t SurfaceMediaSource::read( MediaBuffer **buffer,
- const ReadOptions *options)
-{
+status_t SurfaceMediaSource::read(
+ MediaBuffer **buffer, const ReadOptions * /* options */) {
ALOGV("read");
Mutex::Autolock lock(mMutex);
@@ -290,7 +295,7 @@ status_t SurfaceMediaSource::read( MediaBuffer **buffer,
// wait here till the frames come in from the client side
while (mStarted) {
- status_t err = mBufferQueue->acquireBuffer(&item, 0);
+ status_t err = mConsumer->acquireBuffer(&item, 0);
if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
// wait for a buffer to be queued
mFrameAvailableCondition.wait(mMutex);
@@ -313,7 +318,7 @@ status_t SurfaceMediaSource::read( MediaBuffer **buffer,
if (mStartTimeNs > 0) {
if (item.mTimestamp < mStartTimeNs) {
// This frame predates start of record, discard
- mBufferQueue->releaseBuffer(
+ mConsumer->releaseBuffer(
item.mBuf, item.mFrameNumber, EGL_NO_DISPLAY,
EGL_NO_SYNC_KHR, Fence::NO_FENCE);
continue;
@@ -360,7 +365,7 @@ status_t SurfaceMediaSource::read( MediaBuffer **buffer,
(*buffer)->setObserver(this);
(*buffer)->add_ref();
(*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp / 1000);
- ALOGV("Frames encoded = %d, timestamp = %lld, time diff = %lld",
+ ALOGV("Frames encoded = %d, timestamp = %" PRId64 ", time diff = %" PRId64,
mNumFramesEncoded, mCurrentTimestamp / 1000,
mCurrentTimestamp / 1000 - prevTimeStamp / 1000);
@@ -413,7 +418,7 @@ void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) {
ALOGV("Slot %d returned, matches handle = %p", id,
mSlots[id].mGraphicBuffer->handle);
- mBufferQueue->releaseBuffer(id, mSlots[id].mFrameNumber,
+ mConsumer->releaseBuffer(id, mSlots[id].mFrameNumber,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
Fence::NO_FENCE);
@@ -474,4 +479,8 @@ void SurfaceMediaSource::onBuffersReleased() {
}
}
+void SurfaceMediaSource::onSidebandStreamChanged() {
+ ALOG_ASSERT(false, "SurfaceMediaSource can't consume sideband streams");
+}
+
} // end of namespace android
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index 0afac69..1fdb244 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -17,7 +17,11 @@
#undef __STRICT_ANSI__
#define __STDINT_LIMITS
#define __STDC_LIMIT_MACROS
+
+#include <inttypes.h>
#include <stdint.h>
+#include <sys/prctl.h>
+#include <sys/time.h>
//#define LOG_NDEBUG 0
#define LOG_TAG "TimedEventQueue"
@@ -26,9 +30,6 @@
#include "include/TimedEventQueue.h"
-#include <sys/prctl.h>
-#include <sys/time.h>
-
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <binder/IServiceManager.h>
@@ -258,7 +259,7 @@ void TimedEventQueue::threadEntry() {
static int64_t kMaxTimeoutUs = 10000000ll; // 10 secs
bool timeoutCapped = false;
if (delay_us > kMaxTimeoutUs) {
- ALOGW("delay_us exceeds max timeout: %lld us", delay_us);
+ ALOGW("delay_us exceeds max timeout: %" PRId64 " us", delay_us);
// We'll never block for more than 10 secs, instead
// we will split up the full timeout into chunks of
@@ -337,7 +338,7 @@ void TimedEventQueue::acquireWakeLock_l()
status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
String16("TimedEventQueue"),
- String16("media"));
+ String16("media")); // not oneway
IPCThreadState::self()->restoreCallingIdentity(token);
if (status == NO_ERROR) {
mWakeLockToken = binder;
@@ -362,7 +363,7 @@ void TimedEventQueue::releaseWakeLock_l(bool force)
CHECK(mWakeLockToken != 0);
if (mPowerManager != 0) {
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0); // not oneway
IPCThreadState::self()->restoreCallingIdentity(token);
}
mWakeLockToken.clear();
@@ -376,8 +377,8 @@ void TimedEventQueue::clearPowerManager()
mPowerManager.clear();
}
-void TimedEventQueue::PMDeathRecipient::binderDied(const wp<IBinder>& who)
-{
+void TimedEventQueue::PMDeathRecipient::binderDied(
+ const wp<IBinder>& /* who */) {
mQueue->clearPowerManager();
}
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 216a329..25afc5b 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -17,11 +17,13 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "Utils"
#include <utils/Log.h>
+#include <ctype.h>
#include "include/ESDS.h"
#include <arpa/inet.h>
#include <cutils/properties.h>
+#include <media/openmax/OMX_Audio.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -83,6 +85,11 @@ status_t convertMetaDataToMessage(
msg->setInt64("durationUs", durationUs);
}
+ int avgBitRate;
+ if (meta->findInt32(kKeyBitRate, &avgBitRate)) {
+ msg->setInt32("bit-rate", avgBitRate);
+ }
+
int32_t isSync;
if (meta->findInt32(kKeyIsSyncFrame, &isSync) && isSync != 0) {
msg->setInt32("is-sync-frame", 1);
@@ -102,6 +109,25 @@ status_t convertMetaDataToMessage(
msg->setInt32("sar-width", sarWidth);
msg->setInt32("sar-height", sarHeight);
}
+
+ int32_t colorFormat;
+ if (meta->findInt32(kKeyColorFormat, &colorFormat)) {
+ msg->setInt32("color-format", colorFormat);
+ }
+
+ int32_t cropLeft, cropTop, cropRight, cropBottom;
+ if (meta->findRect(kKeyCropRect,
+ &cropLeft,
+ &cropTop,
+ &cropRight,
+ &cropBottom)) {
+ msg->setRect("crop", cropLeft, cropTop, cropRight, cropBottom);
+ }
+
+ int32_t rotationDegrees;
+ if (meta->findInt32(kKeyRotation, &rotationDegrees)) {
+ msg->setInt32("rotation-degrees", rotationDegrees);
+ }
} else if (!strncasecmp("audio/", mime, 6)) {
int32_t numChannels, sampleRate;
CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
@@ -128,6 +154,11 @@ status_t convertMetaDataToMessage(
if (meta->findInt32(kKeyIsADTS, &isADTS)) {
msg->setInt32("is-adts", true);
}
+
+ int32_t aacProfile = -1;
+ if (meta->findInt32(kKeyAACAOT, &aacProfile)) {
+ msg->setInt32("aac-profile", aacProfile);
+ }
}
int32_t maxInputSize;
@@ -135,6 +166,11 @@ status_t convertMetaDataToMessage(
msg->setInt32("max-input-size", maxInputSize);
}
+ int32_t rotationDegrees;
+ if (meta->findInt32(kKeyRotation, &rotationDegrees)) {
+ msg->setInt32("rotation-degrees", rotationDegrees);
+ }
+
uint32_t type;
const void *data;
size_t size;
@@ -216,6 +252,56 @@ status_t convertMetaDataToMessage(
buffer->meta()->setInt32("csd", true);
buffer->meta()->setInt64("timeUs", 0);
msg->setBuffer("csd-1", buffer);
+ } else if (meta->findData(kKeyHVCC, &type, &data, &size)) {
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ CHECK(size >= 7);
+ CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
+ uint8_t profile = ptr[1] & 31;
+ uint8_t level = ptr[12];
+ ptr += 22;
+ size -= 22;
+
+
+ size_t numofArrays = (char)ptr[0];
+ ptr += 1;
+ size -= 1;
+ size_t j = 0, i = 0;
+
+ sp<ABuffer> buffer = new ABuffer(1024);
+ buffer->setRange(0, 0);
+
+ for (i = 0; i < numofArrays; i++) {
+ ptr += 1;
+ size -= 1;
+
+ //Num of nals
+ size_t numofNals = U16_AT(ptr);
+
+ ptr += 2;
+ size -= 2;
+
+ for (j = 0; j < numofNals; j++) {
+ CHECK(size >= 2);
+ size_t length = U16_AT(ptr);
+
+ ptr += 2;
+ size -= 2;
+
+ CHECK(size >= length);
+
+ memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4);
+ memcpy(buffer->data() + buffer->size() + 4, ptr, length);
+ buffer->setRange(0, buffer->size() + 4 + length);
+
+ ptr += length;
+ size -= length;
+ }
+ }
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ msg->setBuffer("csd-0", buffer);
+
} else if (meta->findData(kKeyESDS, &type, &data, &size)) {
ESDS esds((const char *)data, size);
CHECK_EQ(esds.InitCheck(), (status_t)OK);
@@ -251,6 +337,13 @@ status_t convertMetaDataToMessage(
buffer->meta()->setInt32("csd", true);
buffer->meta()->setInt64("timeUs", 0);
msg->setBuffer("csd-1", buffer);
+ } else if (meta->findData(kKeyOpusHeader, &type, &data, &size)) {
+ sp<ABuffer> buffer = new ABuffer(size);
+ memcpy(buffer->data(), data, size);
+
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ msg->setBuffer("csd-0", buffer);
}
*format = msg;
@@ -277,7 +370,7 @@ static size_t reassembleAVCC(const sp<ABuffer> &csd0, const sp<ABuffer> csd1, ch
// there can't be another param here, so use all the rest
i = csd0->size();
}
- ALOGV("block at %d, last was %d", i, lastparamoffset);
+ ALOGV("block at %zu, last was %d", i, lastparamoffset);
if (lastparamoffset > 0) {
int size = i - lastparamoffset;
avcc[avccidx++] = size >> 8;
@@ -308,7 +401,7 @@ static size_t reassembleAVCC(const sp<ABuffer> &csd0, const sp<ABuffer> csd1, ch
// there can't be another param here, so use all the rest
i = csd1->size();
}
- ALOGV("block at %d, last was %d", i, lastparamoffset);
+ ALOGV("block at %zu, last was %d", i, lastparamoffset);
if (lastparamoffset > 0) {
int size = i - lastparamoffset;
avcc[avccidx++] = size >> 8;
@@ -401,6 +494,25 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
meta->setInt32(kKeySARWidth, sarWidth);
meta->setInt32(kKeySARHeight, sarHeight);
}
+
+ int32_t colorFormat;
+ if (msg->findInt32("color-format", &colorFormat)) {
+ meta->setInt32(kKeyColorFormat, colorFormat);
+ }
+
+ int32_t cropLeft, cropTop, cropRight, cropBottom;
+ if (msg->findRect("crop",
+ &cropLeft,
+ &cropTop,
+ &cropRight,
+ &cropBottom)) {
+ meta->setRect(kKeyCropRect, cropLeft, cropTop, cropRight, cropBottom);
+ }
+
+ int32_t rotationDegrees;
+ if (msg->findInt32("rotation-degrees", &rotationDegrees)) {
+ meta->setInt32(kKeyRotation, rotationDegrees);
+ }
} else if (mime.startsWith("audio/")) {
int32_t numChannels;
if (msg->findInt32("channel-count", &numChannels)) {
@@ -452,6 +564,11 @@ void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
}
}
+ int32_t timeScale;
+ if (msg->findInt32("time-scale", &timeScale)) {
+ meta->setInt32(kKeyTimeScale, timeScale);
+ }
+
// XXX TODO add whatever other keys there are
#if 0
@@ -523,6 +640,7 @@ static const struct mime_conv_t mimeLookup[] = {
{ MEDIA_MIMETYPE_AUDIO_AMR_WB, AUDIO_FORMAT_AMR_WB },
{ MEDIA_MIMETYPE_AUDIO_AAC, AUDIO_FORMAT_AAC },
{ MEDIA_MIMETYPE_AUDIO_VORBIS, AUDIO_FORMAT_VORBIS },
+ { MEDIA_MIMETYPE_AUDIO_OPUS, AUDIO_FORMAT_OPUS},
{ 0, AUDIO_FORMAT_INVALID }
};
@@ -540,10 +658,46 @@ const struct mime_conv_t* p = &mimeLookup[0];
return BAD_VALUE;
}
+struct aac_format_conv_t {
+ OMX_AUDIO_AACPROFILETYPE eAacProfileType;
+ audio_format_t format;
+};
+
+static const struct aac_format_conv_t profileLookup[] = {
+ { OMX_AUDIO_AACObjectMain, AUDIO_FORMAT_AAC_MAIN},
+ { OMX_AUDIO_AACObjectLC, AUDIO_FORMAT_AAC_LC},
+ { OMX_AUDIO_AACObjectSSR, AUDIO_FORMAT_AAC_SSR},
+ { OMX_AUDIO_AACObjectLTP, AUDIO_FORMAT_AAC_LTP},
+ { OMX_AUDIO_AACObjectHE, AUDIO_FORMAT_AAC_HE_V1},
+ { OMX_AUDIO_AACObjectScalable, AUDIO_FORMAT_AAC_SCALABLE},
+ { OMX_AUDIO_AACObjectERLC, AUDIO_FORMAT_AAC_ERLC},
+ { OMX_AUDIO_AACObjectLD, AUDIO_FORMAT_AAC_LD},
+ { OMX_AUDIO_AACObjectHE_PS, AUDIO_FORMAT_AAC_HE_V2},
+ { OMX_AUDIO_AACObjectELD, AUDIO_FORMAT_AAC_ELD},
+ { OMX_AUDIO_AACObjectNull, AUDIO_FORMAT_AAC},
+};
+
+void mapAACProfileToAudioFormat( audio_format_t& format, uint64_t eAacProfile)
+{
+const struct aac_format_conv_t* p = &profileLookup[0];
+ while (p->eAacProfileType != OMX_AUDIO_AACObjectNull) {
+ if (eAacProfile == p->eAacProfileType) {
+ format = p->format;
+ return;
+ }
+ ++p;
+ }
+ format = AUDIO_FORMAT_AAC;
+ return;
+}
+
bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
bool isStreaming, audio_stream_type_t streamType)
{
const char *mime;
+ if (meta == NULL) {
+ return false;
+ }
CHECK(meta->findCString(kKeyMIMEType, &mime));
audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
@@ -562,15 +716,11 @@ bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
return false;
}
- // check whether it is ELD/LD content -> no offloading
- // FIXME: this should depend on audio DSP capabilities. mapMimeToAudioFormat() should use the
- // metadata to refine the AAC format and the audio HAL should only list supported profiles.
+ // Redefine aac format according to its profile
+ // Offloading depends on audio DSP capabilities.
int32_t aacaot = -1;
if (meta->findInt32(kKeyAACAOT, &aacaot)) {
- if (aacaot == 23 || aacaot == 39 ) {
- ALOGV("track of type '%s' is ELD/LD content", mime);
- return false;
- }
+ mapAACProfileToAudioFormat(info.format,(OMX_AUDIO_AACPROFILETYPE) aacaot);
}
int32_t srate = -1;
@@ -615,5 +765,40 @@ bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
return AudioSystem::isOffloadSupported(info);
}
+AString uriDebugString(const AString &uri, bool incognito) {
+ if (incognito) {
+ return AString("<URI suppressed>");
+ }
+
+ char prop[PROPERTY_VALUE_MAX];
+ if (property_get("media.stagefright.log-uri", prop, "false") &&
+ (!strcmp(prop, "1") || !strcmp(prop, "true"))) {
+ return uri;
+ }
+
+ // find scheme
+ AString scheme;
+ const char *chars = uri.c_str();
+ for (size_t i = 0; i < uri.size(); i++) {
+ const char c = chars[i];
+ if (!isascii(c)) {
+ break;
+ } else if (isalpha(c)) {
+ continue;
+ } else if (i == 0) {
+ // first character must be a letter
+ break;
+ } else if (isdigit(c) || c == '+' || c == '.' || c =='-') {
+ continue;
+ } else if (c != ':') {
+ break;
+ }
+ scheme = AString(uri, 0, i);
+ scheme.append("://<suppressed>");
+ return scheme;
+ }
+ return AString("<no-scheme URI suppressed>");
+}
+
} // namespace android
diff --git a/media/libstagefright/VBRISeeker.cpp b/media/libstagefright/VBRISeeker.cpp
index a245f2c..e988f6d 100644
--- a/media/libstagefright/VBRISeeker.cpp
+++ b/media/libstagefright/VBRISeeker.cpp
@@ -16,6 +16,9 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "VBRISeeker"
+
+#include <inttypes.h>
+
#include <utils/Log.h>
#include "include/VBRISeeker.h"
@@ -75,7 +78,7 @@ sp<VBRISeeker> VBRISeeker::CreateFromSource(
size_t entrySize = U16_AT(&vbriHeader[22]);
size_t scale = U16_AT(&vbriHeader[20]);
- ALOGV("%d entries, scale=%d, size_per_entry=%d",
+ ALOGV("%zu entries, scale=%zu, size_per_entry=%zu",
numEntries,
scale,
entrySize);
@@ -119,7 +122,7 @@ sp<VBRISeeker> VBRISeeker::CreateFromSource(
seeker->mSegments.push(numBytes);
- ALOGV("entry #%d: %d offset 0x%08lx", i, numBytes, offset);
+ ALOGV("entry #%zu: %u offset 0x%016llx", i, numBytes, offset);
offset += numBytes;
}
@@ -160,7 +163,7 @@ bool VBRISeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
*pos += mSegments.itemAt(segmentIndex++);
}
- ALOGV("getOffsetForTime %lld us => 0x%08lx", *timeUs, *pos);
+ ALOGV("getOffsetForTime %" PRId64 " us => 0x%016llx", *timeUs, *pos);
*timeUs = nowUs;
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 22af6fb..a4a651d 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -127,7 +127,7 @@ sp<MediaSource> WAVExtractor::getTrack(size_t index) {
}
sp<MetaData> WAVExtractor::getTrackMetaData(
- size_t index, uint32_t flags) {
+ size_t index, uint32_t /* flags */) {
if (mInitCheck != OK || index > 0) {
return NULL;
}
@@ -358,7 +358,7 @@ WAVSource::~WAVSource() {
}
}
-status_t WAVSource::start(MetaData *params) {
+status_t WAVSource::start(MetaData * /* params */) {
ALOGV("WAVSource::start");
CHECK(!mStarted);
@@ -414,7 +414,7 @@ status_t WAVSource::read(
} else {
pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3);
}
- if (pos > mSize) {
+ if (pos > (off64_t)mSize) {
pos = mSize;
}
mCurrentPos = pos + mOffset;
@@ -439,6 +439,10 @@ status_t WAVSource::read(
maxBytesToRead = maxBytesAvailable;
}
+ // read only integral amounts of audio unit frames.
+ const size_t inputUnitFrameSize = mNumChannels * mBitsPerSample / 8;
+ maxBytesToRead -= maxBytesToRead % inputUnitFrameSize;
+
if (mWaveFormat == WAVE_FORMAT_MSGSM) {
// Microsoft packs 2 frames into 65 bytes, rather than using separate 33-byte frames,
// so read multiples of 65, and use smaller buffers to account for ~10:1 expansion ratio
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index b822868..38a1f6b 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -40,6 +40,25 @@ unsigned parseUE(ABitReader *br) {
return x + (1u << numZeroes) - 1;
}
+signed parseSE(ABitReader *br) {
+ unsigned codeNum = parseUE(br);
+
+ return (codeNum & 1) ? (codeNum + 1) / 2 : -(codeNum / 2);
+}
+
+static void skipScalingList(ABitReader *br, size_t sizeOfScalingList) {
+ size_t lastScale = 8;
+ size_t nextScale = 8;
+ for (size_t j = 0; j < sizeOfScalingList; ++j) {
+ if (nextScale != 0) {
+ signed delta_scale = parseSE(br);
+ nextScale = (lastScale + delta_scale + 256) % 256;
+ }
+
+ lastScale = (nextScale == 0) ? lastScale : nextScale;
+ }
+}
+
// Determine video dimensions from the sequence parameterset.
void FindAVCDimensions(
const sp<ABuffer> &seqParamSet,
@@ -63,7 +82,24 @@ void FindAVCDimensions(
parseUE(&br); // bit_depth_luma_minus8
parseUE(&br); // bit_depth_chroma_minus8
br.skipBits(1); // qpprime_y_zero_transform_bypass_flag
- CHECK_EQ(br.getBits(1), 0u); // seq_scaling_matrix_present_flag
+
+ if (br.getBits(1)) { // seq_scaling_matrix_present_flag
+ for (size_t i = 0; i < 8; ++i) {
+ if (br.getBits(1)) { // seq_scaling_list_present_flag[i]
+
+ // WARNING: the code below has not ever been exercised...
+ // need a real-world example.
+
+ if (i < 6) {
+ // ScalingList4x4[i],16,...
+ skipScalingList(&br, 16);
+ } else {
+ // ScalingList8x8[i-6],64,...
+ skipScalingList(&br, 64);
+ }
+ }
+ }
+ }
}
parseUE(&br); // log2_max_frame_num_minus4
@@ -251,9 +287,7 @@ status_t getNextNALUnit(
return OK;
}
-static sp<ABuffer> FindNAL(
- const uint8_t *data, size_t size, unsigned nalType,
- size_t *stopOffset) {
+static sp<ABuffer> FindNAL(const uint8_t *data, size_t size, unsigned nalType) {
const uint8_t *nalStart;
size_t nalSize;
while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
@@ -293,7 +327,7 @@ sp<MetaData> MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit) {
const uint8_t *data = accessUnit->data();
size_t size = accessUnit->size();
- sp<ABuffer> seqParamSet = FindNAL(data, size, 7, NULL);
+ sp<ABuffer> seqParamSet = FindNAL(data, size, 7);
if (seqParamSet == NULL) {
return NULL;
}
@@ -303,8 +337,7 @@ sp<MetaData> MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit) {
FindAVCDimensions(
seqParamSet, &width, &height, &sarWidth, &sarHeight);
- size_t stopOffset;
- sp<ABuffer> picParamSet = FindNAL(data, size, 8, &stopOffset);
+ sp<ABuffer> picParamSet = FindNAL(data, size, 8);
CHECK(picParamSet != NULL);
size_t csdSize =
diff --git a/media/libstagefright/chromium_http/Android.mk b/media/libstagefright/chromium_http/Android.mk
deleted file mode 100644
index 109e3fe..0000000
--- a/media/libstagefright/chromium_http/Android.mk
+++ /dev/null
@@ -1,39 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-ifneq ($(TARGET_BUILD_PDK), true)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- DataUriSource.cpp \
- ChromiumHTTPDataSource.cpp \
- support.cpp \
- chromium_http_stub.cpp
-
-LOCAL_C_INCLUDES:= \
- $(TOP)/frameworks/av/media/libstagefright \
- $(TOP)/frameworks/native/include/media/openmax \
- external/chromium \
- external/chromium/android
-
-LOCAL_CFLAGS += -Wno-multichar
-
-LOCAL_SHARED_LIBRARIES += \
- libbinder \
- libstlport \
- libchromium_net \
- libutils \
- libbinder \
- libcutils \
- liblog \
- libstagefright_foundation \
- libstagefright \
- libdrmframework
-
-include external/stlport/libstlport.mk
-
-LOCAL_MODULE:= libstagefright_chromium_http
-
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_SHARED_LIBRARY)
-endif
diff --git a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
deleted file mode 100644
index 7e5c280..0000000
--- a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ChromiumHTTPDataSource"
-#include <media/stagefright/foundation/ADebug.h>
-
-#include "include/ChromiumHTTPDataSource.h"
-
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/MediaErrors.h>
-
-#include "support.h"
-
-#include <cutils/properties.h> // for property_get
-
-namespace android {
-
-ChromiumHTTPDataSource::ChromiumHTTPDataSource(uint32_t flags)
- : mFlags(flags),
- mState(DISCONNECTED),
- mDelegate(new SfDelegate),
- mCurrentOffset(0),
- mIOResult(OK),
- mContentSize(-1),
- mDecryptHandle(NULL),
- mDrmManagerClient(NULL) {
- mDelegate->setOwner(this);
-}
-
-ChromiumHTTPDataSource::~ChromiumHTTPDataSource() {
- disconnect();
-
- delete mDelegate;
- mDelegate = NULL;
-
- clearDRMState_l();
-
- if (mDrmManagerClient != NULL) {
- delete mDrmManagerClient;
- mDrmManagerClient = NULL;
- }
-}
-
-status_t ChromiumHTTPDataSource::connect(
- const char *uri,
- const KeyedVector<String8, String8> *headers,
- off64_t offset) {
- Mutex::Autolock autoLock(mLock);
-
- uid_t uid;
- if (getUID(&uid)) {
- mDelegate->setUID(uid);
- }
-
-#if defined(LOG_NDEBUG) && !LOG_NDEBUG
- LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG, "connect on behalf of uid %d", uid);
-#endif
-
- return connect_l(uri, headers, offset);
-}
-
-status_t ChromiumHTTPDataSource::connect_l(
- const char *uri,
- const KeyedVector<String8, String8> *headers,
- off64_t offset) {
- if (mState != DISCONNECTED) {
- disconnect_l();
- }
-
-#if defined(LOG_NDEBUG) && !LOG_NDEBUG
- LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG,
- "connect to <URL suppressed> @%lld", offset);
-#endif
-
- mURI = uri;
- mContentType = String8("application/octet-stream");
-
- if (headers != NULL) {
- mHeaders = *headers;
- } else {
- mHeaders.clear();
- }
-
- mState = CONNECTING;
- mContentSize = -1;
- mCurrentOffset = offset;
-
- mDelegate->initiateConnection(mURI.c_str(), &mHeaders, offset);
-
- while (mState == CONNECTING || mState == DISCONNECTING) {
- mCondition.wait(mLock);
- }
-
- return mState == CONNECTED ? OK : mIOResult;
-}
-
-void ChromiumHTTPDataSource::onRedirect(const char *url) {
- Mutex::Autolock autoLock(mLock);
- mURI = url;
-}
-
-void ChromiumHTTPDataSource::onConnectionEstablished(
- int64_t contentSize, const char *contentType) {
- Mutex::Autolock autoLock(mLock);
-
- if (mState != CONNECTING) {
- // We may have initiated disconnection.
- CHECK_EQ(mState, DISCONNECTING);
- return;
- }
-
- mState = CONNECTED;
- mContentSize = (contentSize < 0) ? -1 : contentSize + mCurrentOffset;
- mContentType = String8(contentType);
- mCondition.broadcast();
-}
-
-void ChromiumHTTPDataSource::onConnectionFailed(status_t err) {
- Mutex::Autolock autoLock(mLock);
- mState = DISCONNECTED;
- mCondition.broadcast();
-
- // mURI.clear();
-
- mIOResult = err;
-}
-
-void ChromiumHTTPDataSource::disconnect() {
- Mutex::Autolock autoLock(mLock);
- disconnect_l();
-}
-
-void ChromiumHTTPDataSource::disconnect_l() {
- if (mState == DISCONNECTED) {
- return;
- }
-
- mState = DISCONNECTING;
- mIOResult = -EINTR;
-
- mDelegate->initiateDisconnect();
-
- while (mState == DISCONNECTING) {
- mCondition.wait(mLock);
- }
-
- CHECK_EQ((int)mState, (int)DISCONNECTED);
-}
-
-status_t ChromiumHTTPDataSource::initCheck() const {
- Mutex::Autolock autoLock(mLock);
-
- return mState == CONNECTED ? OK : NO_INIT;
-}
-
-ssize_t ChromiumHTTPDataSource::readAt(off64_t offset, void *data, size_t size) {
- Mutex::Autolock autoLock(mLock);
-
- if (mState != CONNECTED) {
- return INVALID_OPERATION;
- }
-
-#if 0
- char value[PROPERTY_VALUE_MAX];
- if (property_get("media.stagefright.disable-net", value, 0)
- && (!strcasecmp(value, "true") || !strcmp(value, "1"))) {
- LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Simulating that the network is down.");
- disconnect_l();
- return ERROR_IO;
- }
-#endif
-
- if (offset != mCurrentOffset) {
- AString tmp = mURI;
- KeyedVector<String8, String8> tmpHeaders = mHeaders;
-
- disconnect_l();
-
- status_t err = connect_l(tmp.c_str(), &tmpHeaders, offset);
-
- if (err != OK) {
- return err;
- }
- }
-
- mState = READING;
-
- int64_t startTimeUs = ALooper::GetNowUs();
-
- mDelegate->initiateRead(data, size);
-
- while (mState == READING) {
- mCondition.wait(mLock);
- }
-
- if (mIOResult < OK) {
- return mIOResult;
- }
-
- if (mState == CONNECTED) {
- int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
-
- // The read operation was successful, mIOResult contains
- // the number of bytes read.
- addBandwidthMeasurement(mIOResult, delayUs);
-
- mCurrentOffset += mIOResult;
- return mIOResult;
- }
-
- return ERROR_IO;
-}
-
-void ChromiumHTTPDataSource::onReadCompleted(ssize_t size) {
- Mutex::Autolock autoLock(mLock);
-
- mIOResult = size;
-
- if (mState == READING) {
- mState = CONNECTED;
- mCondition.broadcast();
- }
-}
-
-status_t ChromiumHTTPDataSource::getSize(off64_t *size) {
- Mutex::Autolock autoLock(mLock);
-
- if (mContentSize < 0) {
- return ERROR_UNSUPPORTED;
- }
-
- *size = mContentSize;
-
- return OK;
-}
-
-uint32_t ChromiumHTTPDataSource::flags() {
- return kWantsPrefetching | kIsHTTPBasedSource;
-}
-
-// static
-void ChromiumHTTPDataSource::InitiateRead(
- ChromiumHTTPDataSource *me, void *data, size_t size) {
- me->initiateRead(data, size);
-}
-
-void ChromiumHTTPDataSource::initiateRead(void *data, size_t size) {
- mDelegate->initiateRead(data, size);
-}
-
-void ChromiumHTTPDataSource::onDisconnectComplete() {
- Mutex::Autolock autoLock(mLock);
- CHECK_EQ((int)mState, (int)DISCONNECTING);
-
- mState = DISCONNECTED;
- // mURI.clear();
- mIOResult = -ENOTCONN;
-
- mCondition.broadcast();
-}
-
-sp<DecryptHandle> ChromiumHTTPDataSource::DrmInitialization(const char* mime) {
- Mutex::Autolock autoLock(mLock);
-
- if (mDrmManagerClient == NULL) {
- mDrmManagerClient = new DrmManagerClient();
- }
-
- if (mDrmManagerClient == NULL) {
- return NULL;
- }
-
- if (mDecryptHandle == NULL) {
- /* Note if redirect occurs, mUri is the redirect uri instead of the
- * original one
- */
- mDecryptHandle = mDrmManagerClient->openDecryptSession(
- String8(mURI.c_str()), mime);
- }
-
- if (mDecryptHandle == NULL) {
- delete mDrmManagerClient;
- mDrmManagerClient = NULL;
- }
-
- return mDecryptHandle;
-}
-
-void ChromiumHTTPDataSource::getDrmInfo(
- sp<DecryptHandle> &handle, DrmManagerClient **client) {
- Mutex::Autolock autoLock(mLock);
-
- handle = mDecryptHandle;
- *client = mDrmManagerClient;
-}
-
-String8 ChromiumHTTPDataSource::getUri() {
- Mutex::Autolock autoLock(mLock);
-
- return String8(mURI.c_str());
-}
-
-String8 ChromiumHTTPDataSource::getMIMEType() const {
- Mutex::Autolock autoLock(mLock);
-
- return mContentType;
-}
-
-void ChromiumHTTPDataSource::clearDRMState_l() {
- if (mDecryptHandle != NULL) {
- // To release mDecryptHandle
- CHECK(mDrmManagerClient);
- mDrmManagerClient->closeDecryptSession(mDecryptHandle);
- mDecryptHandle = NULL;
- }
-}
-
-status_t ChromiumHTTPDataSource::reconnectAtOffset(off64_t offset) {
- Mutex::Autolock autoLock(mLock);
-
- if (mURI.empty()) {
- return INVALID_OPERATION;
- }
-
- LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Reconnecting...");
- status_t err = connect_l(mURI.c_str(), &mHeaders, offset);
- if (err != OK) {
- LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "Reconnect failed w/ err 0x%08x", err);
- }
-
- return err;
-}
-
-// static
-status_t ChromiumHTTPDataSource::UpdateProxyConfig(
- const char *host, int32_t port, const char *exclusionList) {
- return SfDelegate::UpdateProxyConfig(host, port, exclusionList);
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/chromium_http/DataUriSource.cpp b/media/libstagefright/chromium_http/DataUriSource.cpp
deleted file mode 100644
index ecf3fa1..0000000
--- a/media/libstagefright/chromium_http/DataUriSource.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <include/DataUriSource.h>
-
-#include <net/base/data_url.h>
-#include <googleurl/src/gurl.h>
-
-
-namespace android {
-
-DataUriSource::DataUriSource(const char *uri) :
- mDataUri(uri),
- mInited(NO_INIT) {
-
- // Copy1: const char *uri -> String8 mDataUri.
- std::string mimeTypeStr, unusedCharsetStr, dataStr;
- // Copy2: String8 mDataUri -> std::string
- const bool ret = net::DataURL::Parse(
- GURL(std::string(mDataUri.string())),
- &mimeTypeStr, &unusedCharsetStr, &dataStr);
- // Copy3: std::string dataStr -> AString mData
- mData.setTo(dataStr.data(), dataStr.length());
- mInited = ret ? OK : UNKNOWN_ERROR;
-
- // The chromium data url implementation defaults to using "text/plain"
- // if no mime type is specified. We prefer to leave this unspecified
- // instead, since the mime type is sniffed in most cases.
- if (mimeTypeStr != "text/plain") {
- mMimeType = mimeTypeStr.c_str();
- }
-}
-
-ssize_t DataUriSource::readAt(off64_t offset, void *out, size_t size) {
- if (mInited != OK) {
- return mInited;
- }
-
- const off64_t length = mData.size();
- if (offset >= length) {
- return UNKNOWN_ERROR;
- }
-
- const char *dataBuf = mData.c_str();
- const size_t bytesToCopy =
- offset + size >= length ? (length - offset) : size;
-
- if (bytesToCopy > 0) {
- memcpy(out, dataBuf + offset, bytesToCopy);
- }
-
- return bytesToCopy;
-}
-
-} // namespace android
diff --git a/media/libstagefright/chromium_http/support.cpp b/media/libstagefright/chromium_http/support.cpp
deleted file mode 100644
index 3de4877..0000000
--- a/media/libstagefright/chromium_http/support.cpp
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ChromiumHTTPDataSourceSupport"
-#include <utils/Log.h>
-
-#include <media/stagefright/foundation/AString.h>
-
-#include "support.h"
-
-#include "android/net/android_network_library_impl.h"
-#include "base/logging.h"
-#include "base/threading/thread.h"
-#include "net/base/cert_verifier.h"
-#include "net/base/cookie_monster.h"
-#include "net/base/host_resolver.h"
-#include "net/base/ssl_config_service.h"
-#include "net/http/http_auth_handler_factory.h"
-#include "net/http/http_cache.h"
-#include "net/proxy/proxy_config_service_android.h"
-
-#include "include/ChromiumHTTPDataSource.h"
-#include <arpa/inet.h>
-#include <binder/Parcel.h>
-#include <cutils/log.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-#include <string>
-
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <binder/IServiceManager.h>
-
-namespace android {
-
-// must be kept in sync with interface defined in IAudioService.aidl
-class IAudioService : public IInterface
-{
-public:
- DECLARE_META_INTERFACE(AudioService);
-
- virtual int verifyX509CertChain(
- const std::vector<std::string>& cert_chain,
- const std::string& hostname,
- const std::string& auth_type) = 0;
-};
-
-class BpAudioService : public BpInterface<IAudioService>
-{
-public:
- BpAudioService(const sp<IBinder>& impl)
- : BpInterface<IAudioService>(impl)
- {
- }
-
- virtual int verifyX509CertChain(
- const std::vector<std::string>& cert_chain,
- const std::string& hostname,
- const std::string& auth_type)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioService::getInterfaceDescriptor());
-
- // The vector of std::string we get isn't really a vector of strings,
- // but rather a vector of binary certificate data. If we try to pass
- // it to Java language code as a string, it ends up mangled on the other
- // side, so send them as bytes instead.
- // Since we can't send an array of byte arrays, send a single array,
- // which will be split out by the recipient.
-
- int numcerts = cert_chain.size();
- data.writeInt32(numcerts);
- size_t total = 0;
- for (int i = 0; i < numcerts; i++) {
- total += cert_chain[i].size();
- }
- size_t bytesize = total + numcerts * 4;
- uint8_t *bytes = (uint8_t*) malloc(bytesize);
- if (!bytes) {
- return 5; // SSL_INVALID
- }
- ALOGV("%d certs: %d -> %d", numcerts, total, bytesize);
-
- int offset = 0;
- for (int i = 0; i < numcerts; i++) {
- int32_t certsize = cert_chain[i].size();
- // store this in a known order, which just happens to match the default
- // byte order of a java ByteBuffer
- int32_t bigsize = htonl(certsize);
- ALOGV("cert %d, size %d", i, certsize);
- memcpy(bytes + offset, &bigsize, sizeof(bigsize));
- offset += sizeof(bigsize);
- memcpy(bytes + offset, cert_chain[i].data(), certsize);
- offset += certsize;
- }
- data.writeByteArray(bytesize, bytes);
- free(bytes);
- data.writeString16(String16(hostname.c_str()));
- data.writeString16(String16(auth_type.c_str()));
-
- int32_t result;
- if (remote()->transact(IBinder::FIRST_CALL_TRANSACTION, data, &reply) != NO_ERROR
- || reply.readExceptionCode() < 0 || reply.readInt32(&result) != NO_ERROR) {
- return 5; // SSL_INVALID;
- }
- return result;
- }
-
-};
-
-IMPLEMENT_META_INTERFACE(AudioService, "android.media.IAudioService");
-
-
-static Mutex gNetworkThreadLock;
-static base::Thread *gNetworkThread = NULL;
-static scoped_refptr<SfRequestContext> gReqContext;
-static scoped_ptr<net::NetworkChangeNotifier> gNetworkChangeNotifier;
-
-bool logMessageHandler(
- int severity,
- const char* file,
- int line,
- size_t message_start,
- const std::string& str) {
- int androidSeverity = ANDROID_LOG_VERBOSE;
- switch(severity) {
- case logging::LOG_FATAL:
- androidSeverity = ANDROID_LOG_FATAL;
- break;
- case logging::LOG_ERROR_REPORT:
- case logging::LOG_ERROR:
- androidSeverity = ANDROID_LOG_ERROR;
- break;
- case logging::LOG_WARNING:
- androidSeverity = ANDROID_LOG_WARN;
- break;
- default:
- androidSeverity = ANDROID_LOG_VERBOSE;
- break;
- }
- android_printLog(androidSeverity, "chromium-libstagefright",
- "%s:%d: %s", file, line, str.c_str());
- return false;
-}
-
-struct AutoPrioritySaver {
- AutoPrioritySaver()
- : mTID(androidGetTid()),
- mPrevPriority(androidGetThreadPriority(mTID)) {
- androidSetThreadPriority(mTID, ANDROID_PRIORITY_NORMAL);
- }
-
- ~AutoPrioritySaver() {
- androidSetThreadPriority(mTID, mPrevPriority);
- }
-
-private:
- pid_t mTID;
- int mPrevPriority;
-
- DISALLOW_EVIL_CONSTRUCTORS(AutoPrioritySaver);
-};
-
-static void InitializeNetworkThreadIfNecessary() {
- Mutex::Autolock autoLock(gNetworkThreadLock);
-
- if (gNetworkThread == NULL) {
- // Make sure any threads spawned by the chromium framework are
- // running at normal priority instead of inheriting this thread's.
- AutoPrioritySaver saver;
-
- gNetworkThread = new base::Thread("network");
- base::Thread::Options options;
- options.message_loop_type = MessageLoop::TYPE_IO;
- CHECK(gNetworkThread->StartWithOptions(options));
-
- gReqContext = new SfRequestContext;
-
- gNetworkChangeNotifier.reset(net::NetworkChangeNotifier::Create());
-
- net::AndroidNetworkLibrary::RegisterSharedInstance(
- new SfNetworkLibrary);
- logging::SetLogMessageHandler(logMessageHandler);
- }
-}
-
-static void MY_LOGI(const char *s) {
- LOG_PRI(ANDROID_LOG_INFO, LOG_TAG, "%s", s);
-}
-
-static void MY_LOGV(const char *s) {
-#if !defined(LOG_NDEBUG) || LOG_NDEBUG == 0
- LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG, "%s", s);
-#endif
-}
-
-SfNetLog::SfNetLog()
- : mNextID(1) {
-}
-
-void SfNetLog::AddEntry(
- EventType type,
- const base::TimeTicks &time,
- const Source &source,
- EventPhase phase,
- EventParameters *params) {
-#if 0
- MY_LOGI(StringPrintf(
- "AddEntry time=%s type=%s source=%s phase=%s\n",
- TickCountToString(time).c_str(),
- EventTypeToString(type),
- SourceTypeToString(source.type),
- EventPhaseToString(phase)).c_str());
-#endif
-}
-
-uint32 SfNetLog::NextID() {
- return mNextID++;
-}
-
-net::NetLog::LogLevel SfNetLog::GetLogLevel() const {
- return LOG_BASIC;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-SfRequestContext::SfRequestContext() {
- mUserAgent = MakeUserAgent().c_str();
-
- set_net_log(new SfNetLog());
-
- set_host_resolver(
- net::CreateSystemHostResolver(
- net::HostResolver::kDefaultParallelism,
- NULL /* resolver_proc */,
- net_log()));
-
- set_ssl_config_service(
- net::SSLConfigService::CreateSystemSSLConfigService());
-
- mProxyConfigService = new net::ProxyConfigServiceAndroid;
-
- set_proxy_service(net::ProxyService::CreateWithoutProxyResolver(
- mProxyConfigService, net_log()));
-
- set_http_transaction_factory(new net::HttpCache(
- host_resolver(),
- new net::CertVerifier(),
- dnsrr_resolver(),
- dns_cert_checker(),
- proxy_service(),
- ssl_config_service(),
- net::HttpAuthHandlerFactory::CreateDefault(host_resolver()),
- network_delegate(),
- net_log(),
- NULL)); // backend_factory
-
- set_cookie_store(new net::CookieMonster(NULL, NULL));
-}
-
-const std::string &SfRequestContext::GetUserAgent(const GURL &url) const {
- return mUserAgent;
-}
-
-status_t SfRequestContext::updateProxyConfig(
- const char *host, int32_t port, const char *exclusionList) {
- Mutex::Autolock autoLock(mProxyConfigLock);
-
- if (host == NULL || *host == '\0') {
- MY_LOGV("updateProxyConfig NULL");
-
- std::string proxy;
- std::string exList;
- mProxyConfigService->UpdateProxySettings(proxy, exList);
- } else {
-#if !defined(LOG_NDEBUG) || LOG_NDEBUG == 0
- LOG_PRI(ANDROID_LOG_VERBOSE, LOG_TAG,
- "updateProxyConfig %s:%d, exclude '%s'",
- host, port, exclusionList);
-#endif
-
- std::string proxy = StringPrintf("%s:%d", host, port).c_str();
- std::string exList = exclusionList;
- mProxyConfigService->UpdateProxySettings(proxy, exList);
- }
-
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-SfNetworkLibrary::SfNetworkLibrary() {}
-
-SfNetworkLibrary::VerifyResult SfNetworkLibrary::VerifyX509CertChain(
- const std::vector<std::string>& cert_chain,
- const std::string& hostname,
- const std::string& auth_type) {
-
- sp<IBinder> binder =
- defaultServiceManager()->checkService(String16("audio"));
- if (binder == 0) {
- ALOGW("Thread cannot connect to the audio service");
- } else {
- sp<IAudioService> service = interface_cast<IAudioService>(binder);
- int code = service->verifyX509CertChain(cert_chain, hostname, auth_type);
- ALOGV("verified: %d", code);
- if (code == -1) {
- return VERIFY_OK;
- } else if (code == 2) { // SSL_IDMISMATCH
- return VERIFY_BAD_HOSTNAME;
- } else if (code == 3) { // SSL_UNTRUSTED
- return VERIFY_NO_TRUSTED_ROOT;
- }
- }
- return VERIFY_INVOCATION_ERROR;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-SfDelegate::SfDelegate()
- : mOwner(NULL),
- mURLRequest(NULL),
- mReadBuffer(new net::IOBufferWithSize(8192)),
- mNumBytesRead(0),
- mNumBytesTotal(0),
- mDataDestination(NULL),
- mAtEOS(false) {
- InitializeNetworkThreadIfNecessary();
-}
-
-SfDelegate::~SfDelegate() {
- CHECK(mURLRequest == NULL);
-}
-
-// static
-status_t SfDelegate::UpdateProxyConfig(
- const char *host, int32_t port, const char *exclusionList) {
- InitializeNetworkThreadIfNecessary();
-
- return gReqContext->updateProxyConfig(host, port, exclusionList);
-}
-
-void SfDelegate::setOwner(ChromiumHTTPDataSource *owner) {
- mOwner = owner;
-}
-
-void SfDelegate::setUID(uid_t uid) {
- gReqContext->setUID(uid);
-}
-
-bool SfDelegate::getUID(uid_t *uid) const {
- return gReqContext->getUID(uid);
-}
-
-void SfDelegate::OnReceivedRedirect(
- net::URLRequest *request, const GURL &new_url, bool *defer_redirect) {
- MY_LOGV("OnReceivedRedirect");
- mOwner->onRedirect(new_url.spec().c_str());
-}
-
-void SfDelegate::OnAuthRequired(
- net::URLRequest *request, net::AuthChallengeInfo *auth_info) {
- MY_LOGV("OnAuthRequired");
-
- inherited::OnAuthRequired(request, auth_info);
-}
-
-void SfDelegate::OnCertificateRequested(
- net::URLRequest *request, net::SSLCertRequestInfo *cert_request_info) {
- MY_LOGV("OnCertificateRequested");
-
- inherited::OnCertificateRequested(request, cert_request_info);
-}
-
-void SfDelegate::OnSSLCertificateError(
- net::URLRequest *request, int cert_error, net::X509Certificate *cert) {
- fprintf(stderr, "OnSSLCertificateError cert_error=%d\n", cert_error);
-
- inherited::OnSSLCertificateError(request, cert_error, cert);
-}
-
-void SfDelegate::OnGetCookies(net::URLRequest *request, bool blocked_by_policy) {
- MY_LOGV("OnGetCookies");
-}
-
-void SfDelegate::OnSetCookie(
- net::URLRequest *request,
- const std::string &cookie_line,
- const net::CookieOptions &options,
- bool blocked_by_policy) {
- MY_LOGV("OnSetCookie");
-}
-
-void SfDelegate::OnResponseStarted(net::URLRequest *request) {
- if (request->status().status() != net::URLRequestStatus::SUCCESS) {
- MY_LOGI(StringPrintf(
- "Request failed with status %d and os_error %d",
- request->status().status(),
- request->status().os_error()).c_str());
-
- delete mURLRequest;
- mURLRequest = NULL;
-
- mOwner->onConnectionFailed(ERROR_IO);
- return;
- } else if (mRangeRequested && request->GetResponseCode() != 206) {
- MY_LOGI(StringPrintf(
- "We requested a content range, but server didn't "
- "support that. (responded with %d)",
- request->GetResponseCode()).c_str());
-
- delete mURLRequest;
- mURLRequest = NULL;
-
- mOwner->onConnectionFailed(-EPIPE);
- return;
- } else if ((request->GetResponseCode() / 100) != 2) {
- MY_LOGI(StringPrintf(
- "Server responded with http status %d",
- request->GetResponseCode()).c_str());
-
- delete mURLRequest;
- mURLRequest = NULL;
-
- mOwner->onConnectionFailed(ERROR_IO);
- return;
- }
-
- MY_LOGV("OnResponseStarted");
-
- std::string headers;
- request->GetAllResponseHeaders(&headers);
-
- MY_LOGV(StringPrintf("response headers: %s", headers.c_str()).c_str());
-
- std::string contentType;
- request->GetResponseHeaderByName("Content-Type", &contentType);
-
- mOwner->onConnectionEstablished(
- request->GetExpectedContentSize(), contentType.c_str());
-}
-
-void SfDelegate::OnReadCompleted(net::URLRequest *request, int bytes_read) {
- if (bytes_read == -1) {
- MY_LOGI(StringPrintf(
- "OnReadCompleted, read failed, status %d",
- request->status().status()).c_str());
-
- mOwner->onReadCompleted(ERROR_IO);
- return;
- }
-
- MY_LOGV(StringPrintf("OnReadCompleted, read %d bytes", bytes_read).c_str());
-
- if (bytes_read < 0) {
- MY_LOGI(StringPrintf(
- "Read failed w/ status %d\n",
- request->status().status()).c_str());
-
- mOwner->onReadCompleted(ERROR_IO);
- return;
- } else if (bytes_read == 0) {
- mAtEOS = true;
- mOwner->onReadCompleted(mNumBytesRead);
- return;
- }
-
- CHECK_GT(bytes_read, 0);
- CHECK_LE(mNumBytesRead + bytes_read, mNumBytesTotal);
-
- memcpy((uint8_t *)mDataDestination + mNumBytesRead,
- mReadBuffer->data(),
- bytes_read);
-
- mNumBytesRead += bytes_read;
-
- readMore(request);
-}
-
-void SfDelegate::readMore(net::URLRequest *request) {
- while (mNumBytesRead < mNumBytesTotal) {
- size_t copy = mNumBytesTotal - mNumBytesRead;
- if (copy > mReadBuffer->size()) {
- copy = mReadBuffer->size();
- }
-
- int n;
- if (request->Read(mReadBuffer, copy, &n)) {
- MY_LOGV(StringPrintf("Read %d bytes directly.", n).c_str());
-
- CHECK_LE((size_t)n, copy);
-
- memcpy((uint8_t *)mDataDestination + mNumBytesRead,
- mReadBuffer->data(),
- n);
-
- mNumBytesRead += n;
-
- if (n == 0) {
- mAtEOS = true;
- break;
- }
- } else {
- MY_LOGV("readMore pending read");
-
- if (request->status().status() != net::URLRequestStatus::IO_PENDING) {
- MY_LOGI(StringPrintf(
- "Direct read failed w/ status %d\n",
- request->status().status()).c_str());
-
- mOwner->onReadCompleted(ERROR_IO);
- return;
- }
-
- return;
- }
- }
-
- mOwner->onReadCompleted(mNumBytesRead);
-}
-
-void SfDelegate::initiateConnection(
- const char *uri,
- const KeyedVector<String8, String8> *headers,
- off64_t offset) {
- GURL url(uri);
-
- MessageLoop *loop = gNetworkThread->message_loop();
- loop->PostTask(
- FROM_HERE,
- NewRunnableFunction(
- &SfDelegate::OnInitiateConnectionWrapper,
- this,
- url,
- headers,
- offset));
-
-}
-
-// static
-void SfDelegate::OnInitiateConnectionWrapper(
- SfDelegate *me, GURL url,
- const KeyedVector<String8, String8> *headers,
- off64_t offset) {
- me->onInitiateConnection(url, headers, offset);
-}
-
-void SfDelegate::onInitiateConnection(
- const GURL &url,
- const KeyedVector<String8, String8> *extra,
- off64_t offset) {
- CHECK(mURLRequest == NULL);
-
- mURLRequest = new net::URLRequest(url, this);
- mAtEOS = false;
-
- mRangeRequested = false;
-
- if (offset != 0 || extra != NULL) {
- net::HttpRequestHeaders headers =
- mURLRequest->extra_request_headers();
-
- if (offset != 0) {
- headers.AddHeaderFromString(
- StringPrintf("Range: bytes=%lld-", offset).c_str());
-
- mRangeRequested = true;
- }
-
- if (extra != NULL) {
- for (size_t i = 0; i < extra->size(); ++i) {
- AString s;
- s.append(extra->keyAt(i).string());
- s.append(": ");
- s.append(extra->valueAt(i).string());
-
- headers.AddHeaderFromString(s.c_str());
- }
- }
-
- mURLRequest->SetExtraRequestHeaders(headers);
- }
-
- mURLRequest->set_context(gReqContext);
-
- mURLRequest->Start();
-}
-
-void SfDelegate::initiateDisconnect() {
- MessageLoop *loop = gNetworkThread->message_loop();
- loop->PostTask(
- FROM_HERE,
- NewRunnableFunction(
- &SfDelegate::OnInitiateDisconnectWrapper, this));
-}
-
-// static
-void SfDelegate::OnInitiateDisconnectWrapper(SfDelegate *me) {
- me->onInitiateDisconnect();
-}
-
-void SfDelegate::onInitiateDisconnect() {
- if (mURLRequest == NULL) {
- return;
- }
-
- mURLRequest->Cancel();
-
- delete mURLRequest;
- mURLRequest = NULL;
-
- mOwner->onDisconnectComplete();
-}
-
-void SfDelegate::initiateRead(void *data, size_t size) {
- MessageLoop *loop = gNetworkThread->message_loop();
- loop->PostTask(
- FROM_HERE,
- NewRunnableFunction(
- &SfDelegate::OnInitiateReadWrapper, this, data, size));
-}
-
-// static
-void SfDelegate::OnInitiateReadWrapper(
- SfDelegate *me, void *data, size_t size) {
- me->onInitiateRead(data, size);
-}
-
-void SfDelegate::onInitiateRead(void *data, size_t size) {
- CHECK(mURLRequest != NULL);
-
- mNumBytesRead = 0;
- mNumBytesTotal = size;
- mDataDestination = data;
-
- if (mAtEOS) {
- mOwner->onReadCompleted(0);
- return;
- }
-
- readMore(mURLRequest);
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/chromium_http/support.h b/media/libstagefright/chromium_http/support.h
deleted file mode 100644
index 975a1d3..0000000
--- a/media/libstagefright/chromium_http/support.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SUPPORT_H_
-
-#define SUPPORT_H_
-
-#include <assert.h>
-
-#include "net/base/net_log.h"
-#include "net/url_request/url_request.h"
-#include "net/url_request/url_request_context.h"
-#include "net/base/android_network_library.h"
-#include "net/base/io_buffer.h"
-
-#include <utils/KeyedVector.h>
-#include <utils/Mutex.h>
-#include <utils/String8.h>
-
-namespace net {
- struct ProxyConfigServiceAndroid;
-};
-
-namespace android {
-
-struct SfNetLog : public net::NetLog {
- SfNetLog();
-
- virtual void AddEntry(
- EventType type,
- const base::TimeTicks &time,
- const Source &source,
- EventPhase phase,
- EventParameters *params);
-
- virtual uint32 NextID();
- virtual LogLevel GetLogLevel() const;
-
-private:
- uint32 mNextID;
-
- DISALLOW_EVIL_CONSTRUCTORS(SfNetLog);
-};
-
-struct SfRequestContext : public net::URLRequestContext {
- SfRequestContext();
-
- virtual const std::string &GetUserAgent(const GURL &url) const;
-
- status_t updateProxyConfig(
- const char *host, int32_t port, const char *exclusionList);
-
-private:
- Mutex mProxyConfigLock;
-
- std::string mUserAgent;
- net::ProxyConfigServiceAndroid *mProxyConfigService;
-
- DISALLOW_EVIL_CONSTRUCTORS(SfRequestContext);
-};
-
-// This is required for https support, we don't really verify certificates,
-// we accept anything...
-struct SfNetworkLibrary : public net::AndroidNetworkLibrary {
- SfNetworkLibrary();
-
- virtual VerifyResult VerifyX509CertChain(
- const std::vector<std::string>& cert_chain,
- const std::string& hostname,
- const std::string& auth_type);
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(SfNetworkLibrary);
-};
-
-struct ChromiumHTTPDataSource;
-
-struct SfDelegate : public net::URLRequest::Delegate {
- SfDelegate();
- virtual ~SfDelegate();
-
- void initiateConnection(
- const char *uri,
- const KeyedVector<String8, String8> *headers,
- off64_t offset);
-
- void initiateDisconnect();
- void initiateRead(void *data, size_t size);
-
- void setOwner(ChromiumHTTPDataSource *mOwner);
-
- // Gets the UID of the calling process
- bool getUID(uid_t *uid) const;
-
- void setUID(uid_t uid);
-
- virtual void OnReceivedRedirect(
- net::URLRequest *request, const GURL &new_url, bool *defer_redirect);
-
- virtual void OnAuthRequired(
- net::URLRequest *request, net::AuthChallengeInfo *auth_info);
-
- virtual void OnCertificateRequested(
- net::URLRequest *request, net::SSLCertRequestInfo *cert_request_info);
-
- virtual void OnSSLCertificateError(
- net::URLRequest *request, int cert_error, net::X509Certificate *cert);
-
- virtual void OnGetCookies(net::URLRequest *request, bool blocked_by_policy);
-
- virtual void OnSetCookie(
- net::URLRequest *request,
- const std::string &cookie_line,
- const net::CookieOptions &options,
- bool blocked_by_policy);
-
- virtual void OnResponseStarted(net::URLRequest *request);
-
- virtual void OnReadCompleted(net::URLRequest *request, int bytes_read);
-
- static status_t UpdateProxyConfig(
- const char *host, int32_t port, const char *exclusionList);
-
-private:
- typedef Delegate inherited;
-
- ChromiumHTTPDataSource *mOwner;
-
- net::URLRequest *mURLRequest;
- scoped_refptr<net::IOBufferWithSize> mReadBuffer;
-
- size_t mNumBytesRead;
- size_t mNumBytesTotal;
- void *mDataDestination;
-
- bool mRangeRequested;
- bool mAtEOS;
-
- void readMore(net::URLRequest *request);
-
- static void OnInitiateConnectionWrapper(
- SfDelegate *me,
- GURL url,
- const KeyedVector<String8, String8> *headers,
- off64_t offset);
-
- static void OnInitiateDisconnectWrapper(SfDelegate *me);
-
- static void OnInitiateReadWrapper(
- SfDelegate *me, void *data, size_t size);
-
- void onInitiateConnection(
- const GURL &url,
- const KeyedVector<String8, String8> *headers,
- off64_t offset);
-
- void onInitiateDisconnect();
- void onInitiateRead(void *data, size_t size);
-
- DISALLOW_EVIL_CONSTRUCTORS(SfDelegate);
-};
-
-} // namespace android
-
-#endif // SUPPORT_H_
diff --git a/media/libstagefright/chromium_http_stub.cpp b/media/libstagefright/chromium_http_stub.cpp
deleted file mode 100644
index ed8a878..0000000
--- a/media/libstagefright/chromium_http_stub.cpp
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <dlfcn.h>
-
-#include <media/stagefright/DataSource.h>
-
-#include "include/chromium_http_stub.h"
-#include "include/HTTPBase.h"
-
-namespace android {
-
-static bool gFirst = true;
-static void *gHandle;
-static Mutex gLibMutex;
-
-HTTPBase *(*gLib_createChromiumHTTPDataSource)(uint32_t flags);
-DataSource *(*gLib_createDataUriSource)(const char *uri);
-
-status_t (*gLib_UpdateChromiumHTTPDataSourceProxyConfig)(
- const char *host, int32_t port, const char *exclusionList);
-
-static bool load_libstagefright_chromium_http() {
- Mutex::Autolock autoLock(gLibMutex);
- void *sym;
-
- if (!gFirst) {
- return (gHandle != NULL);
- }
-
- gFirst = false;
-
- gHandle = dlopen("libstagefright_chromium_http.so", RTLD_NOW);
- if (gHandle == NULL) {
- return false;
- }
-
- sym = dlsym(gHandle, "createChromiumHTTPDataSource");
- if (sym == NULL) {
- gHandle = NULL;
- return false;
- }
- gLib_createChromiumHTTPDataSource = (HTTPBase *(*)(uint32_t))sym;
-
- sym = dlsym(gHandle, "createDataUriSource");
- if (sym == NULL) {
- gHandle = NULL;
- return false;
- }
- gLib_createDataUriSource = (DataSource *(*)(const char *))sym;
-
- sym = dlsym(gHandle, "UpdateChromiumHTTPDataSourceProxyConfig");
- if (sym == NULL) {
- gHandle = NULL;
- return false;
- }
- gLib_UpdateChromiumHTTPDataSourceProxyConfig =
- (status_t (*)(const char *, int32_t, const char *))sym;
-
- return true;
-}
-
-HTTPBase *createChromiumHTTPDataSource(uint32_t flags) {
- if (!load_libstagefright_chromium_http()) {
- return NULL;
- }
-
- return gLib_createChromiumHTTPDataSource(flags);
-}
-
-status_t UpdateChromiumHTTPDataSourceProxyConfig(
- const char *host, int32_t port, const char *exclusionList) {
- if (!load_libstagefright_chromium_http()) {
- return INVALID_OPERATION;
- }
-
- return gLib_UpdateChromiumHTTPDataSourceProxyConfig(
- host, port, exclusionList);
-}
-
-DataSource *createDataUriSource(const char *uri) {
- if (!load_libstagefright_chromium_http()) {
- return NULL;
- }
-
- return gLib_createDataUriSource(uri);
-}
-
-}
diff --git a/media/libstagefright/codecs/aacdec/Android.mk b/media/libstagefright/codecs/aacdec/Android.mk
index ffa64f9..afb00aa 100644
--- a/media/libstagefright/codecs/aacdec/Android.mk
+++ b/media/libstagefright/codecs/aacdec/Android.mk
@@ -3,7 +3,8 @@ LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
- SoftAAC2.cpp
+ SoftAAC2.cpp \
+ DrcPresModeWrap.cpp
LOCAL_C_INCLUDES := \
frameworks/av/media/libstagefright/include \
@@ -17,6 +18,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS :=
+LOCAL_CFLAGS += -Werror
+
LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
LOCAL_SHARED_LIBRARIES := \
diff --git a/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
new file mode 100644
index 0000000..129ad65
--- /dev/null
+++ b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DrcPresModeWrap.h"
+
+#include <assert.h>
+
+#define LOG_TAG "SoftAAC2_DrcWrapper"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+//#define DRC_PRES_MODE_WRAP_DEBUG
+
+#define GPM_ENCODER_TARGET_LEVEL 64
+#define MAX_TARGET_LEVEL 64
+
+CDrcPresModeWrapper::CDrcPresModeWrapper()
+{
+ mDataUpdate = true;
+
+ /* Data from streamInfo. */
+ /* Initialized to the same values as in the aac decoder */
+ mStreamPRL = -1;
+ mStreamDRCPresMode = -1;
+ mStreamNrAACChan = 0;
+ mStreamNrOutChan = 0;
+
+ /* Desired values (set by user). */
+ /* Initialized to the same values as in the aac decoder */
+ mDesTarget = -1;
+ mDesAttFactor = 0;
+ mDesBoostFactor = 0;
+ mDesHeavy = 0;
+
+ mEncoderTarget = -1;
+
+ /* Values from last time. */
+ /* Initialized to the same values as the desired values */
+ mLastTarget = -1;
+ mLastAttFactor = 0;
+ mLastBoostFactor = 0;
+ mLastHeavy = 0;
+}
+
+CDrcPresModeWrapper::~CDrcPresModeWrapper()
+{
+}
+
+void
+CDrcPresModeWrapper::setDecoderHandle(const HANDLE_AACDECODER handle)
+{
+ mHandleDecoder = handle;
+}
+
+void
+CDrcPresModeWrapper::submitStreamData(CStreamInfo* pStreamInfo)
+{
+ assert(pStreamInfo);
+
+ if (mStreamPRL != pStreamInfo->drcProgRefLev) {
+ mStreamPRL = pStreamInfo->drcProgRefLev;
+ mDataUpdate = true;
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ ALOGV("DRC presentation mode wrapper: drcProgRefLev is %d\n", mStreamPRL);
+#endif
+ }
+
+ if (mStreamDRCPresMode != pStreamInfo->drcPresMode) {
+ mStreamDRCPresMode = pStreamInfo->drcPresMode;
+ mDataUpdate = true;
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ ALOGV("DRC presentation mode wrapper: drcPresMode is %d\n", mStreamDRCPresMode);
+#endif
+ }
+
+ if (mStreamNrAACChan != pStreamInfo->aacNumChannels) {
+ mStreamNrAACChan = pStreamInfo->aacNumChannels;
+ mDataUpdate = true;
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ ALOGV("DRC presentation mode wrapper: aacNumChannels is %d\n", mStreamNrAACChan);
+#endif
+ }
+
+ if (mStreamNrOutChan != pStreamInfo->numChannels) {
+ mStreamNrOutChan = pStreamInfo->numChannels;
+ mDataUpdate = true;
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ ALOGV("DRC presentation mode wrapper: numChannels is %d\n", mStreamNrOutChan);
+#endif
+ }
+
+
+
+ if (mStreamNrOutChan<mStreamNrAACChan) {
+ mIsDownmix = true;
+ } else {
+ mIsDownmix = false;
+ }
+
+ if (mIsDownmix && (mStreamNrOutChan == 1)) {
+ mIsMonoDownmix = true;
+ } else {
+ mIsMonoDownmix = false;
+ }
+
+ if (mIsDownmix && mStreamNrOutChan == 2){
+ mIsStereoDownmix = true;
+ } else {
+ mIsStereoDownmix = false;
+ }
+
+}
+
+void
+CDrcPresModeWrapper::setParam(const DRC_PRES_MODE_WRAP_PARAM param, const int value)
+{
+ switch (param) {
+ case DRC_PRES_MODE_WRAP_DESIRED_TARGET:
+ mDesTarget = value;
+ break;
+ case DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR:
+ mDesAttFactor = value;
+ break;
+ case DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR:
+ mDesBoostFactor = value;
+ break;
+ case DRC_PRES_MODE_WRAP_DESIRED_HEAVY:
+ mDesHeavy = value;
+ break;
+ case DRC_PRES_MODE_WRAP_ENCODER_TARGET:
+ mEncoderTarget = value;
+ break;
+ default:
+ break;
+ }
+ mDataUpdate = true;
+}
+
+void
+CDrcPresModeWrapper::update()
+{
+ // Get Data from Decoder
+ int progRefLevel = mStreamPRL;
+ int drcPresMode = mStreamDRCPresMode;
+
+ // by default, do as desired
+ int newTarget = mDesTarget;
+ int newAttFactor = mDesAttFactor;
+ int newBoostFactor = mDesBoostFactor;
+ int newHeavy = mDesHeavy;
+
+ if (mDataUpdate) {
+ // sanity check
+ if (mDesTarget < MAX_TARGET_LEVEL){
+ mDesTarget = MAX_TARGET_LEVEL; // limit target level to -16 dB or below
+ newTarget = MAX_TARGET_LEVEL;
+ }
+
+ if (mEncoderTarget != -1) {
+ if (mDesTarget<124) { // if target level > -31 dB
+ if ((mIsStereoDownmix == false) && (mIsMonoDownmix == false)) {
+ // no stereo or mono downmixing, calculated scaling of light DRC
+ /* use as little compression as possible */
+ newAttFactor = 0;
+ newBoostFactor = 0;
+ if (mDesTarget<progRefLevel) { // if target level > PRL
+ if (mEncoderTarget < mDesTarget) { // if mEncoderTarget > target level
+ // mEncoderTarget > target level > PRL
+ int calcFactor;
+ float calcFactor_norm;
+ // 0.0f < calcFactor_norm < 1.0f
+ calcFactor_norm = (float)(mDesTarget - progRefLevel) /
+ (float)(mEncoderTarget - progRefLevel);
+ calcFactor = (int)(calcFactor_norm*127.0f); // 0 <= calcFactor < 127
+ // calcFactor is the lower limit
+ newAttFactor = (calcFactor>newAttFactor) ? calcFactor : newAttFactor;
+ // new AttFactor will be always = calcFactor, as it is set to 0 before.
+ newBoostFactor = newAttFactor;
+ } else {
+ /* target level > mEncoderTarget > PRL */
+ // newTDLimiterEnable = 1;
+ // the time domain limiter must always be active in this case.
+ // It is assumed that the framework activates it by default
+ newAttFactor = 127;
+ newBoostFactor = 127;
+ }
+ } else { // target level <= PRL
+ // no restrictions required
+ // newAttFactor = newAttFactor;
+ }
+ } else { // downmixing
+ // if target level > -23 dB or mono downmix
+ if ( (mDesTarget<92) || mIsMonoDownmix ) {
+ newHeavy = 1;
+ } else {
+ // we perform a downmix, so, we need at least full light DRC
+ newAttFactor = 127;
+ }
+ }
+ } else { // target level <= -31 dB
+ // playback -31 dB: light DRC only needed if we perform downmixing
+ if (mIsDownmix) { // we do downmixing
+ newAttFactor = 127;
+ }
+ }
+ }
+ else { // handle other used encoder target levels
+
+ // Sanity check: DRC presentation mode is only specified for max. 5.1 channels
+ if (mStreamNrAACChan > 6) {
+ drcPresMode = 0;
+ }
+
+ switch (drcPresMode) {
+ case 0:
+ default: // presentation mode not indicated
+ {
+
+ if (mDesTarget<124) { // if target level > -31 dB
+ // no stereo or mono downmixing
+ if ((mIsStereoDownmix == false) && (mIsMonoDownmix == false)) {
+ if (mDesTarget<progRefLevel) { // if target level > PRL
+ // newTDLimiterEnable = 1;
+ // the time domain limiter must always be active in this case.
+ // It is assumed that the framework activates it by default
+ newAttFactor = 127; // at least, use light compression
+ } else { // target level <= PRL
+ // no restrictions required
+ // newAttFactor = newAttFactor;
+ }
+ } else { // downmixing
+ // newTDLimiterEnable = 1;
+ // the time domain limiter must always be active in this case.
+ // It is assumed that the framework activates it by default
+
+ // if target level > -23 dB or mono downmix
+ if ( (mDesTarget < 92) || mIsMonoDownmix ) {
+ newHeavy = 1;
+ } else{
+ // we perform a downmix, so, we need at least full light DRC
+ newAttFactor = 127;
+ }
+ }
+ } else { // target level <= -31 dB
+ if (mIsDownmix) { // we do downmixing.
+ // newTDLimiterEnable = 1;
+ // the time domain limiter must always be active in this case.
+ // It is assumed that the framework activates it by default
+ newAttFactor = 127;
+ }
+ }
+ }
+ break;
+
+ // Presentation mode 1 and 2 according to ETSI TS 101 154:
+ // Digital Video Broadcasting (DVB); Specification for the use of Video and Audio Coding
+ // in Broadcasting Applications based on the MPEG-2 Transport Stream,
+ // section C.5.4., "Decoding", and Table C.33
+ // ISO DRC -> newHeavy = 0 (Use light compression, MPEG-style)
+ // Compression_value -> newHeavy = 1 (Use heavy compression, DVB-style)
+ // scaling restricted -> newAttFactor = 127
+
+ case 1: // presentation mode 1, Light:-31/Heavy:-23
+ {
+ if (mDesTarget < 124) { // if target level > -31 dB
+ // playback up to -23 dB
+ newHeavy = 1;
+ } else { // target level <= -31 dB
+ // playback -31 dB
+ if (mIsDownmix) { // we do downmixing.
+ newAttFactor = 127;
+ }
+ }
+ }
+ break;
+
+ case 2: // presentation mode 2, Light:-23/Heavy:-23
+ {
+ if (mDesTarget < 124) { // if target level > -31 dB
+ // playback up to -23 dB
+ if (mIsMonoDownmix) { // if mono downmix
+ newHeavy = 1;
+ } else {
+ newHeavy = 0;
+ newAttFactor = 127;
+ }
+ } else { // target level <= -31 dB
+ // playback -31 dB
+ newHeavy = 0;
+ if (mIsDownmix) { // we do downmixing.
+ newAttFactor = 127;
+ }
+ }
+ }
+ break;
+
+ } // switch()
+ } // else branch of if (mEncoderTarget != -1): stream uses an unknown encoder target level
+
+ // sanity again
+ if (newHeavy == 1) {
+ newBoostFactor=127; // not really needed as the same would be done by the decoder anyway
+ newAttFactor = 127;
+ }
+
+ // update the decoder
+ if (newTarget != mLastTarget) {
+ aacDecoder_SetParam(mHandleDecoder, AAC_DRC_REFERENCE_LEVEL, newTarget);
+ mLastTarget = newTarget;
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ if (newTarget != mDesTarget)
+ ALOGV("DRC presentation mode wrapper: forced target level to %d (from %d)\n", newTarget, mDesTarget);
+ else
+ ALOGV("DRC presentation mode wrapper: set target level to %d\n", newTarget);
+#endif
+ }
+
+ if (newAttFactor != mLastAttFactor) {
+ aacDecoder_SetParam(mHandleDecoder, AAC_DRC_ATTENUATION_FACTOR, newAttFactor);
+ mLastAttFactor = newAttFactor;
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ if (newAttFactor != mDesAttFactor)
+ ALOGV("DRC presentation mode wrapper: forced attenuation factor to %d (from %d)\n", newAttFactor, mDesAttFactor);
+ else
+ ALOGV("DRC presentation mode wrapper: set attenuation factor to %d\n", newAttFactor);
+#endif
+ }
+
+ if (newBoostFactor != mLastBoostFactor) {
+ aacDecoder_SetParam(mHandleDecoder, AAC_DRC_BOOST_FACTOR, newBoostFactor);
+ mLastBoostFactor = newBoostFactor;
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ if (newBoostFactor != mDesBoostFactor)
+ ALOGV("DRC presentation mode wrapper: forced boost factor to %d (from %d)\n",
+ newBoostFactor, mDesBoostFactor);
+ else
+ ALOGV("DRC presentation mode wrapper: set boost factor to %d\n", newBoostFactor);
+#endif
+ }
+
+ if (newHeavy != mLastHeavy) {
+ aacDecoder_SetParam(mHandleDecoder, AAC_DRC_HEAVY_COMPRESSION, newHeavy);
+ mLastHeavy = newHeavy;
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ if (newHeavy != mDesHeavy)
+ ALOGV("DRC presentation mode wrapper: forced heavy compression to %d (from %d)\n",
+ newHeavy, mDesHeavy);
+ else
+ ALOGV("DRC presentation mode wrapper: set heavy compression to %d\n", newHeavy);
+#endif
+ }
+
+#ifdef DRC_PRES_MODE_WRAP_DEBUG
+ ALOGV("DRC config: tgt_lev: %3d, cut: %3d, boost: %3d, heavy: %d\n", newTarget,
+ newAttFactor, newBoostFactor, newHeavy);
+#endif
+ mDataUpdate = false;
+
+ } // if (mDataUpdate)
+}
diff --git a/media/libstagefright/codecs/aacdec/DrcPresModeWrap.h b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.h
new file mode 100644
index 0000000..f0b6cf2
--- /dev/null
+++ b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+#include "aacdecoder_lib.h"
+
+typedef enum
+{
+ DRC_PRES_MODE_WRAP_DESIRED_TARGET = 0x0000,
+ DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR = 0x0001,
+ DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR = 0x0002,
+ DRC_PRES_MODE_WRAP_DESIRED_HEAVY = 0x0003,
+ DRC_PRES_MODE_WRAP_ENCODER_TARGET = 0x0004
+} DRC_PRES_MODE_WRAP_PARAM;
+
+
+class CDrcPresModeWrapper {
+public:
+ CDrcPresModeWrapper();
+ ~CDrcPresModeWrapper();
+ void setDecoderHandle(const HANDLE_AACDECODER handle);
+ void setParam(const DRC_PRES_MODE_WRAP_PARAM param, const int value);
+ void submitStreamData(CStreamInfo*);
+ void update();
+
+protected:
+ HANDLE_AACDECODER mHandleDecoder;
+ int mDesTarget;
+ int mDesAttFactor;
+ int mDesBoostFactor;
+ int mDesHeavy;
+
+ int mEncoderTarget;
+
+ int mLastTarget;
+ int mLastAttFactor;
+ int mLastBoostFactor;
+ int mLastHeavy;
+
+ SCHAR mStreamPRL;
+ SCHAR mStreamDRCPresMode;
+ INT mStreamNrAACChan;
+ INT mStreamNrOutChan;
+
+ bool mIsDownmix;
+ bool mIsMonoDownmix;
+ bool mIsStereoDownmix;
+
+ bool mDataUpdate;
+};
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 1b20cbb..40925fd 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -14,27 +14,35 @@
* limitations under the License.
*/
-#define LOG_TAG "SoftAAC2"
//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftAAC2"
#include <utils/Log.h>
#include "SoftAAC2.h"
+#include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaErrors.h>
+#include <math.h>
+
#define FILEREAD_MAX_LAYERS 2
#define DRC_DEFAULT_MOBILE_REF_LEVEL 64 /* 64*-0.25dB = -16 dB below full scale for mobile conf */
#define DRC_DEFAULT_MOBILE_DRC_CUT 127 /* maximum compression of dynamic range for mobile conf */
#define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
-#define MAX_CHANNEL_COUNT 6 /* maximum number of audio channels that can be decoded */
+#define DRC_DEFAULT_MOBILE_DRC_HEAVY 1 /* switch for heavy compression for mobile conf */
+#define DRC_DEFAULT_MOBILE_ENC_LEVEL -1 /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
+#define MAX_CHANNEL_COUNT 8 /* maximum number of audio channels that can be decoded */
// names of properties that can be used to override the default DRC settings
#define PROP_DRC_OVERRIDE_REF_LEVEL "aac_drc_reference_level"
#define PROP_DRC_OVERRIDE_CUT "aac_drc_cut"
#define PROP_DRC_OVERRIDE_BOOST "aac_drc_boost"
+#define PROP_DRC_OVERRIDE_HEAVY "aac_drc_heavy"
+#define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
namespace android {
@@ -57,9 +65,9 @@ SoftAAC2::SoftAAC2(
mStreamInfo(NULL),
mIsADTS(false),
mInputBufferCount(0),
+ mOutputBufferCount(0),
mSignalledError(false),
- mAnchorTimeUs(0),
- mNumSamplesOutput(0),
+ mLastInHeader(NULL),
mOutputPortSettingsChange(NONE) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
@@ -67,6 +75,7 @@ SoftAAC2::SoftAAC2(
SoftAAC2::~SoftAAC2() {
aacDecoder_Close(mAACDecoder);
+ delete mOutputDelayRingBuffer;
}
void SoftAAC2::initPorts() {
@@ -111,6 +120,7 @@ void SoftAAC2::initPorts() {
}
status_t SoftAAC2::initDecoder() {
+ ALOGV("initDecoder()");
status_t status = UNKNOWN_ERROR;
mAACDecoder = aacDecoder_Open(TT_MP4_ADIF, /* num layers */ 1);
if (mAACDecoder != NULL) {
@@ -119,36 +129,73 @@ status_t SoftAAC2::initDecoder() {
status = OK;
}
}
- mDecoderHasData = false;
- // for streams that contain metadata, use the mobile profile DRC settings unless overridden
- // by platform properties:
+ mEndOfInput = false;
+ mEndOfOutput = false;
+ mOutputDelayCompensated = 0;
+ mOutputDelayRingBufferSize = 2048 * MAX_CHANNEL_COUNT * kNumDelayBlocksMax;
+ mOutputDelayRingBuffer = new short[mOutputDelayRingBufferSize];
+ mOutputDelayRingBufferWritePos = 0;
+ mOutputDelayRingBufferReadPos = 0;
+ mOutputDelayRingBufferFilled = 0;
+
+ if (mAACDecoder == NULL) {
+ ALOGE("AAC decoder is null. TODO: Can not call aacDecoder_SetParam in the following code");
+ }
+
+ //aacDecoder_SetParam(mAACDecoder, AAC_PCM_LIMITER_ENABLE, 0);
+
+ //init DRC wrapper
+ mDrcWrap.setDecoderHandle(mAACDecoder);
+ mDrcWrap.submitStreamData(mStreamInfo);
+
+ // for streams that contain metadata, use the mobile profile DRC settings unless overridden by platform properties
+ // TODO: change the DRC settings depending on audio output device type (HDMI, loudspeaker, headphone)
char value[PROPERTY_VALUE_MAX];
- // * AAC_DRC_REFERENCE_LEVEL
+ // DRC_PRES_MODE_WRAP_DESIRED_TARGET
if (property_get(PROP_DRC_OVERRIDE_REF_LEVEL, value, NULL)) {
unsigned refLevel = atoi(value);
- ALOGV("AAC decoder using AAC_DRC_REFERENCE_LEVEL of %d instead of %d",
- refLevel, DRC_DEFAULT_MOBILE_REF_LEVEL);
- aacDecoder_SetParam(mAACDecoder, AAC_DRC_REFERENCE_LEVEL, refLevel);
+ ALOGV("AAC decoder using desired DRC target reference level of %d instead of %d", refLevel,
+ DRC_DEFAULT_MOBILE_REF_LEVEL);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_TARGET, refLevel);
} else {
- aacDecoder_SetParam(mAACDecoder, AAC_DRC_REFERENCE_LEVEL, DRC_DEFAULT_MOBILE_REF_LEVEL);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_TARGET, DRC_DEFAULT_MOBILE_REF_LEVEL);
}
- // * AAC_DRC_ATTENUATION_FACTOR
+ // DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR
if (property_get(PROP_DRC_OVERRIDE_CUT, value, NULL)) {
unsigned cut = atoi(value);
- ALOGV("AAC decoder using AAC_DRC_ATTENUATION_FACTOR of %d instead of %d",
- cut, DRC_DEFAULT_MOBILE_DRC_CUT);
- aacDecoder_SetParam(mAACDecoder, AAC_DRC_ATTENUATION_FACTOR, cut);
+ ALOGV("AAC decoder using desired DRC attenuation factor of %d instead of %d", cut,
+ DRC_DEFAULT_MOBILE_DRC_CUT);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR, cut);
} else {
- aacDecoder_SetParam(mAACDecoder, AAC_DRC_ATTENUATION_FACTOR, DRC_DEFAULT_MOBILE_DRC_CUT);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR, DRC_DEFAULT_MOBILE_DRC_CUT);
}
- // * AAC_DRC_BOOST_FACTOR (note: no default, using cut)
+ // DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR
if (property_get(PROP_DRC_OVERRIDE_BOOST, value, NULL)) {
unsigned boost = atoi(value);
- ALOGV("AAC decoder using AAC_DRC_BOOST_FACTOR of %d", boost);
- aacDecoder_SetParam(mAACDecoder, AAC_DRC_BOOST_FACTOR, boost);
+ ALOGV("AAC decoder using desired DRC boost factor of %d instead of %d", boost,
+ DRC_DEFAULT_MOBILE_DRC_BOOST);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR, boost);
+ } else {
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR, DRC_DEFAULT_MOBILE_DRC_BOOST);
+ }
+ // DRC_PRES_MODE_WRAP_DESIRED_HEAVY
+ if (property_get(PROP_DRC_OVERRIDE_HEAVY, value, NULL)) {
+ unsigned heavy = atoi(value);
+ ALOGV("AAC decoder using desried DRC heavy compression switch of %d instead of %d", heavy,
+ DRC_DEFAULT_MOBILE_DRC_HEAVY);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_HEAVY, heavy);
+ } else {
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_HEAVY, DRC_DEFAULT_MOBILE_DRC_HEAVY);
+ }
+ // DRC_PRES_MODE_WRAP_ENCODER_TARGET
+ if (property_get(PROP_DRC_OVERRIDE_ENC_LEVEL, value, NULL)) {
+ unsigned encoderRefLevel = atoi(value);
+ ALOGV("AAC decoder using encoder-side DRC reference level of %d instead of %d",
+ encoderRefLevel, DRC_DEFAULT_MOBILE_ENC_LEVEL);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET, encoderRefLevel);
} else {
- aacDecoder_SetParam(mAACDecoder, AAC_DRC_BOOST_FACTOR, DRC_DEFAULT_MOBILE_DRC_BOOST);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET, DRC_DEFAULT_MOBILE_ENC_LEVEL);
}
return status;
@@ -231,7 +278,7 @@ OMX_ERRORTYPE SoftAAC2::internalGetParameter(
OMX_ERRORTYPE SoftAAC2::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
- switch (index) {
+ switch ((int)index) {
case OMX_IndexParamStandardComponentRole:
{
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
@@ -267,6 +314,71 @@ OMX_ERRORTYPE SoftAAC2::internalSetParameter(
return OMX_ErrorNone;
}
+ case OMX_IndexParamAudioAndroidAacPresentation:
+ {
+ const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *aacPresParams =
+ (const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *)params;
+ // for the following parameters of the OMX_AUDIO_PARAM_AACPROFILETYPE structure,
+ // a value of -1 implies the parameter is not set by the application:
+ // nMaxOutputChannels uses default platform properties, see configureDownmix()
+ // nDrcCut uses default platform properties, see initDecoder()
+ // nDrcBoost idem
+ // nHeavyCompression idem
+ // nTargetReferenceLevel idem
+ // nEncodedTargetLevel idem
+ if (aacPresParams->nMaxOutputChannels >= 0) {
+ int max;
+ if (aacPresParams->nMaxOutputChannels >= 8) { max = 8; }
+ else if (aacPresParams->nMaxOutputChannels >= 6) { max = 6; }
+ else if (aacPresParams->nMaxOutputChannels >= 2) { max = 2; }
+ else {
+ // -1 or 0: disable downmix, 1: mono
+ max = aacPresParams->nMaxOutputChannels;
+ }
+ ALOGV("set nMaxOutputChannels=%d", max);
+ aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, max);
+ }
+ bool updateDrcWrapper = false;
+ if (aacPresParams->nDrcBoost >= 0) {
+ ALOGV("set nDrcBoost=%d", aacPresParams->nDrcBoost);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_BOOST_FACTOR,
+ aacPresParams->nDrcBoost);
+ updateDrcWrapper = true;
+ }
+ if (aacPresParams->nDrcCut >= 0) {
+ ALOGV("set nDrcCut=%d", aacPresParams->nDrcCut);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_ATT_FACTOR, aacPresParams->nDrcCut);
+ updateDrcWrapper = true;
+ }
+ if (aacPresParams->nHeavyCompression >= 0) {
+ ALOGV("set nHeavyCompression=%d", aacPresParams->nHeavyCompression);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_HEAVY,
+ aacPresParams->nHeavyCompression);
+ updateDrcWrapper = true;
+ }
+ if (aacPresParams->nTargetReferenceLevel >= 0) {
+ ALOGV("set nTargetReferenceLevel=%d", aacPresParams->nTargetReferenceLevel);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_DESIRED_TARGET,
+ aacPresParams->nTargetReferenceLevel);
+ updateDrcWrapper = true;
+ }
+ if (aacPresParams->nEncodedTargetLevel >= 0) {
+ ALOGV("set nEncodedTargetLevel=%d", aacPresParams->nEncodedTargetLevel);
+ mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET,
+ aacPresParams->nEncodedTargetLevel);
+ updateDrcWrapper = true;
+ }
+ if (aacPresParams->nPCMLimiterEnable >= 0) {
+ aacDecoder_SetParam(mAACDecoder, AAC_PCM_LIMITER_ENABLE,
+ (aacPresParams->nPCMLimiterEnable != 0));
+ }
+ if (updateDrcWrapper) {
+ mDrcWrap.update();
+ }
+
+ return OMX_ErrorNone;
+ }
+
case OMX_IndexParamAudioPcm:
{
const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
@@ -288,290 +400,578 @@ bool SoftAAC2::isConfigured() const {
return mInputBufferCount > 0;
}
-void SoftAAC2::maybeConfigureDownmix() const {
- if (mStreamInfo->numChannels > 2) {
- char value[PROPERTY_VALUE_MAX];
- if (!(property_get("media.aac_51_output_enabled", value, NULL) &&
- (!strcmp(value, "1") || !strcasecmp(value, "true")))) {
- ALOGI("Downmixing multichannel AAC to stereo");
- aacDecoder_SetParam(mAACDecoder, AAC_PCM_OUTPUT_CHANNELS, 2);
- mStreamInfo->numChannels = 2;
- }
+void SoftAAC2::configureDownmix() const {
+ char value[PROPERTY_VALUE_MAX];
+ if (!(property_get("media.aac_51_output_enabled", value, NULL)
+ && (!strcmp(value, "1") || !strcasecmp(value, "true")))) {
+ ALOGI("limiting to stereo output");
+ aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
+ // By default, the decoder creates a 5.1 channel downmix signal
+ // for seven and eight channel input streams. To enable 6.1 and 7.1 channel output
+ // use aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, -1)
}
}
-void SoftAAC2::onQueueFilled(OMX_U32 portIndex) {
- if (mSignalledError || mOutputPortSettingsChange != NONE) {
- return;
+bool SoftAAC2::outputDelayRingBufferPutSamples(INT_PCM *samples, int32_t numSamples) {
+ if (numSamples == 0) {
+ return true;
+ }
+ if (outputDelayRingBufferSpaceLeft() < numSamples) {
+ ALOGE("RING BUFFER WOULD OVERFLOW");
+ return false;
}
+ if (mOutputDelayRingBufferWritePos + numSamples <= mOutputDelayRingBufferSize
+ && (mOutputDelayRingBufferReadPos <= mOutputDelayRingBufferWritePos
+ || mOutputDelayRingBufferReadPos > mOutputDelayRingBufferWritePos + numSamples)) {
+ // faster memcopy loop without checks, if the preconditions allow this
+ for (int32_t i = 0; i < numSamples; i++) {
+ mOutputDelayRingBuffer[mOutputDelayRingBufferWritePos++] = samples[i];
+ }
- UCHAR* inBuffer[FILEREAD_MAX_LAYERS];
- UINT inBufferLength[FILEREAD_MAX_LAYERS] = {0};
- UINT bytesValid[FILEREAD_MAX_LAYERS] = {0};
+ if (mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferSize) {
+ mOutputDelayRingBufferWritePos -= mOutputDelayRingBufferSize;
+ }
+ } else {
+ ALOGV("slow SoftAAC2::outputDelayRingBufferPutSamples()");
- List<BufferInfo *> &inQueue = getPortQueue(0);
- List<BufferInfo *> &outQueue = getPortQueue(1);
+ for (int32_t i = 0; i < numSamples; i++) {
+ mOutputDelayRingBuffer[mOutputDelayRingBufferWritePos] = samples[i];
+ mOutputDelayRingBufferWritePos++;
+ if (mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferSize) {
+ mOutputDelayRingBufferWritePos -= mOutputDelayRingBufferSize;
+ }
+ }
+ }
+ mOutputDelayRingBufferFilled += numSamples;
+ return true;
+}
- if (portIndex == 0 && mInputBufferCount == 0) {
- ++mInputBufferCount;
- BufferInfo *info = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *header = info->mHeader;
+int32_t SoftAAC2::outputDelayRingBufferGetSamples(INT_PCM *samples, int32_t numSamples) {
- inBuffer[0] = header->pBuffer + header->nOffset;
- inBufferLength[0] = header->nFilledLen;
+ if (numSamples > mOutputDelayRingBufferFilled) {
+ ALOGE("RING BUFFER WOULD UNDERRUN");
+ return -1;
+ }
- AAC_DECODER_ERROR decoderErr =
- aacDecoder_ConfigRaw(mAACDecoder,
- inBuffer,
- inBufferLength);
+ if (mOutputDelayRingBufferReadPos + numSamples <= mOutputDelayRingBufferSize
+ && (mOutputDelayRingBufferWritePos < mOutputDelayRingBufferReadPos
+ || mOutputDelayRingBufferWritePos >= mOutputDelayRingBufferReadPos + numSamples)) {
+ // faster memcopy loop without checks, if the preconditions allow this
+ if (samples != 0) {
+ for (int32_t i = 0; i < numSamples; i++) {
+ samples[i] = mOutputDelayRingBuffer[mOutputDelayRingBufferReadPos++];
+ }
+ } else {
+ mOutputDelayRingBufferReadPos += numSamples;
+ }
+ if (mOutputDelayRingBufferReadPos >= mOutputDelayRingBufferSize) {
+ mOutputDelayRingBufferReadPos -= mOutputDelayRingBufferSize;
+ }
+ } else {
+ ALOGV("slow SoftAAC2::outputDelayRingBufferGetSamples()");
- if (decoderErr != AAC_DEC_OK) {
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
- return;
+ for (int32_t i = 0; i < numSamples; i++) {
+ if (samples != 0) {
+ samples[i] = mOutputDelayRingBuffer[mOutputDelayRingBufferReadPos];
+ }
+ mOutputDelayRingBufferReadPos++;
+ if (mOutputDelayRingBufferReadPos >= mOutputDelayRingBufferSize) {
+ mOutputDelayRingBufferReadPos -= mOutputDelayRingBufferSize;
+ }
}
+ }
+ mOutputDelayRingBufferFilled -= numSamples;
+ return numSamples;
+}
- inQueue.erase(inQueue.begin());
- info->mOwnedByUs = false;
- notifyEmptyBufferDone(header);
+int32_t SoftAAC2::outputDelayRingBufferSamplesAvailable() {
+ return mOutputDelayRingBufferFilled;
+}
- // Only send out port settings changed event if both sample rate
- // and numChannels are valid.
- if (mStreamInfo->sampleRate && mStreamInfo->numChannels) {
- maybeConfigureDownmix();
- ALOGI("Initially configuring decoder: %d Hz, %d channels",
- mStreamInfo->sampleRate,
- mStreamInfo->numChannels);
+int32_t SoftAAC2::outputDelayRingBufferSpaceLeft() {
+ return mOutputDelayRingBufferSize - outputDelayRingBufferSamplesAvailable();
+}
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
- }
+void SoftAAC2::onQueueFilled(OMX_U32 /* portIndex */) {
+ if (mSignalledError || mOutputPortSettingsChange != NONE) {
return;
}
- while (!inQueue.empty() && !outQueue.empty()) {
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ UCHAR* inBuffer[FILEREAD_MAX_LAYERS];
+ UINT inBufferLength[FILEREAD_MAX_LAYERS] = {0};
+ UINT bytesValid[FILEREAD_MAX_LAYERS] = {0};
- BufferInfo *outInfo = *outQueue.begin();
- OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+ List<BufferInfo *> &inQueue = getPortQueue(0);
+ List<BufferInfo *> &outQueue = getPortQueue(1);
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
+ while ((!inQueue.empty() || mEndOfInput) && !outQueue.empty()) {
+ if (!inQueue.empty()) {
+ INT_PCM tmpOutBuffer[2048 * MAX_CHANNEL_COUNT];
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
- if (mDecoderHasData) {
- // flush out the decoder's delayed data by calling DecodeFrame
- // one more time, with the AACDEC_FLUSH flag set
- INT_PCM *outBuffer =
- reinterpret_cast<INT_PCM *>(
- outHeader->pBuffer + outHeader->nOffset);
+ mEndOfInput = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0;
+
+ if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
+ ALOGE("first buffer should have OMX_BUFFERFLAG_CODECCONFIG set");
+ inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
+ }
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
+ inBufferLength[0] = inHeader->nFilledLen;
AAC_DECODER_ERROR decoderErr =
- aacDecoder_DecodeFrame(mAACDecoder,
- outBuffer,
- outHeader->nAllocLen,
- AACDEC_FLUSH);
- mDecoderHasData = false;
+ aacDecoder_ConfigRaw(mAACDecoder,
+ inBuffer,
+ inBufferLength);
if (decoderErr != AAC_DEC_OK) {
+ ALOGW("aacDecoder_ConfigRaw decoderErr = 0x%4.4x", decoderErr);
mSignalledError = true;
-
- notify(OMX_EventError, OMX_ErrorUndefined, decoderErr,
- NULL);
-
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
return;
}
- outHeader->nFilledLen =
- mStreamInfo->frameSize
- * sizeof(int16_t)
- * mStreamInfo->numChannels;
- } else {
- // we never submitted any data to the decoder, so there's nothing to flush out
- outHeader->nFilledLen = 0;
+ mInputBufferCount++;
+ mOutputBufferCount++; // fake increase of outputBufferCount to keep the counters aligned
+
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+
+ configureDownmix();
+ // Only send out port settings changed event if both sample rate
+ // and numChannels are valid.
+ if (mStreamInfo->sampleRate && mStreamInfo->numChannels) {
+ ALOGI("Initially configuring decoder: %d Hz, %d channels",
+ mStreamInfo->sampleRate,
+ mStreamInfo->numChannels);
+
+ notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+ }
+ return;
}
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-
- outQueue.erase(outQueue.begin());
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
- return;
- }
-
- if (inHeader->nOffset == 0) {
- mAnchorTimeUs = inHeader->nTimeStamp;
- mNumSamplesOutput = 0;
- }
+ if (inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ continue;
+ }
- size_t adtsHeaderSize = 0;
- if (mIsADTS) {
- // skip 30 bits, aac_frame_length follows.
- // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
+ if (mIsADTS) {
+ size_t adtsHeaderSize = 0;
+ // skip 30 bits, aac_frame_length follows.
+ // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
- const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
+ const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
- bool signalError = false;
- if (inHeader->nFilledLen < 7) {
- ALOGE("Audio data too short to contain even the ADTS header. "
- "Got %ld bytes.", inHeader->nFilledLen);
- hexdump(adtsHeader, inHeader->nFilledLen);
- signalError = true;
- } else {
- bool protectionAbsent = (adtsHeader[1] & 1);
-
- unsigned aac_frame_length =
- ((adtsHeader[3] & 3) << 11)
- | (adtsHeader[4] << 3)
- | (adtsHeader[5] >> 5);
-
- if (inHeader->nFilledLen < aac_frame_length) {
- ALOGE("Not enough audio data for the complete frame. "
- "Got %ld bytes, frame size according to the ADTS "
- "header is %u bytes.",
- inHeader->nFilledLen, aac_frame_length);
+ bool signalError = false;
+ if (inHeader->nFilledLen < 7) {
+ ALOGE("Audio data too short to contain even the ADTS header. "
+ "Got %d bytes.", inHeader->nFilledLen);
hexdump(adtsHeader, inHeader->nFilledLen);
signalError = true;
} else {
- adtsHeaderSize = (protectionAbsent ? 7 : 9);
-
- inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
- inBufferLength[0] = aac_frame_length - adtsHeaderSize;
-
- inHeader->nOffset += adtsHeaderSize;
- inHeader->nFilledLen -= adtsHeaderSize;
+ bool protectionAbsent = (adtsHeader[1] & 1);
+
+ unsigned aac_frame_length =
+ ((adtsHeader[3] & 3) << 11)
+ | (adtsHeader[4] << 3)
+ | (adtsHeader[5] >> 5);
+
+ if (inHeader->nFilledLen < aac_frame_length) {
+ ALOGE("Not enough audio data for the complete frame. "
+ "Got %d bytes, frame size according to the ADTS "
+ "header is %u bytes.",
+ inHeader->nFilledLen, aac_frame_length);
+ hexdump(adtsHeader, inHeader->nFilledLen);
+ signalError = true;
+ } else {
+ adtsHeaderSize = (protectionAbsent ? 7 : 9);
+
+ inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
+ inBufferLength[0] = aac_frame_length - adtsHeaderSize;
+
+ inHeader->nOffset += adtsHeaderSize;
+ inHeader->nFilledLen -= adtsHeaderSize;
+ }
}
- }
-
- if (signalError) {
- mSignalledError = true;
- notify(OMX_EventError,
- OMX_ErrorStreamCorrupt,
- ERROR_MALFORMED,
- NULL);
+ if (signalError) {
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, ERROR_MALFORMED, NULL);
+ return;
+ }
- return;
+ // insert buffer size and time stamp
+ mBufferSizes.add(inBufferLength[0]);
+ if (mLastInHeader != inHeader) {
+ mBufferTimestamps.add(inHeader->nTimeStamp);
+ mLastInHeader = inHeader;
+ } else {
+ int64_t currentTime = mBufferTimestamps.top();
+ currentTime += mStreamInfo->aacSamplesPerFrame *
+ 1000000ll / mStreamInfo->sampleRate;
+ mBufferTimestamps.add(currentTime);
+ }
+ } else {
+ inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
+ inBufferLength[0] = inHeader->nFilledLen;
+ mLastInHeader = inHeader;
+ mBufferTimestamps.add(inHeader->nTimeStamp);
+ mBufferSizes.add(inHeader->nFilledLen);
}
- } else {
- inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
- inBufferLength[0] = inHeader->nFilledLen;
- }
-
- // Fill and decode
- INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(
- outHeader->pBuffer + outHeader->nOffset);
- bytesValid[0] = inBufferLength[0];
+ // Fill and decode
+ bytesValid[0] = inBufferLength[0];
- int prevSampleRate = mStreamInfo->sampleRate;
- int prevNumChannels = mStreamInfo->numChannels;
+ INT prevSampleRate = mStreamInfo->sampleRate;
+ INT prevNumChannels = mStreamInfo->numChannels;
- AAC_DECODER_ERROR decoderErr = AAC_DEC_NOT_ENOUGH_BITS;
- while (bytesValid[0] > 0 && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
aacDecoder_Fill(mAACDecoder,
inBuffer,
inBufferLength,
bytesValid);
- mDecoderHasData = true;
- decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
- outBuffer,
- outHeader->nAllocLen,
- 0 /* flags */);
+ // run DRC check
+ mDrcWrap.submitStreamData(mStreamInfo);
+ mDrcWrap.update();
- if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
- ALOGW("Not enough bits, bytesValid %d", bytesValid[0]);
- }
- }
-
- size_t numOutBytes =
- mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
-
- if (decoderErr == AAC_DEC_OK) {
UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
inHeader->nFilledLen -= inBufferUsedLength;
inHeader->nOffset += inBufferUsedLength;
- } else {
- ALOGW("AAC decoder returned error %d, substituting silence",
- decoderErr);
- memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes);
+ AAC_DECODER_ERROR decoderErr;
+ int numLoops = 0;
+ do {
+ if (outputDelayRingBufferSpaceLeft() <
+ (mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ ALOGV("skipping decode: not enough space left in ringbuffer");
+ break;
+ }
+
+ int numConsumed = mStreamInfo->numTotalBytes;
+ decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
+ tmpOutBuffer,
+ 2048 * MAX_CHANNEL_COUNT,
+ 0 /* flags */);
+
+ numConsumed = mStreamInfo->numTotalBytes - numConsumed;
+ numLoops++;
+
+ if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
+ break;
+ }
+ mDecodedSizes.add(numConsumed);
+
+ if (decoderErr != AAC_DEC_OK) {
+ ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
+ }
- // Discard input buffer.
- inHeader->nFilledLen = 0;
+ if (bytesValid[0] != 0) {
+ ALOGE("bytesValid[0] != 0 should never happen");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
- aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
+ size_t numOutBytes =
+ mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;
- // fall through
+ if (decoderErr == AAC_DEC_OK) {
+ if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
+ mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return;
+ }
+ } else {
+ ALOGW("AAC decoder returned error 0x%4.4x, substituting silence", decoderErr);
+
+ memset(tmpOutBuffer, 0, numOutBytes); // TODO: check for overflow
+
+ if (!outputDelayRingBufferPutSamples(tmpOutBuffer,
+ mStreamInfo->frameSize * mStreamInfo->numChannels)) {
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return;
+ }
+
+ // Discard input buffer.
+ if (inHeader) {
+ inHeader->nFilledLen = 0;
+ }
+
+ aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
+
+ // After an error, replace the last entry in mBufferSizes with the sum of the
+ // last <numLoops> entries from mDecodedSizes to resynchronize the in/out lists.
+ mBufferSizes.pop();
+ int n = 0;
+ for (int i = 0; i < numLoops; i++) {
+ n += mDecodedSizes.itemAt(mDecodedSizes.size() - numLoops + i);
+ }
+ mBufferSizes.add(n);
+
+ // fall through
+ }
+
+ /*
+ * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
+ * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
+ * rate system and the sampling rate in the final output is actually
+ * doubled compared with the core AAC decoder sampling rate.
+ *
+ * Explicit signalling is done by explicitly defining SBR audio object
+ * type in the bitstream. Implicit signalling is done by embedding
+ * SBR content in AAC extension payload specific to SBR, and hence
+ * requires an AAC decoder to perform pre-checks on actual audio frames.
+ *
+ * Thus, we could not say for sure whether a stream is
+ * AAC+/eAAC+ until the first data frame is decoded.
+ */
+ if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1
+ if (mStreamInfo->sampleRate != prevSampleRate ||
+ mStreamInfo->numChannels != prevNumChannels) {
+ ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
+ prevSampleRate, mStreamInfo->sampleRate,
+ prevNumChannels, mStreamInfo->numChannels);
+
+ notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+
+ if (inHeader && inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ mInputBufferCount++;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
+ return;
+ }
+ } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
+ ALOGW("Invalid AAC stream");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+ return;
+ }
+ if (inHeader && inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ mInputBufferCount++;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ } else {
+ ALOGV("inHeader->nFilledLen = %d", inHeader ? inHeader->nFilledLen : 0);
+ }
+ } while (decoderErr == AAC_DEC_OK);
}
- if (inHeader->nFilledLen == 0) {
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
+ int32_t outputDelay = mStreamInfo->outputDelay * mStreamInfo->numChannels;
+
+ if (!mEndOfInput && mOutputDelayCompensated < outputDelay) {
+ // discard outputDelay at the beginning
+ int32_t toCompensate = outputDelay - mOutputDelayCompensated;
+ int32_t discard = outputDelayRingBufferSamplesAvailable();
+ if (discard > toCompensate) {
+ discard = toCompensate;
+ }
+ int32_t discarded = outputDelayRingBufferGetSamples(0, discard);
+ mOutputDelayCompensated += discarded;
+ continue;
}
- /*
- * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
- * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
- * rate system and the sampling rate in the final output is actually
- * doubled compared with the core AAC decoder sampling rate.
- *
- * Explicit signalling is done by explicitly defining SBR audio object
- * type in the bitstream. Implicit signalling is done by embedding
- * SBR content in AAC extension payload specific to SBR, and hence
- * requires an AAC decoder to perform pre-checks on actual audio frames.
- *
- * Thus, we could not say for sure whether a stream is
- * AAC+/eAAC+ until the first data frame is decoded.
- */
- if (mInputBufferCount <= 2) {
- if (mStreamInfo->sampleRate != prevSampleRate ||
- mStreamInfo->numChannels != prevNumChannels) {
- maybeConfigureDownmix();
- ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
- prevSampleRate, mStreamInfo->sampleRate,
- prevNumChannels, mStreamInfo->numChannels);
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
- return;
+ if (mEndOfInput) {
+ while (mOutputDelayCompensated > 0) {
+ // a buffer big enough for MAX_CHANNEL_COUNT channels of decoded HE-AAC
+ INT_PCM tmpOutBuffer[2048 * MAX_CHANNEL_COUNT];
+
+ // run DRC check
+ mDrcWrap.submitStreamData(mStreamInfo);
+ mDrcWrap.update();
+
+ AAC_DECODER_ERROR decoderErr =
+ aacDecoder_DecodeFrame(mAACDecoder,
+ tmpOutBuffer,
+ 2048 * MAX_CHANNEL_COUNT,
+ AACDEC_FLUSH);
+ if (decoderErr != AAC_DEC_OK) {
+ ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
+ }
+
+ int32_t tmpOutBufferSamples = mStreamInfo->frameSize * mStreamInfo->numChannels;
+ if (tmpOutBufferSamples > mOutputDelayCompensated) {
+ tmpOutBufferSamples = mOutputDelayCompensated;
+ }
+ outputDelayRingBufferPutSamples(tmpOutBuffer, tmpOutBufferSamples);
+ mOutputDelayCompensated -= tmpOutBufferSamples;
}
- } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
- ALOGW("Invalid AAC stream");
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
- return;
}
- if (decoderErr == AAC_DEC_OK || mNumSamplesOutput > 0) {
- // We'll only output data if we successfully decoded it or
- // we've previously decoded valid data, in the latter case
- // (decode failed) we'll output a silent frame.
- outHeader->nFilledLen = numOutBytes;
- outHeader->nFlags = 0;
+ while (!outQueue.empty()
+ && outputDelayRingBufferSamplesAvailable()
+ >= mStreamInfo->frameSize * mStreamInfo->numChannels) {
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ if (outHeader->nOffset != 0) {
+ ALOGE("outHeader->nOffset != 0 is not handled");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ INT_PCM *outBuffer =
+ reinterpret_cast<INT_PCM *>(outHeader->pBuffer + outHeader->nOffset);
+ int samplesize = mStreamInfo->numChannels * sizeof(int16_t);
+ if (outHeader->nOffset
+ + mStreamInfo->frameSize * samplesize
+ > outHeader->nAllocLen) {
+ ALOGE("buffer overflow");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+
+ }
+
+ int available = outputDelayRingBufferSamplesAvailable();
+ int numSamples = outHeader->nAllocLen / sizeof(int16_t);
+ if (numSamples > available) {
+ numSamples = available;
+ }
+ int64_t currentTime = 0;
+ if (available) {
+
+ int numFrames = numSamples / (mStreamInfo->frameSize * mStreamInfo->numChannels);
+ numSamples = numFrames * (mStreamInfo->frameSize * mStreamInfo->numChannels);
+
+ ALOGV("%d samples available (%d), or %d frames",
+ numSamples, available, numFrames);
+ int64_t *nextTimeStamp = &mBufferTimestamps.editItemAt(0);
+ currentTime = *nextTimeStamp;
+ int32_t *currentBufLeft = &mBufferSizes.editItemAt(0);
+ for (int i = 0; i < numFrames; i++) {
+ int32_t decodedSize = mDecodedSizes.itemAt(0);
+ mDecodedSizes.removeAt(0);
+ ALOGV("decoded %d of %d", decodedSize, *currentBufLeft);
+ if (*currentBufLeft > decodedSize) {
+ // adjust/interpolate next time stamp
+ *currentBufLeft -= decodedSize;
+ *nextTimeStamp += mStreamInfo->aacSamplesPerFrame *
+ 1000000ll / mStreamInfo->sampleRate;
+ ALOGV("adjusted nextTimeStamp/size to %lld/%d",
+ *nextTimeStamp, *currentBufLeft);
+ } else {
+ // move to next timestamp in list
+ if (mBufferTimestamps.size() > 0) {
+ mBufferTimestamps.removeAt(0);
+ nextTimeStamp = &mBufferTimestamps.editItemAt(0);
+ mBufferSizes.removeAt(0);
+ currentBufLeft = &mBufferSizes.editItemAt(0);
+ ALOGV("moved to next time/size: %lld/%d",
+ *nextTimeStamp, *currentBufLeft);
+ }
+ // try to limit output buffer size to match input buffers
+ // (e.g when an input buffer contained 4 "sub" frames, output
+ // at most 4 decoded units in the corresponding output buffer)
+ // This is optional. Remove the next three lines to fill the output
+ // buffer with as many units as available.
+ numFrames = i + 1;
+ numSamples = numFrames * mStreamInfo->frameSize * mStreamInfo->numChannels;
+ break;
+ }
+ }
- outHeader->nTimeStamp =
- mAnchorTimeUs
- + (mNumSamplesOutput * 1000000ll) / mStreamInfo->sampleRate;
+ ALOGV("getting %d from ringbuffer", numSamples);
+ int32_t ns = outputDelayRingBufferGetSamples(outBuffer, numSamples);
+ if (ns != numSamples) {
+ ALOGE("not a complete frame of samples available");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ }
+
+ outHeader->nFilledLen = numSamples * sizeof(int16_t);
+
+ if (mEndOfInput && !outQueue.empty() && outputDelayRingBufferSamplesAvailable() == 0) {
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ mEndOfOutput = true;
+ } else {
+ outHeader->nFlags = 0;
+ }
- mNumSamplesOutput += mStreamInfo->frameSize;
+ outHeader->nTimeStamp = currentTime;
+ mOutputBufferCount++;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
+ ALOGV("out timestamp %lld / %d", outHeader->nTimeStamp, outHeader->nFilledLen);
notifyFillBufferDone(outHeader);
outHeader = NULL;
}
- if (decoderErr == AAC_DEC_OK) {
- ++mInputBufferCount;
+ if (mEndOfInput) {
+ if (outputDelayRingBufferSamplesAvailable() > 0
+ && outputDelayRingBufferSamplesAvailable()
+ < mStreamInfo->frameSize * mStreamInfo->numChannels) {
+ ALOGE("not a complete frame of samples available");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ if (mEndOfInput && !outQueue.empty() && outputDelayRingBufferSamplesAvailable() == 0) {
+ if (!mEndOfOutput) {
+ // send empty block signaling EOS
+ mEndOfOutput = true;
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ if (outHeader->nOffset != 0) {
+ ALOGE("outHeader->nOffset != 0 is not handled");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(outHeader->pBuffer
+ + outHeader->nOffset);
+ int32_t ns = 0;
+ outHeader->nFilledLen = 0;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+
+ outHeader->nTimeStamp = mBufferTimestamps.itemAt(0);
+ mBufferTimestamps.clear();
+ mBufferSizes.clear();
+ mDecodedSizes.clear();
+
+ mOutputBufferCount++;
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ }
+ break; // if outQueue not empty but no more output
+ }
}
}
}
@@ -582,28 +982,77 @@ void SoftAAC2::onPortFlushCompleted(OMX_U32 portIndex) {
// depend on fragments from the last one decoded.
// drain all existing data
drainDecoder();
+ mBufferTimestamps.clear();
+ mBufferSizes.clear();
+ mDecodedSizes.clear();
+ mLastInHeader = NULL;
+ } else {
+ int avail;
+ while ((avail = outputDelayRingBufferSamplesAvailable()) > 0) {
+ if (avail > mStreamInfo->frameSize * mStreamInfo->numChannels) {
+ avail = mStreamInfo->frameSize * mStreamInfo->numChannels;
+ }
+ int32_t ns = outputDelayRingBufferGetSamples(0, avail);
+ if (ns != avail) {
+ ALOGE("not a complete frame of samples available");
+ break;
+ }
+ mOutputBufferCount++;
+ }
+ mOutputDelayRingBufferReadPos = mOutputDelayRingBufferWritePos;
}
}
void SoftAAC2::drainDecoder() {
- // a buffer big enough for 6 channels of decoded HE-AAC
- short buf [2048*6];
- aacDecoder_DecodeFrame(mAACDecoder,
- buf, sizeof(buf), AACDEC_FLUSH | AACDEC_CLRHIST | AACDEC_INTR);
- aacDecoder_DecodeFrame(mAACDecoder,
- buf, sizeof(buf), AACDEC_FLUSH | AACDEC_CLRHIST | AACDEC_INTR);
- aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);
- mDecoderHasData = false;
+ int32_t outputDelay = mStreamInfo->outputDelay * mStreamInfo->numChannels;
+
+ // flush decoder until outputDelay is compensated
+ while (mOutputDelayCompensated > 0) {
+ // a buffer big enough for MAX_CHANNEL_COUNT channels of decoded HE-AAC
+ INT_PCM tmpOutBuffer[2048 * MAX_CHANNEL_COUNT];
+
+ // run DRC check
+ mDrcWrap.submitStreamData(mStreamInfo);
+ mDrcWrap.update();
+
+ AAC_DECODER_ERROR decoderErr =
+ aacDecoder_DecodeFrame(mAACDecoder,
+ tmpOutBuffer,
+ 2048 * MAX_CHANNEL_COUNT,
+ AACDEC_FLUSH);
+ if (decoderErr != AAC_DEC_OK) {
+ ALOGW("aacDecoder_DecodeFrame decoderErr = 0x%4.4x", decoderErr);
+ }
+
+ int32_t tmpOutBufferSamples = mStreamInfo->frameSize * mStreamInfo->numChannels;
+ if (tmpOutBufferSamples > mOutputDelayCompensated) {
+ tmpOutBufferSamples = mOutputDelayCompensated;
+ }
+ outputDelayRingBufferPutSamples(tmpOutBuffer, tmpOutBufferSamples);
+
+ mOutputDelayCompensated -= tmpOutBufferSamples;
+ }
}
void SoftAAC2::onReset() {
drainDecoder();
// reset the "configured" state
mInputBufferCount = 0;
- mNumSamplesOutput = 0;
+ mOutputBufferCount = 0;
+ mOutputDelayCompensated = 0;
+ mOutputDelayRingBufferWritePos = 0;
+ mOutputDelayRingBufferReadPos = 0;
+ mOutputDelayRingBufferFilled = 0;
+ mEndOfInput = false;
+ mEndOfOutput = false;
+ mBufferTimestamps.clear();
+ mBufferSizes.clear();
+ mDecodedSizes.clear();
+ mLastInHeader = NULL;
+
// To make the codec behave the same before and after a reset, we need to invalidate the
// streaminfo struct. This does that:
- mStreamInfo->sampleRate = 0;
+ mStreamInfo->sampleRate = 0; // TODO: mStreamInfo is read only
mSignalledError = false;
mOutputPortSettingsChange = NONE;
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index 2d960ab..c3e4459 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -20,6 +20,7 @@
#include "SimpleSoftOMXComponent.h"
#include "aacdecoder_lib.h"
+#include "DrcPresModeWrap.h"
namespace android {
@@ -47,16 +48,22 @@ private:
enum {
kNumInputBuffers = 4,
kNumOutputBuffers = 4,
+ kNumDelayBlocksMax = 8,
};
HANDLE_AACDECODER mAACDecoder;
CStreamInfo *mStreamInfo;
bool mIsADTS;
- bool mDecoderHasData;
+ bool mIsFirst;
size_t mInputBufferCount;
+ size_t mOutputBufferCount;
bool mSignalledError;
- int64_t mAnchorTimeUs;
- int64_t mNumSamplesOutput;
+ OMX_BUFFERHEADERTYPE *mLastInHeader;
+ Vector<int32_t> mBufferSizes;
+ Vector<int32_t> mDecodedSizes;
+ Vector<int64_t> mBufferTimestamps;
+
+ CDrcPresModeWrapper mDrcWrap;
enum {
NONE,
@@ -67,9 +74,23 @@ private:
void initPorts();
status_t initDecoder();
bool isConfigured() const;
- void maybeConfigureDownmix() const;
+ void configureDownmix() const;
void drainDecoder();
+// delay compensation
+ bool mEndOfInput;
+ bool mEndOfOutput;
+ int32_t mOutputDelayCompensated;
+ int32_t mOutputDelayRingBufferSize;
+ short *mOutputDelayRingBuffer;
+ int32_t mOutputDelayRingBufferWritePos;
+ int32_t mOutputDelayRingBufferReadPos;
+ int32_t mOutputDelayRingBufferFilled;
+ bool outputDelayRingBufferPutSamples(INT_PCM *samples, int numSamples);
+ int32_t outputDelayRingBufferGetSamples(INT_PCM *samples, int numSamples);
+ int32_t outputDelayRingBufferSamplesAvailable();
+ int32_t outputDelayRingBufferSpaceLeft();
+
DISALLOW_EVIL_CONSTRUCTORS(SoftAAC2);
};
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index 057c69b..58ec3ba 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -82,6 +82,8 @@ LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7
endif
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_STATIC_LIBRARY)
################################################################################
@@ -106,6 +108,8 @@ ifeq ($(AAC_LIBRARY), fraunhofer)
LOCAL_CFLAGS :=
+ LOCAL_CFLAGS += -Werror
+
LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
LOCAL_SHARED_LIBRARIES := \
@@ -128,6 +132,8 @@ else # visualon
LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
+ LOCAL_CFLAGS += -Werror
+
LOCAL_STATIC_LIBRARIES := \
libstagefright_aacenc
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
index ff2b503..35aa883 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include "SoftAACEncoder2.h"
+#include <OMX_AudioExt.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/hexdump.h>
@@ -44,6 +45,8 @@ SoftAACEncoder2::SoftAACEncoder2(
mNumChannels(1),
mSampleRate(44100),
mBitRate(0),
+ mSBRMode(-1),
+ mSBRRatio(0),
mAACProfile(OMX_AUDIO_AACObjectLC),
mSentCodecSpecificData(false),
mInputSize(0),
@@ -156,6 +159,41 @@ OMX_ERRORTYPE SoftAACEncoder2::internalGetParameter(
aacParams->nSampleRate = mSampleRate;
aacParams->nFrameLength = 0;
+ switch (mSBRMode) {
+ case 1: // sbr on
+ switch (mSBRRatio) {
+ case 0:
+ // set both OMX AAC tool flags
+ aacParams->nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
+ aacParams->nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
+ break;
+ case 1:
+ // set single-rate SBR active
+ aacParams->nAACtools |= OMX_AUDIO_AACToolAndroidSSBR;
+ aacParams->nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
+ break;
+ case 2:
+ // set dual-rate SBR active
+ aacParams->nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
+ aacParams->nAACtools |= OMX_AUDIO_AACToolAndroidDSBR;
+ break;
+ default:
+ ALOGE("invalid SBR ratio %d", mSBRRatio);
+ TRESPASS();
+ }
+ break;
+ case 0: // sbr off
+ case -1: // sbr undefined
+ aacParams->nAACtools &= ~OMX_AUDIO_AACToolAndroidSSBR;
+ aacParams->nAACtools &= ~OMX_AUDIO_AACToolAndroidDSBR;
+ break;
+ default:
+ ALOGE("invalid SBR mode %d", mSBRMode);
+ TRESPASS();
+ }
+
+
+
return OMX_ErrorNone;
}
@@ -243,6 +281,23 @@ OMX_ERRORTYPE SoftAACEncoder2::internalSetParameter(
mAACProfile = aacParams->eAACProfile;
}
+ if (!(aacParams->nAACtools & OMX_AUDIO_AACToolAndroidSSBR)
+ && !(aacParams->nAACtools & OMX_AUDIO_AACToolAndroidDSBR)) {
+ mSBRMode = 0;
+ mSBRRatio = 0;
+ } else if ((aacParams->nAACtools & OMX_AUDIO_AACToolAndroidSSBR)
+ && !(aacParams->nAACtools & OMX_AUDIO_AACToolAndroidDSBR)) {
+ mSBRMode = 1;
+ mSBRRatio = 1;
+ } else if (!(aacParams->nAACtools & OMX_AUDIO_AACToolAndroidSSBR)
+ && (aacParams->nAACtools & OMX_AUDIO_AACToolAndroidDSBR)) {
+ mSBRMode = 1;
+ mSBRRatio = 2;
+ } else {
+ mSBRMode = -1; // codec default sbr mode
+ mSBRRatio = 0;
+ }
+
if (setAudioParams() != OK) {
return OMX_ErrorUndefined;
}
@@ -305,11 +360,11 @@ static AUDIO_OBJECT_TYPE getAOTFromProfile(OMX_U32 profile) {
}
status_t SoftAACEncoder2::setAudioParams() {
- // We call this whenever sample rate, number of channels or bitrate change
+ // We call this whenever sample rate, number of channels, bitrate or SBR mode change
// in reponse to setParameter calls.
- ALOGV("setAudioParams: %lu Hz, %lu channels, %lu bps",
- mSampleRate, mNumChannels, mBitRate);
+ ALOGV("setAudioParams: %u Hz, %u channels, %u bps, %i sbr mode, %i sbr ratio",
+ mSampleRate, mNumChannels, mBitRate, mSBRMode, mSBRRatio);
if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_AOT,
getAOTFromProfile(mAACProfile))) {
@@ -335,10 +390,28 @@ status_t SoftAACEncoder2::setAudioParams() {
return UNKNOWN_ERROR;
}
+ if (mSBRMode != -1 && mAACProfile == OMX_AUDIO_AACObjectELD) {
+ if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SBR_MODE, mSBRMode)) {
+ ALOGE("Failed to set AAC encoder parameters");
+ return UNKNOWN_ERROR;
+ }
+ }
+
+ /* SBR ratio parameter configurations:
+ 0: Default configuration wherein SBR ratio is configured depending on audio object type by
+ the FDK.
+ 1: Downsampled SBR (default for ELD)
+ 2: Dualrate SBR (default for HE-AAC)
+ */
+ if (AACENC_OK != aacEncoder_SetParam(mAACEncoder, AACENC_SBR_RATIO, mSBRRatio)) {
+ ALOGE("Failed to set AAC encoder parameters");
+ return UNKNOWN_ERROR;
+ }
+
return OK;
}
-void SoftAACEncoder2::onQueueFilled(OMX_U32 portIndex) {
+void SoftAACEncoder2::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError) {
return;
}
@@ -364,7 +437,7 @@ void SoftAACEncoder2::onQueueFilled(OMX_U32 portIndex) {
OMX_U32 actualBitRate = aacEncoder_GetParam(mAACEncoder, AACENC_BITRATE);
if (mBitRate != actualBitRate) {
- ALOGW("Requested bitrate %lu unsupported, using %lu", mBitRate, actualBitRate);
+ ALOGW("Requested bitrate %u unsupported, using %u", mBitRate, actualBitRate);
}
AACENC_InfoStruct encInfo;
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
index 2603f4f..bce9c24 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
@@ -53,6 +53,8 @@ private:
OMX_U32 mNumChannels;
OMX_U32 mSampleRate;
OMX_U32 mBitRate;
+ OMX_S32 mSBRMode;
+ OMX_S32 mSBRRatio;
OMX_U32 mAACProfile;
bool mSentCodecSpecificData;
diff --git a/media/libstagefright/codecs/aacenc/basic_op/oper_32b.c b/media/libstagefright/codecs/aacenc/basic_op/oper_32b.c
index cc01927..1d029fc 100644
--- a/media/libstagefright/codecs/aacenc/basic_op/oper_32b.c
+++ b/media/libstagefright/codecs/aacenc/basic_op/oper_32b.c
@@ -24,6 +24,8 @@
#include "basic_op.h"
#include "oper_32b.h"
+#define UNUSED(x) (void)(x)
+
/*****************************************************************************
* *
* Function L_Extract() *
@@ -243,6 +245,8 @@ Word16 iLog4(Word32 value)
Word32 rsqrt(Word32 value, /*!< Operand to square root (0.0 ... 1) */
Word32 accuracy) /*!< Number of valid bits that will be calculated */
{
+ UNUSED(accuracy);
+
Word32 root = 0;
Word32 scale;
diff --git a/media/libstagefright/codecs/aacenc/src/aacenc.c b/media/libstagefright/codecs/aacenc/src/aacenc.c
index d1c8621..40db92c 100644
--- a/media/libstagefright/codecs/aacenc/src/aacenc.c
+++ b/media/libstagefright/codecs/aacenc/src/aacenc.c
@@ -27,6 +27,8 @@
#include "cmnMemory.h"
#include "memalign.h"
+#define UNUSED(x) (void)(x)
+
/**
* Init the audio codec module and return codec handle
* \param phCodec [OUT] Return the video codec handle
@@ -46,6 +48,8 @@ VO_U32 VO_API voAACEncInit(VO_HANDLE * phCodec,VO_AUDIO_CODINGTYPE vType, VO_COD
VO_MEM_OPERATOR *pMemOP;
int interMem;
+ UNUSED(vType);
+
interMem = 0;
error = 0;
@@ -471,6 +475,10 @@ VO_U32 VO_API voAACEncSetParam(VO_HANDLE hCodec, VO_S32 uParamID, VO_PTR pData)
*/
VO_U32 VO_API voAACEncGetParam(VO_HANDLE hCodec, VO_S32 uParamID, VO_PTR pData)
{
+ UNUSED(hCodec);
+ UNUSED(uParamID);
+ UNUSED(pData);
+
return VO_ERR_NONE;
}
diff --git a/media/libstagefright/codecs/aacenc/src/adj_thr.c b/media/libstagefright/codecs/aacenc/src/adj_thr.c
index ccfe883..471631c 100644
--- a/media/libstagefright/codecs/aacenc/src/adj_thr.c
+++ b/media/libstagefright/codecs/aacenc/src/adj_thr.c
@@ -72,7 +72,7 @@ static void calcThreshExp(Word32 thrExp[MAX_CHANNELS][MAX_GROUPED_SFB],
const Word16 nChannels)
{
Word16 ch, sfb, sfbGrp;
- Word32 *pthrExp, *psfbThre;
+ Word32 *pthrExp = NULL, *psfbThre;
for (ch=0; ch<nChannels; ch++) {
PSY_OUT_CHANNEL *psyOutChan = &psyOutChannel[ch];
for(sfbGrp = 0; sfbGrp < psyOutChan->sfbCnt; sfbGrp+= psyOutChan->sfbPerGroup)
diff --git a/media/libstagefright/codecs/aacenc/src/bitenc.c b/media/libstagefright/codecs/aacenc/src/bitenc.c
index fcc12dd..d1fd647 100644
--- a/media/libstagefright/codecs/aacenc/src/bitenc.c
+++ b/media/libstagefright/codecs/aacenc/src/bitenc.c
@@ -26,6 +26,7 @@
#include "qc_data.h"
#include "interface.h"
+#define UNUSED(x) (void)(x)
static const Word16 globalGainOffset = 100;
static const Word16 icsReservedBit = 0;
@@ -585,6 +586,8 @@ Word16 WriteBitstream (HANDLE_BIT_BUF hBitStream,
Word16 elementUsedBits;
Word16 frameBits=0;
+ UNUSED(ancBytes);
+
/* struct bitbuffer bsWriteCopy; */
bitMarkUp = GetBitsAvail(hBitStream);
if(qcOut->qcElement.adtsUsed) /* write adts header*/
diff --git a/media/libstagefright/codecs/aacenc/src/dyn_bits.c b/media/libstagefright/codecs/aacenc/src/dyn_bits.c
index 7769188..4d763d0 100644
--- a/media/libstagefright/codecs/aacenc/src/dyn_bits.c
+++ b/media/libstagefright/codecs/aacenc/src/dyn_bits.c
@@ -25,7 +25,6 @@
#include "bit_cnt.h"
#include "psy_const.h"
-
/*****************************************************************************
*
* function name: buildBitLookUp
@@ -226,7 +225,7 @@ gmStage2(SECTION_INFO *sectionInfo,
}
while (TRUE) {
- Word16 maxMergeGain, maxNdx, maxNdxNext, maxNdxLast;
+ Word16 maxMergeGain, maxNdx = 0, maxNdxNext, maxNdxLast;
maxMergeGain = findMaxMerge(mergeGainLookUp, sectionInfo, maxSfb, &maxNdx);
diff --git a/media/libstagefright/codecs/aacenc/src/psy_main.c b/media/libstagefright/codecs/aacenc/src/psy_main.c
index 4e9218c..6f0679c 100644
--- a/media/libstagefright/codecs/aacenc/src/psy_main.c
+++ b/media/libstagefright/codecs/aacenc/src/psy_main.c
@@ -38,6 +38,8 @@
#include "tns_func.h"
#include "memalign.h"
+#define UNUSED(x) (void)(x)
+
/* long start short stop */
static Word16 blockType2windowShape[] = {KBD_WINDOW,SINE_WINDOW,SINE_WINDOW,KBD_WINDOW};
@@ -170,7 +172,9 @@ Word16 PsyOutNew(PSY_OUT *hPsyOut, VO_MEM_OPERATOR *pMemOP)
*****************************************************************************/
Word16 PsyOutDelete(PSY_OUT *hPsyOut, VO_MEM_OPERATOR *pMemOP)
{
- hPsyOut=NULL;
+ UNUSED(hPsyOut);
+ UNUSED(pMemOP);
+
return 0;
}
diff --git a/media/libstagefright/codecs/aacenc/src/qc_main.c b/media/libstagefright/codecs/aacenc/src/qc_main.c
index 48ff300..e5d78aa 100644
--- a/media/libstagefright/codecs/aacenc/src/qc_main.c
+++ b/media/libstagefright/codecs/aacenc/src/qc_main.c
@@ -33,6 +33,7 @@
#include "channel_map.h"
#include "memalign.h"
+#define UNUSED(x) (void)(x)
typedef enum{
FRAME_LEN_BYTES_MODULO = 1,
@@ -204,11 +205,8 @@ Word16 QCNew(QC_STATE *hQC, VO_MEM_OPERATOR *pMemOP)
**********************************************************************************/
void QCDelete(QC_STATE *hQC, VO_MEM_OPERATOR *pMemOP)
{
-
- /*
- nothing to do
- */
- hQC=NULL;
+ UNUSED(hQC);
+ UNUSED(pMemOP);
}
/*********************************************************************************
diff --git a/media/libstagefright/codecs/aacenc/src/tns.c b/media/libstagefright/codecs/aacenc/src/tns.c
index 455a864..5172612 100644
--- a/media/libstagefright/codecs/aacenc/src/tns.c
+++ b/media/libstagefright/codecs/aacenc/src/tns.c
@@ -30,6 +30,8 @@
#include "psy_configuration.h"
#include "tns_func.h"
+#define UNUSED(x) (void)(x)
+
#define TNS_MODIFY_BEGIN 2600 /* Hz */
#define RATIO_PATCH_LOWER_BORDER 380 /* Hz */
#define TNS_GAIN_THRESH 141 /* 1.41*100 */
@@ -643,6 +645,8 @@ static Word16 CalcTnsFilter(const Word16 *signal,
Word32 i;
Word32 tnsOrderPlus1 = tnsOrder + 1;
+ UNUSED(window);
+
assert(tnsOrder <= TNS_MAX_ORDER); /* remove asserts later? (btg) */
for(i=0;i<tnsOrder;i++) {
diff --git a/media/libstagefright/codecs/amrnb/common/Android.mk b/media/libstagefright/codecs/amrnb/common/Android.mk
index 30ce29c..a2b3c8f 100644
--- a/media/libstagefright/codecs/amrnb/common/Android.mk
+++ b/media/libstagefright/codecs/amrnb/common/Android.mk
@@ -69,6 +69,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS := \
-DOSCL_UNUSED_ARG= -DOSCL_IMPORT_REF= -DOSCL_EXPORT_REF=
+LOCAL_CFLAGS += -Werror
+
LOCAL_MODULE := libstagefright_amrnb_common
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.mk b/media/libstagefright/codecs/amrnb/dec/Android.mk
index 8d6c6f8..b067456 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.mk
+++ b/media/libstagefright/codecs/amrnb/dec/Android.mk
@@ -47,6 +47,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS := \
-DOSCL_UNUSED_ARG= -DOSCL_IMPORT_REF=
+LOCAL_CFLAGS += -Werror
+
LOCAL_MODULE := libstagefright_amrnbdec
include $(BUILD_STATIC_LIBRARY)
@@ -68,6 +70,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
+LOCAL_CFLAGS += -Werror
+
LOCAL_STATIC_LIBRARIES := \
libstagefright_amrnbdec libstagefright_amrwbdec
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index 3320688..d1b0f76 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -274,7 +274,7 @@ static size_t getFrameSize(unsigned FT) {
return frameSize;
}
-void SoftAMR::onQueueFilled(OMX_U32 portIndex) {
+void SoftAMR::onQueueFilled(OMX_U32 /* portIndex */) {
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
@@ -428,7 +428,7 @@ void SoftAMR::onQueueFilled(OMX_U32 portIndex) {
}
}
-void SoftAMR::onPortFlushCompleted(OMX_U32 portIndex) {
+void SoftAMR::onPortFlushCompleted(OMX_U32 /* portIndex */) {
}
void SoftAMR::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.mk b/media/libstagefright/codecs/amrnb/enc/Android.mk
index f4e467a..afc0b89 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.mk
+++ b/media/libstagefright/codecs/amrnb/enc/Android.mk
@@ -69,6 +69,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS := \
-DOSCL_UNUSED_ARG=
+LOCAL_CFLAGS += -Werror
+
LOCAL_MODULE := libstagefright_amrnbenc
include $(BUILD_STATIC_LIBRARY)
@@ -88,6 +90,8 @@ LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/../common/include \
$(LOCAL_PATH)/../common
+LOCAL_CFLAGS += -Werror
+
LOCAL_STATIC_LIBRARIES := \
libstagefright_amrnbenc
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
index 50b739c..9489457 100644
--- a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
@@ -270,7 +270,7 @@ OMX_ERRORTYPE SoftAMRNBEncoder::internalSetParameter(
}
}
-void SoftAMRNBEncoder::onQueueFilled(OMX_U32 portIndex) {
+void SoftAMRNBEncoder::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError) {
return;
}
diff --git a/media/libstagefright/codecs/amrwb/Android.mk b/media/libstagefright/codecs/amrwb/Android.mk
index 677107f..efdf988 100644
--- a/media/libstagefright/codecs/amrwb/Android.mk
+++ b/media/libstagefright/codecs/amrwb/Android.mk
@@ -50,6 +50,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS := \
-DOSCL_UNUSED_ARG= -DOSCL_IMPORT_REF=
+LOCAL_CFLAGS += -Werror
+
LOCAL_MODULE := libstagefright_amrwbdec
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk
index c5b8e0c..64fe8d1 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/Android.mk
@@ -112,6 +112,8 @@ LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7
endif
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_STATIC_LIBRARY)
################################################################################
@@ -126,6 +128,8 @@ LOCAL_C_INCLUDES := \
frameworks/av/media/libstagefright/codecs/common/include \
frameworks/native/include/media/openmax
+LOCAL_CFLAGS += -Werror
+
LOCAL_STATIC_LIBRARIES := \
libstagefright_amrwbenc
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
index 9ccb49c..91a512d 100644
--- a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
@@ -317,7 +317,7 @@ OMX_ERRORTYPE SoftAMRWBEncoder::internalSetParameter(
}
}
-void SoftAMRWBEncoder::onQueueFilled(OMX_U32 portIndex) {
+void SoftAMRWBEncoder::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError) {
return;
}
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
index 8451195..f23b5a0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
@@ -29,6 +29,7 @@
.global Filt_6k_7k_asm
.extern voAWB_Copy
.extern fir_6k_7k
+ .hidden fir_6k_7k
Filt_6k_7k_asm:
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
index ac2dd13..deb7efc 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
@@ -32,6 +32,7 @@
.section .text
.global pred_lt4_asm
.extern inter4_2
+ .hidden inter4_2
pred_lt4_asm:
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
index fc42a03..8df0caa 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
@@ -28,6 +28,7 @@
.section .text
.global Filt_6k_7k_asm
.extern fir_6k_7k
+ .hidden fir_6k_7k
Filt_6k_7k_asm:
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
index 8d2aaf2..67be1ed 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
@@ -29,6 +29,7 @@
.section .text
.global pred_lt4_asm
.extern inter4_2
+ .hidden inter4_2
pred_lt4_asm:
diff --git a/media/libstagefright/codecs/amrwbenc/src/autocorr.c b/media/libstagefright/codecs/amrwbenc/src/autocorr.c
index 8c477ca..0b2ea89 100644
--- a/media/libstagefright/codecs/amrwbenc/src/autocorr.c
+++ b/media/libstagefright/codecs/amrwbenc/src/autocorr.c
@@ -28,6 +28,8 @@
#include "acelp.h"
#include "ham_wind.tab"
+#define UNUSED(x) (void)(x)
+
void Autocorr(
Word16 x[], /* (i) : Input signal */
Word16 m, /* (i) : LPC order */
@@ -40,6 +42,8 @@ void Autocorr(
Word32 L_sum, L_sum1, L_tmp, F_LEN;
Word16 *p1,*p2,*p3;
const Word16 *p4;
+ UNUSED(m);
+
/* Windowing of signal */
p1 = x;
p4 = vo_window;
diff --git a/media/libstagefright/codecs/amrwbenc/src/convolve.c b/media/libstagefright/codecs/amrwbenc/src/convolve.c
index acba532..4c1f7d4 100644
--- a/media/libstagefright/codecs/amrwbenc/src/convolve.c
+++ b/media/libstagefright/codecs/amrwbenc/src/convolve.c
@@ -25,6 +25,8 @@
#include "typedef.h"
#include "basic_op.h"
+#define UNUSED(x) (void)(x)
+
void Convolve (
Word16 x[], /* (i) : input vector */
Word16 h[], /* (i) : impulse response */
@@ -35,6 +37,8 @@ void Convolve (
Word32 i, n;
Word16 *tmpH,*tmpX;
Word32 s;
+ UNUSED(L);
+
for (n = 0; n < 64;)
{
tmpH = h+n;
diff --git a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
index 0d66c31..b66b55e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
@@ -31,6 +31,8 @@
#define UP_SAMP 4
#define L_INTERPOL1 4
+#define UNUSED(x) (void)(x)
+
/* Local functions */
#ifdef ASM_OPT
@@ -171,6 +173,7 @@ static void Norm_Corr(
Word32 corr, exp_corr, norm, exp, scale;
Word16 exp_norm, excf[L_SUBFR], tmp;
Word32 L_tmp, L_tmp1, L_tmp2;
+ UNUSED(L_subfr);
/* compute the filtered excitation for the first delay t_min */
k = -t_min;
diff --git a/media/libstagefright/codecs/amrwbenc/src/syn_filt.c b/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
index 1bda05a..961aadc 100644
--- a/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
+++ b/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
@@ -26,6 +26,8 @@
#include "math_op.h"
#include "cnst.h"
+#define UNUSED(x) (void)(x)
+
void Syn_filt(
Word16 a[], /* (i) Q12 : a[m+1] prediction coefficients */
Word16 x[], /* (i) : input signal */
@@ -95,6 +97,8 @@ void Syn_filt_32(
Word32 i,a0;
Word32 L_tmp, L_tmp1;
Word16 *p1, *p2, *p3;
+ UNUSED(m);
+
a0 = a[0] >> (4 + Qnew); /* input / 16 and >>Qnew */
/* Do the filtering. */
for (i = 0; i < lg; i++)
diff --git a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
index ea9da52..df7b9b3 100644
--- a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
+++ b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
@@ -39,6 +39,8 @@
#include "mem_align.h"
#include "cmnMemory.h"
+#define UNUSED(x) (void)(x)
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -1602,6 +1604,8 @@ VO_U32 VO_API voAMRWB_Init(VO_HANDLE * phCodec, /* o: the audi
VO_MEM_OPERATOR voMemoprator;
#endif
VO_MEM_OPERATOR *pMemOP;
+ UNUSED(vType);
+
int interMem = 0;
if(pUserData == NULL || pUserData->memflag != VO_IMF_USERMEMOPERATOR || pUserData->memData == NULL )
diff --git a/media/libstagefright/codecs/avc/common/Android.mk b/media/libstagefright/codecs/avc/common/Android.mk
index 22dee15..844ef0a 100644
--- a/media/libstagefright/codecs/avc/common/Android.mk
+++ b/media/libstagefright/codecs/avc/common/Android.mk
@@ -16,4 +16,6 @@ LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/src \
$(LOCAL_PATH)/include
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index 7d17c2a..537ba42 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -30,6 +30,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS := \
-DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_STATIC_LIBRARY)
################################################################################
@@ -69,4 +71,6 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_MODULE := libstagefright_soft_h264enc
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
index 4a21a3e..ed3dca0 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
@@ -34,6 +34,12 @@
#include "SoftAVCEncoder.h"
+#if LOG_NDEBUG
+#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
+#else
+#define UNUSED_UNLESS_VERBOSE(x)
+#endif
+
namespace android {
template<class T>
@@ -105,45 +111,15 @@ static status_t ConvertAvcSpecLevelToOmxAvcLevel(
return BAD_VALUE;
}
-inline static void ConvertYUV420SemiPlanarToYUV420Planar(
- uint8_t *inyuv, uint8_t* outyuv,
- int32_t width, int32_t height) {
-
- int32_t outYsize = width * height;
- uint32_t *outy = (uint32_t *) outyuv;
- uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
- uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
-
- /* Y copying */
- memcpy(outy, inyuv, outYsize);
-
- /* U & V copying */
- uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
- for (int32_t i = height >> 1; i > 0; --i) {
- for (int32_t j = width >> 2; j > 0; --j) {
- uint32_t temp = *inyuv_4++;
- uint32_t tempU = temp & 0xFF;
- tempU = tempU | ((temp >> 8) & 0xFF00);
-
- uint32_t tempV = (temp >> 8) & 0xFF;
- tempV = tempV | ((temp >> 16) & 0xFF00);
-
- // Flip U and V
- *outcb++ = tempV;
- *outcr++ = tempU;
- }
- }
-}
-
static void* MallocWrapper(
- void *userData, int32_t size, int32_t attrs) {
+ void * /* userData */, int32_t size, int32_t /* attrs */) {
void *ptr = malloc(size);
if (ptr)
memset(ptr, 0, size);
return ptr;
}
-static void FreeWrapper(void *userData, void* ptr) {
+static void FreeWrapper(void * /* userData */, void* ptr) {
free(ptr);
}
@@ -172,7 +148,7 @@ SoftAVCEncoder::SoftAVCEncoder(
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
OMX_COMPONENTTYPE **component)
- : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ : SoftVideoEncoderOMXComponent(name, callbacks, appData, component),
mVideoWidth(176),
mVideoHeight(144),
mVideoFrameRate(30),
@@ -217,7 +193,7 @@ OMX_ERRORTYPE SoftAVCEncoder::initEncParams() {
mHandle->CBAVC_Free = FreeWrapper;
CHECK(mEncParams != NULL);
- memset(mEncParams, 0, sizeof(mEncParams));
+ memset(mEncParams, 0, sizeof(*mEncParams));
mEncParams->rate_control = AVC_ON;
mEncParams->initQP = 0;
mEncParams->init_CBP_removal_delay = 1600;
@@ -254,9 +230,10 @@ OMX_ERRORTYPE SoftAVCEncoder::initEncParams() {
mEncParams->use_overrun_buffer = AVC_OFF;
- if (mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar
+ || mStoreMetaDataInBuffers) {
// Color conversion is needed.
- CHECK(mInputFrameData == NULL);
+ free(mInputFrameData);
mInputFrameData =
(uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
CHECK(mInputFrameData != NULL);
@@ -342,10 +319,10 @@ OMX_ERRORTYPE SoftAVCEncoder::releaseEncoder() {
PVAVCCleanUpEncoder(mHandle);
releaseOutputBuffers();
- delete mInputFrameData;
+ free(mInputFrameData);
mInputFrameData = NULL;
- delete mSliceGroup;
+ free(mSliceGroup);
mSliceGroup = NULL;
delete mEncParams;
@@ -593,6 +570,17 @@ OMX_ERRORTYPE SoftAVCEncoder::internalSetParameter(
mVideoHeight = def->format.video.nFrameHeight;
mVideoFrameRate = def->format.video.xFramerate >> 16;
mVideoColorFormat = def->format.video.eColorFormat;
+
+ OMX_PARAM_PORTDEFINITIONTYPE *portDef =
+ &editPortInfo(0)->mDef;
+ portDef->format.video.nFrameWidth = mVideoWidth;
+ portDef->format.video.nFrameHeight = mVideoHeight;
+ portDef->format.video.xFramerate = def->format.video.xFramerate;
+ portDef->format.video.eColorFormat =
+ (OMX_COLOR_FORMATTYPE) mVideoColorFormat;
+ portDef = &editPortInfo(1)->mDef;
+ portDef->format.video.nFrameWidth = mVideoWidth;
+ portDef->format.video.nFrameHeight = mVideoHeight;
} else {
mVideoBitRate = def->format.video.nBitrate;
}
@@ -696,11 +684,7 @@ OMX_ERRORTYPE SoftAVCEncoder::internalSetParameter(
mStoreMetaDataInBuffers ? " true" : "false");
if (mStoreMetaDataInBuffers) {
- mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar;
- if (mInputFrameData == NULL) {
- mInputFrameData =
- (uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
- }
+ mVideoColorFormat = OMX_COLOR_FormatAndroidOpaque;
}
return OMX_ErrorNone;
@@ -711,7 +695,7 @@ OMX_ERRORTYPE SoftAVCEncoder::internalSetParameter(
}
}
-void SoftAVCEncoder::onQueueFilled(OMX_U32 portIndex) {
+void SoftAVCEncoder::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError || mSawInputEOS) {
return;
}
@@ -784,8 +768,6 @@ void SoftAVCEncoder::onQueueFilled(OMX_U32 portIndex) {
}
}
- buffer_handle_t srcBuffer; // for MetaDataMode only
-
// Get next input video frame
if (mReadyForNextFrame) {
// Save the input buffer info so that it can be
@@ -806,18 +788,20 @@ void SoftAVCEncoder::onQueueFilled(OMX_U32 portIndex) {
videoInput.height = ((mVideoHeight + 15) >> 4) << 4;
videoInput.pitch = ((mVideoWidth + 15) >> 4) << 4;
videoInput.coding_timestamp = (inHeader->nTimeStamp + 500) / 1000; // in ms
- uint8_t *inputData = NULL;
+ const uint8_t *inputData = NULL;
if (mStoreMetaDataInBuffers) {
if (inHeader->nFilledLen != 8) {
ALOGE("MetaData buffer is wrong size! "
- "(got %lu bytes, expected 8)", inHeader->nFilledLen);
+ "(got %u bytes, expected 8)", inHeader->nFilledLen);
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
return;
}
inputData =
- extractGrallocData(inHeader->pBuffer + inHeader->nOffset,
- &srcBuffer);
+ extractGraphicBuffer(
+ mInputFrameData, (mVideoWidth * mVideoHeight * 3) >> 1,
+ inHeader->pBuffer + inHeader->nOffset, inHeader->nFilledLen,
+ mVideoWidth, mVideoHeight);
if (inputData == NULL) {
ALOGE("Unable to extract gralloc buffer in metadata mode");
mSignalledError = true;
@@ -826,16 +810,16 @@ void SoftAVCEncoder::onQueueFilled(OMX_U32 portIndex) {
}
// TODO: Verify/convert pixel format enum
} else {
- inputData = (uint8_t *)inHeader->pBuffer + inHeader->nOffset;
+ inputData = (const uint8_t *)inHeader->pBuffer + inHeader->nOffset;
+ if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
+ ConvertYUV420SemiPlanarToYUV420Planar(
+ inputData, mInputFrameData, mVideoWidth, mVideoHeight);
+ inputData = mInputFrameData;
+ }
}
- if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
- ConvertYUV420SemiPlanarToYUV420Planar(
- inputData, mInputFrameData, mVideoWidth, mVideoHeight);
- inputData = mInputFrameData;
- }
CHECK(inputData != NULL);
- videoInput.YCbCr[0] = inputData;
+ videoInput.YCbCr[0] = (uint8_t *)inputData;
videoInput.YCbCr[1] = videoInput.YCbCr[0] + videoInput.height * videoInput.pitch;
videoInput.YCbCr[2] = videoInput.YCbCr[1] +
((videoInput.height * videoInput.pitch) >> 2);
@@ -852,14 +836,12 @@ void SoftAVCEncoder::onQueueFilled(OMX_U32 portIndex) {
if (encoderStatus < AVCENC_SUCCESS) {
ALOGE("encoderStatus = %d at line %d", encoderStatus, __LINE__);
mSignalledError = true;
- releaseGrallocData(srcBuffer);
notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
return;
} else {
ALOGV("encoderStatus = %d at line %d", encoderStatus, __LINE__);
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
- releaseGrallocData(srcBuffer);
notifyEmptyBufferDone(inHeader);
return;
}
@@ -899,7 +881,6 @@ void SoftAVCEncoder::onQueueFilled(OMX_U32 portIndex) {
if (encoderStatus < AVCENC_SUCCESS) {
ALOGE("encoderStatus = %d at line %d", encoderStatus, __LINE__);
mSignalledError = true;
- releaseGrallocData(srcBuffer);
notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
return;
}
@@ -909,7 +890,6 @@ void SoftAVCEncoder::onQueueFilled(OMX_U32 portIndex) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
- releaseGrallocData(srcBuffer);
notifyEmptyBufferDone(inHeader);
outQueue.erase(outQueue.begin());
@@ -953,50 +933,10 @@ int32_t SoftAVCEncoder::bindOutputBuffer(int32_t index, uint8_t **yuv) {
}
void SoftAVCEncoder::signalBufferReturned(MediaBuffer *buffer) {
+ UNUSED_UNLESS_VERBOSE(buffer);
ALOGV("signalBufferReturned: %p", buffer);
}
-OMX_ERRORTYPE SoftAVCEncoder::getExtensionIndex(
- const char *name, OMX_INDEXTYPE *index) {
- if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers")) {
- *(int32_t*)index = kStoreMetaDataExtensionIndex;
- return OMX_ErrorNone;
- }
- return OMX_ErrorUndefined;
-}
-
-uint8_t *SoftAVCEncoder::extractGrallocData(void *data, buffer_handle_t *buffer) {
- OMX_U32 type = *(OMX_U32*)data;
- status_t res;
- if (type != kMetadataBufferTypeGrallocSource) {
- ALOGE("Data passed in with metadata mode does not have type "
- "kMetadataBufferTypeGrallocSource (%d), has type %ld instead",
- kMetadataBufferTypeGrallocSource, type);
- return NULL;
- }
- buffer_handle_t imgBuffer = *(buffer_handle_t*)((uint8_t*)data + 4);
-
- const Rect rect(mVideoWidth, mVideoHeight);
- uint8_t *img;
- res = GraphicBufferMapper::get().lock(imgBuffer,
- GRALLOC_USAGE_HW_VIDEO_ENCODER,
- rect, (void**)&img);
- if (res != OK) {
- ALOGE("%s: Unable to lock image buffer %p for access", __FUNCTION__,
- imgBuffer);
- return NULL;
- }
-
- *buffer = imgBuffer;
- return img;
-}
-
-void SoftAVCEncoder::releaseGrallocData(buffer_handle_t buffer) {
- if (mStoreMetaDataInBuffers) {
- GraphicBufferMapper::get().unlock(buffer);
- }
-}
-
} // namespace android
android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
index 23d5ff1..130593f 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.h
@@ -22,14 +22,14 @@
#include <utils/Vector.h>
#include "avcenc_api.h"
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoEncoderOMXComponent.h"
namespace android {
struct MediaBuffer;
struct SoftAVCEncoder : public MediaBufferObserver,
- public SimpleSoftOMXComponent {
+ public SoftVideoEncoderOMXComponent {
SoftAVCEncoder(
const char *name,
const OMX_CALLBACKTYPE *callbacks,
@@ -45,11 +45,6 @@ struct SoftAVCEncoder : public MediaBufferObserver,
virtual void onQueueFilled(OMX_U32 portIndex);
- // Override SoftOMXComponent methods
-
- virtual OMX_ERRORTYPE getExtensionIndex(
- const char *name, OMX_INDEXTYPE *index);
-
// Implement MediaBufferObserver
virtual void signalBufferReturned(MediaBuffer *buffer);
@@ -67,10 +62,6 @@ private:
kNumBuffers = 2,
};
- enum {
- kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1
- };
-
// OMX input buffer's timestamp and flags
typedef struct {
int64_t mTimeUs;
@@ -109,9 +100,6 @@ private:
OMX_ERRORTYPE releaseEncoder();
void releaseOutputBuffers();
- uint8_t* extractGrallocData(void *data, buffer_handle_t *buffer);
- void releaseGrallocData(buffer_handle_t buffer);
-
DISALLOW_EVIL_CONSTRUCTORS(SoftAVCEncoder);
};
diff --git a/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp b/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp
index 0e3037f..d71c327 100644
--- a/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp
+++ b/media/libstagefright/codecs/avc/enc/src/bitstream_io.cpp
@@ -103,6 +103,15 @@ AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream)
{
num_bits -= 8;
byte = (current_word >> num_bits) & 0xFF;
+ if (stream->count_zeros == 2)
+ { /* for num_bits = 32, this can add 2 more bytes extra for EPBS */
+ if (byte <= 3)
+ {
+ *write_pnt++ = 0x3;
+ stream->write_pos++;
+ stream->count_zeros = 0;
+ }
+ }
if (byte != 0)
{
*write_pnt++ = byte;
@@ -114,12 +123,6 @@ AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream)
stream->count_zeros++;
*write_pnt++ = byte;
stream->write_pos++;
- if (stream->count_zeros == 2)
- { /* for num_bits = 32, this can add 2 more bytes extra for EPBS */
- *write_pnt++ = 0x3;
- stream->write_pos++;
- stream->count_zeros = 0;
- }
}
}
diff --git a/media/libstagefright/codecs/common/Android.mk b/media/libstagefright/codecs/common/Android.mk
index a33cb92..b0010ff 100644
--- a/media/libstagefright/codecs/common/Android.mk
+++ b/media/libstagefright/codecs/common/Android.mk
@@ -14,6 +14,8 @@ LOCAL_STATIC_LIBRARIES :=
LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/common/Config.mk b/media/libstagefright/codecs/common/Config.mk
index a6d4286..a843cef 100644
--- a/media/libstagefright/codecs/common/Config.mk
+++ b/media/libstagefright/codecs/common/Config.mk
@@ -14,8 +14,10 @@ VOTT := pc
endif
# Do we also need to check on ARCH_ARM_HAVE_ARMV7A? - probably not
-ifeq ($(ARCH_ARM_HAVE_NEON),true)
-VOTT := v7
+ifeq ($(TARGET_ARCH),arm)
+ ifeq ($(ARCH_ARM_HAVE_NEON),true)
+ VOTT := v7
+ endif
endif
VOTEST := 0
diff --git a/media/libstagefright/codecs/common/cmnMemory.c b/media/libstagefright/codecs/common/cmnMemory.c
index aa52bd9..5bb6cc4 100644
--- a/media/libstagefright/codecs/common/cmnMemory.c
+++ b/media/libstagefright/codecs/common/cmnMemory.c
@@ -26,8 +26,12 @@
//VO_MEM_OPERATOR g_memOP;
+#define UNUSED(x) (void)(x)
+
VO_U32 cmnMemAlloc (VO_S32 uID, VO_MEM_INFO * pMemInfo)
{
+ UNUSED(uID);
+
if (!pMemInfo)
return VO_ERR_INVALID_ARG;
@@ -37,34 +41,48 @@ VO_U32 cmnMemAlloc (VO_S32 uID, VO_MEM_INFO * pMemInfo)
VO_U32 cmnMemFree (VO_S32 uID, VO_PTR pMem)
{
+ UNUSED(uID);
+
free (pMem);
return 0;
}
VO_U32 cmnMemSet (VO_S32 uID, VO_PTR pBuff, VO_U8 uValue, VO_U32 uSize)
{
+ UNUSED(uID);
+
memset (pBuff, uValue, uSize);
return 0;
}
VO_U32 cmnMemCopy (VO_S32 uID, VO_PTR pDest, VO_PTR pSource, VO_U32 uSize)
{
+ UNUSED(uID);
+
memcpy (pDest, pSource, uSize);
return 0;
}
VO_U32 cmnMemCheck (VO_S32 uID, VO_PTR pBuffer, VO_U32 uSize)
{
+ UNUSED(uID);
+ UNUSED(pBuffer);
+ UNUSED(uSize);
+
return 0;
}
VO_S32 cmnMemCompare (VO_S32 uID, VO_PTR pBuffer1, VO_PTR pBuffer2, VO_U32 uSize)
{
+ UNUSED(uID);
+
return memcmp(pBuffer1, pBuffer2, uSize);
}
VO_U32 cmnMemMove (VO_S32 uID, VO_PTR pDest, VO_PTR pSource, VO_U32 uSize)
{
+ UNUSED(uID);
+
memmove (pDest, pSource, uSize);
return 0;
}
diff --git a/media/libstagefright/codecs/flac/enc/Android.mk b/media/libstagefright/codecs/flac/enc/Android.mk
index f01d605..59a11de 100644
--- a/media/libstagefright/codecs/flac/enc/Android.mk
+++ b/media/libstagefright/codecs/flac/enc/Android.mk
@@ -9,6 +9,8 @@ LOCAL_C_INCLUDES := \
frameworks/native/include/media/openmax \
external/flac/include
+LOCAL_CFLAGS += -Werror
+
LOCAL_SHARED_LIBRARIES := \
libstagefright libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
index e64fe72..1301060 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
@@ -27,6 +27,12 @@
#define FLAC_COMPRESSION_LEVEL_DEFAULT 5
#define FLAC_COMPRESSION_LEVEL_MAX 8
+#if LOG_NDEBUG
+#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
+#else
+#define UNUSED_UNLESS_VERBOSE(x)
+#endif
+
namespace android {
template<class T>
@@ -204,7 +210,7 @@ OMX_ERRORTYPE SoftFlacEncoder::internalSetParameter(
mNumChannels = pcmParams->nChannels;
mSampleRate = pcmParams->nSamplingRate;
- ALOGV("will encode %ld channels at %ldHz", mNumChannels, mSampleRate);
+ ALOGV("will encode %d channels at %dHz", mNumChannels, mSampleRate);
return configureEncoder();
}
@@ -241,7 +247,7 @@ OMX_ERRORTYPE SoftFlacEncoder::internalSetParameter(
if (defParams->nPortIndex == 0) {
if (defParams->nBufferSize > kMaxInputBufferSize) {
- ALOGE("Input buffer size must be at most %zu bytes",
+ ALOGE("Input buffer size must be at most %d bytes",
kMaxInputBufferSize);
return OMX_ErrorUnsupportedSetting;
}
@@ -257,8 +263,8 @@ OMX_ERRORTYPE SoftFlacEncoder::internalSetParameter(
}
void SoftFlacEncoder::onQueueFilled(OMX_U32 portIndex) {
-
- ALOGV("SoftFlacEncoder::onQueueFilled(portIndex=%ld)", portIndex);
+ UNUSED_UNLESS_VERBOSE(portIndex);
+ ALOGV("SoftFlacEncoder::onQueueFilled(portIndex=%d)", portIndex);
if (mSignalledError) {
return;
@@ -290,7 +296,7 @@ void SoftFlacEncoder::onQueueFilled(OMX_U32 portIndex) {
}
if (inHeader->nFilledLen > kMaxInputBufferSize) {
- ALOGE("input buffer too large (%ld).", inHeader->nFilledLen);
+ ALOGE("input buffer too large (%d).", inHeader->nFilledLen);
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
return;
@@ -343,16 +349,17 @@ void SoftFlacEncoder::onQueueFilled(OMX_U32 portIndex) {
}
}
-
FLAC__StreamEncoderWriteStatus SoftFlacEncoder::onEncodedFlacAvailable(
const FLAC__byte buffer[],
- size_t bytes, unsigned samples, unsigned current_frame) {
- ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%d, samples=%d, curr_frame=%d)",
+ size_t bytes, unsigned samples,
+ unsigned current_frame) {
+ UNUSED_UNLESS_VERBOSE(current_frame);
+ ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%zu, samples=%u, curr_frame=%u)",
bytes, samples, current_frame);
#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
if (samples == 0) {
- ALOGI(" saving %d bytes of header", bytes);
+ ALOGI(" saving %zu bytes of header", bytes);
memcpy(mHeader + mHeaderOffset, buffer, bytes);
mHeaderOffset += bytes;// will contain header size when finished receiving header
return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
@@ -363,7 +370,7 @@ FLAC__StreamEncoderWriteStatus SoftFlacEncoder::onEncodedFlacAvailable(
if ((samples == 0) || !mEncoderWriteData) {
// called by the encoder because there's header data to save, but it's not the role
// of this component (unless WRITE_FLAC_HEADER_IN_FIRST_BUFFER is defined)
- ALOGV("ignoring %d bytes of header data (samples=%d)", bytes, samples);
+ ALOGV("ignoring %zu bytes of header data (samples=%d)", bytes, samples);
return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
}
@@ -384,9 +391,9 @@ FLAC__StreamEncoderWriteStatus SoftFlacEncoder::onEncodedFlacAvailable(
#endif
// write encoded data
- ALOGV(" writing %d bytes of encoded data on output port", bytes);
+ ALOGV(" writing %zu bytes of encoded data on output port", bytes);
if (bytes > outHeader->nAllocLen - outHeader->nOffset - outHeader->nFilledLen) {
- ALOGE(" not enough space left to write encoded data, dropping %u bytes", bytes);
+ ALOGE(" not enough space left to write encoded data, dropping %zu bytes", bytes);
// a fatal error would stop the encoding
return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
}
@@ -405,7 +412,7 @@ FLAC__StreamEncoderWriteStatus SoftFlacEncoder::onEncodedFlacAvailable(
OMX_ERRORTYPE SoftFlacEncoder::configureEncoder() {
- ALOGV("SoftFlacEncoder::configureEncoder() numChannel=%ld, sampleRate=%ld",
+ ALOGV("SoftFlacEncoder::configureEncoder() numChannel=%d, sampleRate=%d",
mNumChannels, mSampleRate);
if (mSignalledError || (mFlacStreamEncoder == NULL)) {
@@ -444,8 +451,12 @@ return_result:
// static
FLAC__StreamEncoderWriteStatus SoftFlacEncoder::flacEncoderWriteCallback(
- const FLAC__StreamEncoder *encoder, const FLAC__byte buffer[],
- size_t bytes, unsigned samples, unsigned current_frame, void *client_data) {
+ const FLAC__StreamEncoder * /* encoder */,
+ const FLAC__byte buffer[],
+ size_t bytes,
+ unsigned samples,
+ unsigned current_frame,
+ void *client_data) {
return ((SoftFlacEncoder*) client_data)->onEncodedFlacAvailable(
buffer, bytes, samples, current_frame);
}
diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk
index 4c80da6..a0112e1 100644
--- a/media/libstagefright/codecs/g711/dec/Android.mk
+++ b/media/libstagefright/codecs/g711/dec/Android.mk
@@ -14,4 +14,6 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_MODULE := libstagefright_soft_g711dec
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.cpp b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
index bcdd3c7..3a69095 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.cpp
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
@@ -117,7 +117,14 @@ OMX_ERRORTYPE SoftG711::internalGetParameter(
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
- pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+ if (pcmParams->nPortIndex == 0) {
+ // input port
+ pcmParams->ePCMMode = mIsMLaw ? OMX_AUDIO_PCMModeMULaw
+ : OMX_AUDIO_PCMModeALaw;
+ } else {
+ // output port
+ pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+ }
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
@@ -182,7 +189,7 @@ OMX_ERRORTYPE SoftG711::internalSetParameter(
}
}
-void SoftG711::onQueueFilled(OMX_U32 portIndex) {
+void SoftG711::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError) {
return;
}
@@ -212,7 +219,7 @@ void SoftG711::onQueueFilled(OMX_U32 portIndex) {
}
if (inHeader->nFilledLen > kMaxNumSamplesPerFrame) {
- ALOGE("input buffer too large (%ld).", inHeader->nFilledLen);
+ ALOGE("input buffer too large (%d).", inHeader->nFilledLen);
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk
index 71613d2..30868d5 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.mk
+++ b/media/libstagefright/codecs/gsm/dec/Android.mk
@@ -9,6 +9,8 @@ LOCAL_C_INCLUDES := \
frameworks/native/include/media/openmax \
external/libgsm/inc
+LOCAL_CFLAGS += -Werror
+
LOCAL_SHARED_LIBRARIES := \
libstagefright libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
index 00e0c85..4debc48 100644
--- a/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
+++ b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
@@ -172,7 +172,7 @@ OMX_ERRORTYPE SoftGSM::internalSetParameter(
}
}
-void SoftGSM::onQueueFilled(OMX_U32 portIndex) {
+void SoftGSM::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError) {
return;
}
@@ -202,13 +202,13 @@ void SoftGSM::onQueueFilled(OMX_U32 portIndex) {
}
if (inHeader->nFilledLen > kMaxNumSamplesPerFrame) {
- ALOGE("input buffer too large (%ld).", inHeader->nFilledLen);
+ ALOGE("input buffer too large (%d).", inHeader->nFilledLen);
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
}
if(((inHeader->nFilledLen / 65) * 65) != inHeader->nFilledLen) {
- ALOGE("input buffer not multiple of 65 (%ld).", inHeader->nFilledLen);
+ ALOGE("input buffer not multiple of 65 (%d).", inHeader->nFilledLen);
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
}
diff --git a/media/libstagefright/codecs/hevcdec/Android.mk b/media/libstagefright/codecs/hevcdec/Android.mk
new file mode 100644
index 0000000..c0c694e
--- /dev/null
+++ b/media/libstagefright/codecs/hevcdec/Android.mk
@@ -0,0 +1,30 @@
+ifeq ($(if $(wildcard external/libhevc),1,0),1)
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libstagefright_soft_hevcdec
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_STATIC_LIBRARIES := libhevcdec
+LOCAL_SRC_FILES := SoftHEVC.cpp
+
+LOCAL_C_INCLUDES := $(TOP)/external/libhevc/decoder
+LOCAL_C_INCLUDES += $(TOP)/external/libhevc/common
+LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
+LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
+
+LOCAL_SHARED_LIBRARIES := libstagefright
+LOCAL_SHARED_LIBRARIES += libstagefright_omx
+LOCAL_SHARED_LIBRARIES += libstagefright_foundation
+LOCAL_SHARED_LIBRARIES += libutils
+LOCAL_SHARED_LIBRARIES += liblog
+
+# We need this because the current asm generates the following link error:
+# requires unsupported dynamic reloc R_ARM_REL32; recompile with -fPIC
+# Bug: 16853291
+LOCAL_LDFLAGS := -Wl,-Bsymbolic
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
new file mode 100644
index 0000000..f4cba54
--- /dev/null
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -0,0 +1,765 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftHEVC"
+#include <utils/Log.h>
+
+#include "ihevc_typedefs.h"
+#include "iv.h"
+#include "ivd.h"
+#include "ithread.h"
+#include "ihevcd_cxa.h"
+#include "SoftHEVC.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <OMX_VideoExt.h>
+
+namespace android {
+
+#define componentName "video_decoder.hevc"
+#define codingType OMX_VIDEO_CodingHEVC
+#define CODEC_MIME_TYPE MEDIA_MIMETYPE_VIDEO_HEVC
+
+/** Function and structure definitions to keep code similar for each codec */
+#define ivdec_api_function ihevcd_cxa_api_function
+#define ivdext_init_ip_t ihevcd_cxa_init_ip_t
+#define ivdext_init_op_t ihevcd_cxa_init_op_t
+#define ivdext_fill_mem_rec_ip_t ihevcd_cxa_fill_mem_rec_ip_t
+#define ivdext_fill_mem_rec_op_t ihevcd_cxa_fill_mem_rec_op_t
+#define ivdext_ctl_set_num_cores_ip_t ihevcd_cxa_ctl_set_num_cores_ip_t
+#define ivdext_ctl_set_num_cores_op_t ihevcd_cxa_ctl_set_num_cores_op_t
+
+#define IVDEXT_CMD_CTL_SET_NUM_CORES \
+ (IVD_CONTROL_API_COMMAND_TYPE_T)IHEVCD_CXA_CMD_CTL_SET_NUM_CORES
+
+static const CodecProfileLevel kProfileLevels[] = {
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel1 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel2 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel21 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel3 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel31 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel4 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel41 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel5 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel51 },
+};
+
+SoftHEVC::SoftHEVC(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SoftVideoDecoderOMXComponent(name, componentName, codingType,
+ kProfileLevels, ARRAY_SIZE(kProfileLevels),
+ 320 /* width */, 240 /* height */, callbacks,
+ appData, component),
+ mMemRecords(NULL),
+ mFlushOutBuffer(NULL),
+ mOmxColorFormat(OMX_COLOR_FormatYUV420Planar),
+ mIvColorFormat(IV_YUV_420P),
+ mNewWidth(mWidth),
+ mNewHeight(mHeight),
+ mChangingResolution(false) {
+ initPorts(kNumBuffers, INPUT_BUF_SIZE, kNumBuffers,
+ CODEC_MIME_TYPE);
+ CHECK_EQ(initDecoder(), (status_t)OK);
+}
+
+SoftHEVC::~SoftHEVC() {
+ ALOGD("In SoftHEVC::~SoftHEVC");
+ CHECK_EQ(deInitDecoder(), (status_t)OK);
+}
+
+static size_t GetCPUCoreCount() {
+ long cpuCoreCount = 1;
+#if defined(_SC_NPROCESSORS_ONLN)
+ cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
+#else
+ // _SC_NPROC_ONLN must be defined...
+ cpuCoreCount = sysconf(_SC_NPROC_ONLN);
+#endif
+ CHECK(cpuCoreCount >= 1);
+ ALOGD("Number of CPU cores: %ld", cpuCoreCount);
+ return (size_t)cpuCoreCount;
+}
+
+void SoftHEVC::logVersion() {
+ ivd_ctl_getversioninfo_ip_t s_ctl_ip;
+ ivd_ctl_getversioninfo_op_t s_ctl_op;
+ UWORD8 au1_buf[512];
+ IV_API_CALL_STATUS_T status;
+
+ s_ctl_ip.e_cmd = IVD_CMD_VIDEO_CTL;
+ s_ctl_ip.e_sub_cmd = IVD_CMD_CTL_GETVERSION;
+ s_ctl_ip.u4_size = sizeof(ivd_ctl_getversioninfo_ip_t);
+ s_ctl_op.u4_size = sizeof(ivd_ctl_getversioninfo_op_t);
+ s_ctl_ip.pv_version_buffer = au1_buf;
+ s_ctl_ip.u4_version_buffer_size = sizeof(au1_buf);
+
+ status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip,
+ (void *)&s_ctl_op);
+
+ if (status != IV_SUCCESS) {
+ ALOGE("Error in getting version number: 0x%x",
+ s_ctl_op.u4_error_code);
+ } else {
+ ALOGD("Ittiam decoder version number: %s",
+ (char *)s_ctl_ip.pv_version_buffer);
+ }
+ return;
+}
+
+status_t SoftHEVC::setParams(size_t stride) {
+ ivd_ctl_set_config_ip_t s_ctl_ip;
+ ivd_ctl_set_config_op_t s_ctl_op;
+ IV_API_CALL_STATUS_T status;
+ s_ctl_ip.u4_disp_wd = (UWORD32)stride;
+ s_ctl_ip.e_frm_skip_mode = IVD_SKIP_NONE;
+
+ s_ctl_ip.e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
+ s_ctl_ip.e_vid_dec_mode = IVD_DECODE_FRAME;
+ s_ctl_ip.e_cmd = IVD_CMD_VIDEO_CTL;
+ s_ctl_ip.e_sub_cmd = IVD_CMD_CTL_SETPARAMS;
+ s_ctl_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
+ s_ctl_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
+
+ ALOGV("Set the run-time (dynamic) parameters stride = %u", stride);
+ status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip,
+ (void *)&s_ctl_op);
+
+ if (status != IV_SUCCESS) {
+ ALOGE("Error in setting the run-time parameters: 0x%x",
+ s_ctl_op.u4_error_code);
+
+ return UNKNOWN_ERROR;
+ }
+ return OK;
+}
+
+status_t SoftHEVC::resetPlugin() {
+ mIsInFlush = false;
+ mReceivedEOS = false;
+ memset(mTimeStamps, 0, sizeof(mTimeStamps));
+ memset(mTimeStampsValid, 0, sizeof(mTimeStampsValid));
+
+ /* Initialize both start and end times */
+ gettimeofday(&mTimeStart, NULL);
+ gettimeofday(&mTimeEnd, NULL);
+
+ return OK;
+}
+
+status_t SoftHEVC::resetDecoder() {
+ ivd_ctl_reset_ip_t s_ctl_ip;
+ ivd_ctl_reset_op_t s_ctl_op;
+ IV_API_CALL_STATUS_T status;
+
+ s_ctl_ip.e_cmd = IVD_CMD_VIDEO_CTL;
+ s_ctl_ip.e_sub_cmd = IVD_CMD_CTL_RESET;
+ s_ctl_ip.u4_size = sizeof(ivd_ctl_reset_ip_t);
+ s_ctl_op.u4_size = sizeof(ivd_ctl_reset_op_t);
+
+ status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip,
+ (void *)&s_ctl_op);
+ if (IV_SUCCESS != status) {
+ ALOGE("Error in reset: 0x%x", s_ctl_op.u4_error_code);
+ return UNKNOWN_ERROR;
+ }
+
+ /* Set the run-time (dynamic) parameters */
+ setParams(outputBufferWidth());
+
+ /* Set number of cores/threads to be used by the codec */
+ setNumCores();
+
+ return OK;
+}
+
+status_t SoftHEVC::setNumCores() {
+ ivdext_ctl_set_num_cores_ip_t s_set_cores_ip;
+ ivdext_ctl_set_num_cores_op_t s_set_cores_op;
+ IV_API_CALL_STATUS_T status;
+ s_set_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL;
+ s_set_cores_ip.e_sub_cmd = IVDEXT_CMD_CTL_SET_NUM_CORES;
+ s_set_cores_ip.u4_num_cores = MIN(mNumCores, CODEC_MAX_NUM_CORES);
+ s_set_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t);
+ s_set_cores_op.u4_size = sizeof(ivdext_ctl_set_num_cores_op_t);
+ ALOGD("Set number of cores to %u", s_set_cores_ip.u4_num_cores);
+ status = ivdec_api_function(mCodecCtx, (void *)&s_set_cores_ip,
+ (void *)&s_set_cores_op);
+ if (IV_SUCCESS != status) {
+ ALOGE("Error in setting number of cores: 0x%x",
+ s_set_cores_op.u4_error_code);
+ return UNKNOWN_ERROR;
+ }
+ return OK;
+}
+
+status_t SoftHEVC::setFlushMode() {
+ IV_API_CALL_STATUS_T status;
+ ivd_ctl_flush_ip_t s_video_flush_ip;
+ ivd_ctl_flush_op_t s_video_flush_op;
+
+ s_video_flush_ip.e_cmd = IVD_CMD_VIDEO_CTL;
+ s_video_flush_ip.e_sub_cmd = IVD_CMD_CTL_FLUSH;
+ s_video_flush_ip.u4_size = sizeof(ivd_ctl_flush_ip_t);
+ s_video_flush_op.u4_size = sizeof(ivd_ctl_flush_op_t);
+ ALOGD("Set the decoder in flush mode ");
+
+ /* Set the decoder in Flush mode, subsequent decode() calls will flush */
+ status = ivdec_api_function(mCodecCtx, (void *)&s_video_flush_ip,
+ (void *)&s_video_flush_op);
+
+ if (status != IV_SUCCESS) {
+ ALOGE("Error in setting the decoder in flush mode: (%d) 0x%x", status,
+ s_video_flush_op.u4_error_code);
+ return UNKNOWN_ERROR;
+ }
+
+ mIsInFlush = true;
+ return OK;
+}
+
+status_t SoftHEVC::initDecoder() {
+ IV_API_CALL_STATUS_T status;
+
+ UWORD32 u4_num_reorder_frames;
+ UWORD32 u4_num_ref_frames;
+ UWORD32 u4_share_disp_buf;
+ WORD32 i4_level;
+
+ mNumCores = GetCPUCoreCount();
+
+ /* Initialize number of ref and reorder modes (for HEVC) */
+ u4_num_reorder_frames = 16;
+ u4_num_ref_frames = 16;
+ u4_share_disp_buf = 0;
+
+ uint32_t displayStride = outputBufferWidth();
+ uint32_t displayHeight = outputBufferHeight();
+ uint32_t displaySizeY = displayStride * displayHeight;
+
+ if (displaySizeY > (1920 * 1088)) {
+ i4_level = 50;
+ } else if (displaySizeY > (1280 * 720)) {
+ i4_level = 40;
+ } else if (displaySizeY > (960 * 540)) {
+ i4_level = 31;
+ } else if (displaySizeY > (640 * 360)) {
+ i4_level = 30;
+ } else if (displaySizeY > (352 * 288)) {
+ i4_level = 21;
+ } else {
+ i4_level = 20;
+ }
+ {
+ iv_num_mem_rec_ip_t s_num_mem_rec_ip;
+ iv_num_mem_rec_op_t s_num_mem_rec_op;
+
+ s_num_mem_rec_ip.u4_size = sizeof(s_num_mem_rec_ip);
+ s_num_mem_rec_op.u4_size = sizeof(s_num_mem_rec_op);
+ s_num_mem_rec_ip.e_cmd = IV_CMD_GET_NUM_MEM_REC;
+
+ ALOGV("Get number of mem records");
+ status = ivdec_api_function(mCodecCtx, (void*)&s_num_mem_rec_ip,
+ (void*)&s_num_mem_rec_op);
+ if (IV_SUCCESS != status) {
+ ALOGE("Error in getting mem records: 0x%x",
+ s_num_mem_rec_op.u4_error_code);
+ return UNKNOWN_ERROR;
+ }
+
+ mNumMemRecords = s_num_mem_rec_op.u4_num_mem_rec;
+ }
+
+ mMemRecords = (iv_mem_rec_t*)ivd_aligned_malloc(
+ 128, mNumMemRecords * sizeof(iv_mem_rec_t));
+ if (mMemRecords == NULL) {
+ ALOGE("Allocation failure");
+ return NO_MEMORY;
+ }
+
+ memset(mMemRecords, 0, mNumMemRecords * sizeof(iv_mem_rec_t));
+
+ {
+ size_t i;
+ ivdext_fill_mem_rec_ip_t s_fill_mem_ip;
+ ivdext_fill_mem_rec_op_t s_fill_mem_op;
+ iv_mem_rec_t *ps_mem_rec;
+
+ s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_size =
+ sizeof(ivdext_fill_mem_rec_ip_t);
+ s_fill_mem_ip.i4_level = i4_level;
+ s_fill_mem_ip.u4_num_reorder_frames = u4_num_reorder_frames;
+ s_fill_mem_ip.u4_num_ref_frames = u4_num_ref_frames;
+ s_fill_mem_ip.u4_share_disp_buf = u4_share_disp_buf;
+ s_fill_mem_ip.u4_num_extra_disp_buf = 0;
+ s_fill_mem_ip.e_output_format = mIvColorFormat;
+
+ s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.e_cmd = IV_CMD_FILL_NUM_MEM_REC;
+ s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.pv_mem_rec_location = mMemRecords;
+ s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_wd = displayStride;
+ s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_ht = displayHeight;
+ s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_size =
+ sizeof(ivdext_fill_mem_rec_op_t);
+
+ ps_mem_rec = mMemRecords;
+ for (i = 0; i < mNumMemRecords; i++)
+ ps_mem_rec[i].u4_size = sizeof(iv_mem_rec_t);
+
+ status = ivdec_api_function(mCodecCtx, (void *)&s_fill_mem_ip,
+ (void *)&s_fill_mem_op);
+
+ if (IV_SUCCESS != status) {
+ ALOGE("Error in filling mem records: 0x%x",
+ s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_error_code);
+ return UNKNOWN_ERROR;
+ }
+ mNumMemRecords =
+ s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_num_mem_rec_filled;
+
+ ps_mem_rec = mMemRecords;
+
+ for (i = 0; i < mNumMemRecords; i++) {
+ ps_mem_rec->pv_base = ivd_aligned_malloc(
+ ps_mem_rec->u4_mem_alignment, ps_mem_rec->u4_mem_size);
+ if (ps_mem_rec->pv_base == NULL) {
+ ALOGE("Allocation failure for memory record #%zu of size %u",
+ i, ps_mem_rec->u4_mem_size);
+ status = IV_FAIL;
+ return NO_MEMORY;
+ }
+
+ ps_mem_rec++;
+ }
+ }
+
+ /* Initialize the decoder */
+ {
+ ivdext_init_ip_t s_init_ip;
+ ivdext_init_op_t s_init_op;
+
+ void *dec_fxns = (void *)ivdec_api_function;
+
+ s_init_ip.s_ivd_init_ip_t.u4_size = sizeof(ivdext_init_ip_t);
+ s_init_ip.s_ivd_init_ip_t.e_cmd = (IVD_API_COMMAND_TYPE_T)IV_CMD_INIT;
+ s_init_ip.s_ivd_init_ip_t.pv_mem_rec_location = mMemRecords;
+ s_init_ip.s_ivd_init_ip_t.u4_frm_max_wd = displayStride;
+ s_init_ip.s_ivd_init_ip_t.u4_frm_max_ht = displayHeight;
+
+ s_init_ip.i4_level = i4_level;
+ s_init_ip.u4_num_reorder_frames = u4_num_reorder_frames;
+ s_init_ip.u4_num_ref_frames = u4_num_ref_frames;
+ s_init_ip.u4_share_disp_buf = u4_share_disp_buf;
+ s_init_ip.u4_num_extra_disp_buf = 0;
+
+ s_init_op.s_ivd_init_op_t.u4_size = sizeof(s_init_op);
+
+ s_init_ip.s_ivd_init_ip_t.u4_num_mem_rec = mNumMemRecords;
+ s_init_ip.s_ivd_init_ip_t.e_output_format = mIvColorFormat;
+
+ mCodecCtx = (iv_obj_t*)mMemRecords[0].pv_base;
+ mCodecCtx->pv_fxns = dec_fxns;
+ mCodecCtx->u4_size = sizeof(iv_obj_t);
+
+ ALOGD("Initializing decoder");
+ status = ivdec_api_function(mCodecCtx, (void *)&s_init_ip,
+ (void *)&s_init_op);
+ if (status != IV_SUCCESS) {
+ ALOGE("Error in init: 0x%x",
+ s_init_op.s_ivd_init_op_t.u4_error_code);
+ return UNKNOWN_ERROR;
+ }
+ }
+
+ /* Reset the plugin state */
+ resetPlugin();
+
+ /* Set the run time (dynamic) parameters */
+ setParams(displayStride);
+
+ /* Set number of cores/threads to be used by the codec */
+ setNumCores();
+
+ /* Get codec version */
+ logVersion();
+
+ /* Allocate internal picture buffer */
+ uint32_t bufferSize = displaySizeY * 3 / 2;
+ mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, bufferSize);
+ if (NULL == mFlushOutBuffer) {
+ ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
+ return NO_MEMORY;
+ }
+
+ mInitNeeded = false;
+ mFlushNeeded = false;
+ return OK;
+}
+
+status_t SoftHEVC::deInitDecoder() {
+ size_t i;
+
+ if (mMemRecords) {
+ iv_mem_rec_t *ps_mem_rec;
+
+ ps_mem_rec = mMemRecords;
+ ALOGD("Freeing codec memory");
+ for (i = 0; i < mNumMemRecords; i++) {
+ if(ps_mem_rec->pv_base) {
+ ivd_aligned_free(ps_mem_rec->pv_base);
+ }
+ ps_mem_rec++;
+ }
+ ivd_aligned_free(mMemRecords);
+ mMemRecords = NULL;
+ }
+
+ if(mFlushOutBuffer) {
+ ivd_aligned_free(mFlushOutBuffer);
+ mFlushOutBuffer = NULL;
+ }
+
+ mInitNeeded = true;
+ mChangingResolution = false;
+
+ return OK;
+}
+
+status_t SoftHEVC::reInitDecoder() {
+ status_t ret;
+
+ deInitDecoder();
+
+ ret = initDecoder();
+ if (OK != ret) {
+ ALOGE("Create failure");
+ deInitDecoder();
+ return NO_MEMORY;
+ }
+ return OK;
+}
+
+void SoftHEVC::onReset() {
+ ALOGD("onReset called");
+ SoftVideoDecoderOMXComponent::onReset();
+
+ resetDecoder();
+ resetPlugin();
+}
+
+OMX_ERRORTYPE SoftHEVC::internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params) {
+ const uint32_t oldWidth = mWidth;
+ const uint32_t oldHeight = mHeight;
+ OMX_ERRORTYPE ret = SoftVideoDecoderOMXComponent::internalSetParameter(index, params);
+ if (mWidth != oldWidth || mHeight != oldHeight) {
+ reInitDecoder();
+ }
+ return ret;
+}
+
+void SoftHEVC::setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip,
+ ivd_video_decode_op_t *ps_dec_op,
+ OMX_BUFFERHEADERTYPE *inHeader,
+ OMX_BUFFERHEADERTYPE *outHeader,
+ size_t timeStampIx) {
+ size_t sizeY = outputBufferWidth() * outputBufferHeight();
+ size_t sizeUV;
+ uint8_t *pBuf;
+
+ ps_dec_ip->u4_size = sizeof(ivd_video_decode_ip_t);
+ ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
+
+ ps_dec_ip->e_cmd = IVD_CMD_VIDEO_DECODE;
+
+ /* When in flush and after EOS with zero byte input,
+ * inHeader is set to zero. Hence check for non-null */
+ if (inHeader) {
+ ps_dec_ip->u4_ts = timeStampIx;
+ ps_dec_ip->pv_stream_buffer = inHeader->pBuffer
+ + inHeader->nOffset;
+ ps_dec_ip->u4_num_Bytes = inHeader->nFilledLen;
+ } else {
+ ps_dec_ip->u4_ts = 0;
+ ps_dec_ip->pv_stream_buffer = NULL;
+ ps_dec_ip->u4_num_Bytes = 0;
+ }
+
+ if (outHeader) {
+ pBuf = outHeader->pBuffer;
+ } else {
+ pBuf = mFlushOutBuffer;
+ }
+
+ sizeUV = sizeY / 4;
+ ps_dec_ip->s_out_buffer.u4_min_out_buf_size[0] = sizeY;
+ ps_dec_ip->s_out_buffer.u4_min_out_buf_size[1] = sizeUV;
+ ps_dec_ip->s_out_buffer.u4_min_out_buf_size[2] = sizeUV;
+
+ ps_dec_ip->s_out_buffer.pu1_bufs[0] = pBuf;
+ ps_dec_ip->s_out_buffer.pu1_bufs[1] = pBuf + sizeY;
+ ps_dec_ip->s_out_buffer.pu1_bufs[2] = pBuf + sizeY + sizeUV;
+ ps_dec_ip->s_out_buffer.u4_num_bufs = 3;
+ return;
+}
+void SoftHEVC::onPortFlushCompleted(OMX_U32 portIndex) {
+ /* Once the output buffers are flushed, ignore any buffers that are held in decoder */
+ if (kOutputPortIndex == portIndex) {
+ setFlushMode();
+
+ while (true) {
+ ivd_video_decode_ip_t s_dec_ip;
+ ivd_video_decode_op_t s_dec_op;
+ IV_API_CALL_STATUS_T status;
+ size_t sizeY, sizeUV;
+
+ setDecodeArgs(&s_dec_ip, &s_dec_op, NULL, NULL, 0);
+
+ status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip,
+ (void *)&s_dec_op);
+ if (0 == s_dec_op.u4_output_present) {
+ resetPlugin();
+ break;
+ }
+ }
+ }
+}
+
+void SoftHEVC::onQueueFilled(OMX_U32 portIndex) {
+ UNUSED(portIndex);
+
+ if (mOutputPortSettingsChange != NONE) {
+ return;
+ }
+
+ List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
+ List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
+
+ /* If input EOS is seen and decoder is not in flush mode,
+ * set the decoder in flush mode.
+ * There can be a case where EOS is sent along with last picture data
+ * In that case, only after decoding that input data, decoder has to be
+ * put in flush. This case is handled here */
+
+ if (mReceivedEOS && !mIsInFlush) {
+ setFlushMode();
+ }
+
+ while (!outQueue.empty()) {
+ BufferInfo *inInfo;
+ OMX_BUFFERHEADERTYPE *inHeader;
+
+ BufferInfo *outInfo;
+ OMX_BUFFERHEADERTYPE *outHeader;
+ size_t timeStampIx;
+
+ inInfo = NULL;
+ inHeader = NULL;
+
+ if (!mIsInFlush) {
+ if (!inQueue.empty()) {
+ inInfo = *inQueue.begin();
+ inHeader = inInfo->mHeader;
+ } else {
+ break;
+ }
+ }
+
+ outInfo = *outQueue.begin();
+ outHeader = outInfo->mHeader;
+ outHeader->nFlags = 0;
+ outHeader->nTimeStamp = 0;
+ outHeader->nOffset = 0;
+
+ if (inHeader != NULL && (inHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
+ ALOGD("EOS seen on input");
+ mReceivedEOS = true;
+ if (inHeader->nFilledLen == 0) {
+ inQueue.erase(inQueue.begin());
+ inInfo->mOwnedByUs = false;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ setFlushMode();
+ }
+ }
+
+ // When there is an init required and the decoder is not in flush mode,
+ // update output port's definition and reinitialize decoder.
+ if (mInitNeeded && !mIsInFlush) {
+ bool portWillReset = false;
+ handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
+
+ CHECK_EQ(reInitDecoder(), (status_t)OK);
+ return;
+ }
+
+ /* Get a free slot in timestamp array to hold input timestamp */
+ {
+ size_t i;
+ timeStampIx = 0;
+ for (i = 0; i < MAX_TIME_STAMPS; i++) {
+ if (!mTimeStampsValid[i]) {
+ timeStampIx = i;
+ break;
+ }
+ }
+ if (inHeader != NULL) {
+ mTimeStampsValid[timeStampIx] = true;
+ mTimeStamps[timeStampIx] = inHeader->nTimeStamp;
+ }
+ }
+
+ {
+ ivd_video_decode_ip_t s_dec_ip;
+ ivd_video_decode_op_t s_dec_op;
+ WORD32 timeDelay, timeTaken;
+ size_t sizeY, sizeUV;
+
+ setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
+
+ GETTIME(&mTimeStart, NULL);
+ /* Compute time elapsed between end of previous decode()
+ * to start of current decode() */
+ TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
+
+ IV_API_CALL_STATUS_T status;
+ status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+ // FIXME: Compare |status| to IHEVCD_UNSUPPORTED_DIMENSIONS, which is not one of the
+ // IV_API_CALL_STATUS_T, seems be wrong. But this is what the decoder returns right now.
+ // The decoder should be fixed so that |u4_error_code| instead of |status| returns
+ // IHEVCD_UNSUPPORTED_DIMENSIONS.
+ bool unsupportedDimensions =
+ ((IHEVCD_UNSUPPORTED_DIMENSIONS == status)
+ || (IHEVCD_UNSUPPORTED_DIMENSIONS == s_dec_op.u4_error_code));
+ bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
+
+ GETTIME(&mTimeEnd, NULL);
+ /* Compute time taken for decode() */
+ TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
+
+ ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
+ s_dec_op.u4_num_bytes_consumed);
+ if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
+ mFlushNeeded = true;
+ }
+
+ if ((inHeader != NULL) && (1 != s_dec_op.u4_frame_decoded_flag)) {
+ /* If the input did not contain picture data, then ignore
+ * the associated timestamp */
+ mTimeStampsValid[timeStampIx] = false;
+ }
+
+ // This is needed to handle CTS DecoderTest testCodecResetsHEVCWithoutSurface,
+ // which is not sending SPS/PPS after port reconfiguration and flush to the codec.
+ if (unsupportedDimensions && !mFlushNeeded) {
+ bool portWillReset = false;
+ handlePortSettingsChange(&portWillReset, s_dec_op.u4_pic_wd, s_dec_op.u4_pic_ht);
+
+ CHECK_EQ(reInitDecoder(), (status_t)OK);
+
+ setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
+
+ ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+ return;
+ }
+
+ // If the decoder is in the changing resolution mode and there is no output present,
+ // that means the switching is done and it's ready to reset the decoder and the plugin.
+ if (mChangingResolution && !s_dec_op.u4_output_present) {
+ mChangingResolution = false;
+ resetDecoder();
+ resetPlugin();
+ continue;
+ }
+
+ if (unsupportedDimensions || resChanged) {
+ mChangingResolution = true;
+ if (mFlushNeeded) {
+ setFlushMode();
+ }
+
+ if (unsupportedDimensions) {
+ mNewWidth = s_dec_op.u4_pic_wd;
+ mNewHeight = s_dec_op.u4_pic_ht;
+ mInitNeeded = true;
+ }
+ continue;
+ }
+
+ if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
+ uint32_t width = s_dec_op.u4_pic_wd;
+ uint32_t height = s_dec_op.u4_pic_ht;
+ bool portWillReset = false;
+ handlePortSettingsChange(&portWillReset, width, height);
+
+ if (portWillReset) {
+ resetDecoder();
+ return;
+ }
+ }
+
+ if (s_dec_op.u4_output_present) {
+ outHeader->nFilledLen = (mWidth * mHeight * 3) / 2;
+
+ outHeader->nTimeStamp = mTimeStamps[s_dec_op.u4_ts];
+ mTimeStampsValid[s_dec_op.u4_ts] = false;
+
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ } else {
+ /* If in flush mode and no output is returned by the codec,
+ * then come out of flush mode */
+ mIsInFlush = false;
+
+ /* If EOS was received on input port and there is no output
+ * from the codec, then signal EOS on output port */
+ if (mReceivedEOS) {
+ outHeader->nFilledLen = 0;
+ outHeader->nFlags |= OMX_BUFFERFLAG_EOS;
+
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ resetPlugin();
+ }
+ }
+ }
+
+ // TODO: Handle more than one picture data
+ if (inHeader != NULL) {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
+ }
+}
+
+} // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(const char *name,
+ const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData,
+ OMX_COMPONENTTYPE **component) {
+ return new android::SoftHEVC(name, callbacks, appData, component);
+}
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.h b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
new file mode 100644
index 0000000..a91f528
--- /dev/null
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_HEVC_H_
+
+#define SOFT_HEVC_H_
+
+#include "SoftVideoDecoderOMXComponent.h"
+#include <sys/time.h>
+
+namespace android {
+
+#define ivd_aligned_malloc(alignment, size) memalign(alignment, size)
+#define ivd_aligned_free(buf) free(buf)
+
+/** Number of entries in the time-stamp array */
+#define MAX_TIME_STAMPS 64
+
+/** Maximum number of cores supported by the codec */
+#define CODEC_MAX_NUM_CORES 4
+
+#define CODEC_MAX_WIDTH 1920
+
+#define CODEC_MAX_HEIGHT 1088
+
+/** Input buffer size */
+#define INPUT_BUF_SIZE (1024 * 1024)
+
+#define MIN(a, b) ((a) < (b)) ? (a) : (b)
+
+/** Used to remove warnings about unused parameters */
+#define UNUSED(x) ((void)(x))
+
+/** Get time */
+#define GETTIME(a, b) gettimeofday(a, b);
+
+/** Compute difference between start and end */
+#define TIME_DIFF(start, end, diff) \
+ diff = ((end.tv_sec - start.tv_sec) * 1000000) + \
+ (end.tv_usec - start.tv_usec);
+
+struct SoftHEVC: public SoftVideoDecoderOMXComponent {
+ SoftHEVC(const char *name, const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData, OMX_COMPONENTTYPE **component);
+
+protected:
+ virtual ~SoftHEVC();
+
+ virtual void onQueueFilled(OMX_U32 portIndex);
+ virtual void onPortFlushCompleted(OMX_U32 portIndex);
+ virtual void onReset();
+ virtual OMX_ERRORTYPE internalSetParameter(OMX_INDEXTYPE index, const OMX_PTR params);
+private:
+ // Number of input and output buffers
+ enum {
+ kNumBuffers = 8
+ };
+
+ iv_obj_t *mCodecCtx; // Codec context
+ iv_mem_rec_t *mMemRecords; // Memory records requested by the codec
+ size_t mNumMemRecords; // Number of memory records requested by the codec
+
+ size_t mNumCores; // Number of cores to be uesd by the codec
+
+ struct timeval mTimeStart; // Time at the start of decode()
+ struct timeval mTimeEnd; // Time at the end of decode()
+
+ // Internal buffer to be used to flush out the buffers from decoder
+ uint8_t *mFlushOutBuffer;
+
+ // Status of entries in the timestamp array
+ bool mTimeStampsValid[MAX_TIME_STAMPS];
+
+ // Timestamp array - Since codec does not take 64 bit timestamps,
+ // they are maintained in the plugin
+ OMX_S64 mTimeStamps[MAX_TIME_STAMPS];
+
+ OMX_COLOR_FORMATTYPE mOmxColorFormat; // OMX Color format
+ IV_COLOR_FORMAT_T mIvColorFormat; // Ittiam Color format
+
+ bool mIsInFlush; // codec is flush mode
+ bool mReceivedEOS; // EOS is receieved on input port
+ bool mInitNeeded;
+ uint32_t mNewWidth;
+ uint32_t mNewHeight;
+ // The input stream has changed to a different resolution, which is still supported by the
+ // codec. So the codec is switching to decode the new resolution.
+ bool mChangingResolution;
+ bool mFlushNeeded;
+
+ status_t initDecoder();
+ status_t deInitDecoder();
+ status_t setFlushMode();
+ status_t setParams(size_t stride);
+ void logVersion();
+ status_t setNumCores();
+ status_t resetDecoder();
+ status_t resetPlugin();
+ status_t reInitDecoder();
+
+ void setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip,
+ ivd_video_decode_op_t *ps_dec_op,
+ OMX_BUFFERHEADERTYPE *inHeader,
+ OMX_BUFFERHEADERTYPE *outHeader,
+ size_t timeStampIx);
+
+ DISALLOW_EVIL_CONSTRUCTORS (SoftHEVC);
+};
+
+} // namespace android
+
+#endif // SOFT_HEVC_H_
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
index a3d5779..1d232c6 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
@@ -46,6 +46,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF=
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_STATIC_LIBRARY)
################################################################################
@@ -72,4 +74,6 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_MODULE := libstagefright_soft_mpeg4dec
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index fb2a430..1f4b6fd 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -91,7 +91,7 @@ status_t SoftMPEG4::initDecoder() {
return OK;
}
-void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
+void SoftMPEG4::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError || mOutputPortSettingsChange != NONE) {
return;
}
@@ -134,6 +134,12 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
}
uint8_t *bitstream = inHeader->pBuffer + inHeader->nOffset;
+ uint32_t *start_code = (uint32_t *)bitstream;
+ bool volHeader = *start_code == 0xB0010000;
+ if (volHeader) {
+ PVCleanUpVideoDecoder(mHandle);
+ mInitialized = false;
+ }
if (!mInitialized) {
uint8_t *vol_data[1];
@@ -141,7 +147,7 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
vol_data[0] = NULL;
- if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) || volHeader) {
vol_data[0] = bitstream;
vol_size = inHeader->nFilledLen;
}
@@ -150,7 +156,8 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
(mMode == MODE_MPEG4) ? MPEG4_MODE : H263_MODE;
Bool success = PVInitVideoDecoder(
- mHandle, vol_data, &vol_size, 1, mWidth, mHeight, mode);
+ mHandle, vol_data, &vol_size, 1,
+ outputBufferWidth(), outputBufferHeight(), mode);
if (!success) {
ALOGW("PVInitVideoDecoder failed. Unsupported content?");
@@ -169,21 +176,26 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
PVSetPostProcType((VideoDecControls *) mHandle, 0);
+ bool hasFrameData = false;
if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
inInfo->mOwnedByUs = false;
inQueue.erase(inQueue.begin());
inInfo = NULL;
notifyEmptyBufferDone(inHeader);
inHeader = NULL;
+ } else if (volHeader) {
+ hasFrameData = true;
}
mInitialized = true;
- if (mode == MPEG4_MODE && portSettingsChanged()) {
+ if (mode == MPEG4_MODE && handlePortSettingsChange()) {
return;
}
- continue;
+ if (!hasFrameData) {
+ continue;
+ }
}
if (!mFramesConfigured) {
@@ -223,7 +235,9 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
return;
}
- if (portSettingsChanged()) {
+ // H263 doesn't have VOL header, the frame size information is in short header, i.e. the
+ // decoder may detect size change after PVDecodeVideoFrame.
+ if (handlePortSettingsChange()) {
return;
}
@@ -269,7 +283,7 @@ void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
}
}
-bool SoftMPEG4::portSettingsChanged() {
+bool SoftMPEG4::handlePortSettingsChange() {
uint32_t disp_width, disp_height;
PVGetVideoDimensions(mHandle, (int32 *)&disp_width, (int32 *)&disp_height);
@@ -282,25 +296,24 @@ bool SoftMPEG4::portSettingsChanged() {
ALOGV("disp_width = %d, disp_height = %d, buf_width = %d, buf_height = %d",
disp_width, disp_height, buf_width, buf_height);
- if (mCropWidth != disp_width
- || mCropHeight != disp_height) {
- mCropLeft = 0;
- mCropTop = 0;
- mCropWidth = disp_width;
- mCropHeight = disp_height;
-
- notify(OMX_EventPortSettingsChanged,
- 1,
- OMX_IndexConfigCommonOutputCrop,
- NULL);
- }
+ CropSettingsMode cropSettingsMode = kCropUnSet;
+ if (disp_width != buf_width || disp_height != buf_height) {
+ cropSettingsMode = kCropSet;
- if (buf_width != mWidth || buf_height != mHeight) {
- mWidth = buf_width;
- mHeight = buf_height;
-
- updatePortDefinitions();
+ if (mCropWidth != disp_width || mCropHeight != disp_height) {
+ mCropLeft = 0;
+ mCropTop = 0;
+ mCropWidth = disp_width;
+ mCropHeight = disp_height;
+ cropSettingsMode = kCropChanged;
+ }
+ }
+ bool portWillReset = false;
+ const bool fakeStride = true;
+ SoftVideoDecoderOMXComponent::handlePortSettingsChange(
+ &portWillReset, buf_width, buf_height, cropSettingsMode, fakeStride);
+ if (portWillReset) {
if (mMode == MODE_H263) {
PVCleanUpVideoDecoder(mHandle);
@@ -309,7 +322,7 @@ bool SoftMPEG4::portSettingsChanged() {
vol_data[0] = NULL;
if (!PVInitVideoDecoder(
- mHandle, vol_data, &vol_size, 1, mWidth, mHeight,
+ mHandle, vol_data, &vol_size, 1, outputBufferWidth(), outputBufferHeight(),
H263_MODE)) {
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
mSignalledError = true;
@@ -318,13 +331,9 @@ bool SoftMPEG4::portSettingsChanged() {
}
mFramesConfigured = false;
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
- return true;
}
- return false;
+ return portWillReset;
}
void SoftMPEG4::onPortFlushCompleted(OMX_U32 portIndex) {
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
index de14aaf..8a06a00 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
@@ -67,7 +67,7 @@ private:
status_t initDecoder();
virtual void updatePortDefinitions();
- bool portSettingsChanged();
+ bool handlePortSettingsChange();
DISALLOW_EVIL_CONSTRUCTORS(SoftMPEG4);
};
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/get_pred_adv_b_add.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/get_pred_adv_b_add.cpp
index e23f23d..fe9e7dc 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/get_pred_adv_b_add.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/get_pred_adv_b_add.cpp
@@ -96,7 +96,7 @@ int GetPredAdvancedBy0x0(
offset = width - B_SIZE; /* offset for prev */
offset2 = (pred_width_rnd >> 1) - 4; /* offset for pred_block */
- tmp = (uint32)prev & 0x3;
+ tmp = (uintptr_t)prev & 0x3;
pred_block -= offset2; /* preset */
if (tmp == 0) /* word-aligned */
@@ -203,7 +203,7 @@ int GetPredAdvancedBy0x1(
/* Branch based on pixel location (half-pel or full-pel) for x and y */
pred_block -= offset2; /* preset */
- tmp = (uint32)prev & 3;
+ tmp = (uintptr_t)prev & 3;
mask = 254;
mask |= (mask << 8);
mask |= (mask << 16); /* 0xFEFEFEFE */
@@ -532,7 +532,7 @@ int GetPredAdvancedBy1x0(
/* Branch based on pixel location (half-pel or full-pel) for x and y */
pred_block -= offset2; /* preset */
- tmp = (uint32)prev & 3;
+ tmp = (uintptr_t)prev & 3;
mask = 254;
mask |= (mask << 8);
mask |= (mask << 16); /* 0xFEFEFEFE */
@@ -884,7 +884,7 @@ int GetPredAdvancedBy1x1(
mask |= (mask << 8);
mask |= (mask << 16); /* 0x3f3f3f3f */
- tmp = (uint32)prev & 3;
+ tmp = (uintptr_t)prev & 3;
pred_block -= 4; /* preset */
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
index b3c350f..b03ec8c 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
@@ -1426,7 +1426,7 @@ PV_STATUS DecodeShortHeader(VideoDecData *video, Vop *currVop)
video->nBitsForMBID = CalcNumBits((uint)video->nTotalMB - 1); /* otherwise calculate above */
}
size = (int32)video->width * video->height;
- if (video->currVop->predictionType == P_VOP && size > video->videoDecControls->size)
+ if (currVop->predictionType == P_VOP && size > video->videoDecControls->size)
{
status = PV_FAIL;
goto return_point;
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index 83a2dd2..c9006d9 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -33,6 +33,8 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright/include \
$(TOP)/frameworks/native/include/media/openmax
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_STATIC_LIBRARY)
################################################################################
@@ -72,4 +74,6 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_MODULE := libstagefright_soft_mpeg4enc
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index e02af90..c87d19c 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -33,6 +33,8 @@
#include "SoftMPEG4Encoder.h"
+#include <inttypes.h>
+
namespace android {
template<class T>
@@ -44,42 +46,12 @@ static void InitOMXParams(T *params) {
params->nVersion.s.nStep = 0;
}
-inline static void ConvertYUV420SemiPlanarToYUV420Planar(
- uint8_t *inyuv, uint8_t* outyuv,
- int32_t width, int32_t height) {
-
- int32_t outYsize = width * height;
- uint32_t *outy = (uint32_t *) outyuv;
- uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
- uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
-
- /* Y copying */
- memcpy(outy, inyuv, outYsize);
-
- /* U & V copying */
- uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
- for (int32_t i = height >> 1; i > 0; --i) {
- for (int32_t j = width >> 2; j > 0; --j) {
- uint32_t temp = *inyuv_4++;
- uint32_t tempU = temp & 0xFF;
- tempU = tempU | ((temp >> 8) & 0xFF00);
-
- uint32_t tempV = (temp >> 8) & 0xFF;
- tempV = tempV | ((temp >> 16) & 0xFF00);
-
- // Flip U and V
- *outcb++ = tempV;
- *outcr++ = tempU;
- }
- }
-}
-
SoftMPEG4Encoder::SoftMPEG4Encoder(
const char *name,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
OMX_COMPONENTTYPE **component)
- : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ : SoftVideoEncoderOMXComponent(name, callbacks, appData, component),
mEncodeMode(COMBINE_MODE_WITH_ERR_RES),
mVideoWidth(176),
mVideoHeight(144),
@@ -147,9 +119,10 @@ OMX_ERRORTYPE SoftMPEG4Encoder::initEncParams() {
mEncParams->quantType[0] = 0;
mEncParams->noFrameSkipped = PV_OFF;
- if (mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar
+ || mStoreMetaDataInBuffers) {
// Color conversion is needed.
- CHECK(mInputFrameData == NULL);
+ free(mInputFrameData);
mInputFrameData =
(uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
CHECK(mInputFrameData != NULL);
@@ -214,7 +187,7 @@ OMX_ERRORTYPE SoftMPEG4Encoder::releaseEncoder() {
PVCleanUpVideoEncoder(mHandle);
- delete mInputFrameData;
+ free(mInputFrameData);
mInputFrameData = NULL;
delete mEncParams;
@@ -484,6 +457,17 @@ OMX_ERRORTYPE SoftMPEG4Encoder::internalSetParameter(
mVideoHeight = def->format.video.nFrameHeight;
mVideoFrameRate = def->format.video.xFramerate >> 16;
mVideoColorFormat = def->format.video.eColorFormat;
+
+ OMX_PARAM_PORTDEFINITIONTYPE *portDef =
+ &editPortInfo(0)->mDef;
+ portDef->format.video.nFrameWidth = mVideoWidth;
+ portDef->format.video.nFrameHeight = mVideoHeight;
+ portDef->format.video.xFramerate = def->format.video.xFramerate;
+ portDef->format.video.eColorFormat =
+ (OMX_COLOR_FORMATTYPE) mVideoColorFormat;
+ portDef = &editPortInfo(1)->mDef;
+ portDef->format.video.nFrameWidth = mVideoWidth;
+ portDef->format.video.nFrameHeight = mVideoHeight;
} else {
mVideoBitRate = def->format.video.nBitrate;
}
@@ -605,11 +589,7 @@ OMX_ERRORTYPE SoftMPEG4Encoder::internalSetParameter(
mStoreMetaDataInBuffers ? " true" : "false");
if (mStoreMetaDataInBuffers) {
- mVideoColorFormat == OMX_COLOR_FormatYUV420SemiPlanar;
- if (mInputFrameData == NULL) {
- mInputFrameData =
- (uint8_t *) malloc((mVideoWidth * mVideoHeight * 3 ) >> 1);
- }
+ mVideoColorFormat = OMX_COLOR_FormatAndroidOpaque;
}
return OMX_ErrorNone;
@@ -620,7 +600,7 @@ OMX_ERRORTYPE SoftMPEG4Encoder::internalSetParameter(
}
}
-void SoftMPEG4Encoder::onQueueFilled(OMX_U32 portIndex) {
+void SoftMPEG4Encoder::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError || mSawInputEOS) {
return;
}
@@ -677,36 +657,36 @@ void SoftMPEG4Encoder::onQueueFilled(OMX_U32 portIndex) {
mSawInputEOS = true;
}
- buffer_handle_t srcBuffer; // for MetaDataMode only
if (inHeader->nFilledLen > 0) {
- uint8_t *inputData = NULL;
+ const uint8_t *inputData = NULL;
if (mStoreMetaDataInBuffers) {
if (inHeader->nFilledLen != 8) {
ALOGE("MetaData buffer is wrong size! "
- "(got %lu bytes, expected 8)", inHeader->nFilledLen);
+ "(got %u bytes, expected 8)", inHeader->nFilledLen);
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
return;
}
inputData =
- extractGrallocData(inHeader->pBuffer + inHeader->nOffset,
- &srcBuffer);
+ extractGraphicBuffer(
+ mInputFrameData, (mVideoWidth * mVideoHeight * 3) >> 1,
+ inHeader->pBuffer + inHeader->nOffset, inHeader->nFilledLen,
+ mVideoWidth, mVideoHeight);
if (inputData == NULL) {
ALOGE("Unable to extract gralloc buffer in metadata mode");
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
return;
}
- // TODO: Verify/convert pixel format enum
} else {
- inputData = (uint8_t *)inHeader->pBuffer + inHeader->nOffset;
+ inputData = (const uint8_t *)inHeader->pBuffer + inHeader->nOffset;
+ if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
+ ConvertYUV420SemiPlanarToYUV420Planar(
+ inputData, mInputFrameData, mVideoWidth, mVideoHeight);
+ inputData = mInputFrameData;
+ }
}
- if (mVideoColorFormat != OMX_COLOR_FormatYUV420Planar) {
- ConvertYUV420SemiPlanarToYUV420Planar(
- inputData, mInputFrameData, mVideoWidth, mVideoHeight);
- inputData = mInputFrameData;
- }
CHECK(inputData != NULL);
VideoEncFrameIO vin, vout;
@@ -715,17 +695,17 @@ void SoftMPEG4Encoder::onQueueFilled(OMX_U32 portIndex) {
vin.height = ((mVideoHeight + 15) >> 4) << 4;
vin.pitch = ((mVideoWidth + 15) >> 4) << 4;
vin.timestamp = (inHeader->nTimeStamp + 500) / 1000; // in ms
- vin.yChan = inputData;
+ vin.yChan = (uint8_t *)inputData;
vin.uChan = vin.yChan + vin.height * vin.pitch;
vin.vChan = vin.uChan + ((vin.height * vin.pitch) >> 2);
- unsigned long modTimeMs = 0;
+ ULong modTimeMs = 0;
int32_t nLayer = 0;
MP4HintTrack hintTrack;
if (!PVEncodeVideoFrame(mHandle, &vin, &vout,
&modTimeMs, outPtr, &dataLength, &nLayer) ||
!PVGetHintTrack(mHandle, &hintTrack)) {
- ALOGE("Failed to encode frame or get hink track at frame %lld",
+ ALOGE("Failed to encode frame or get hink track at frame %" PRId64,
mNumInputFrames);
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
@@ -742,7 +722,6 @@ void SoftMPEG4Encoder::onQueueFilled(OMX_U32 portIndex) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
- releaseGrallocData(srcBuffer);
notifyEmptyBufferDone(inHeader);
outQueue.erase(outQueue.begin());
@@ -757,47 +736,6 @@ void SoftMPEG4Encoder::onQueueFilled(OMX_U32 portIndex) {
}
}
-OMX_ERRORTYPE SoftMPEG4Encoder::getExtensionIndex(
- const char *name, OMX_INDEXTYPE *index) {
- if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers")) {
- *(int32_t*)index = kStoreMetaDataExtensionIndex;
- return OMX_ErrorNone;
- }
- return OMX_ErrorUndefined;
-}
-
-uint8_t *SoftMPEG4Encoder::extractGrallocData(void *data, buffer_handle_t *buffer) {
- OMX_U32 type = *(OMX_U32*)data;
- status_t res;
- if (type != kMetadataBufferTypeGrallocSource) {
- ALOGE("Data passed in with metadata mode does not have type "
- "kMetadataBufferTypeGrallocSource (%d), has type %ld instead",
- kMetadataBufferTypeGrallocSource, type);
- return NULL;
- }
- buffer_handle_t imgBuffer = *(buffer_handle_t*)((uint8_t*)data + 4);
-
- const Rect rect(mVideoWidth, mVideoHeight);
- uint8_t *img;
- res = GraphicBufferMapper::get().lock(imgBuffer,
- GRALLOC_USAGE_HW_VIDEO_ENCODER,
- rect, (void**)&img);
- if (res != OK) {
- ALOGE("%s: Unable to lock image buffer %p for access", __FUNCTION__,
- imgBuffer);
- return NULL;
- }
-
- *buffer = imgBuffer;
- return img;
-}
-
-void SoftMPEG4Encoder::releaseGrallocData(buffer_handle_t buffer) {
- if (mStoreMetaDataInBuffers) {
- GraphicBufferMapper::get().unlock(buffer);
- }
-}
-
} // namespace android
android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
index cc4ea8f..b0605b4 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.h
@@ -19,7 +19,7 @@
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/foundation/ABase.h>
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoEncoderOMXComponent.h"
#include "mp4enc_api.h"
@@ -27,7 +27,7 @@ namespace android {
struct MediaBuffer;
-struct SoftMPEG4Encoder : public SimpleSoftOMXComponent {
+struct SoftMPEG4Encoder : public SoftVideoEncoderOMXComponent {
SoftMPEG4Encoder(
const char *name,
const OMX_CALLBACKTYPE *callbacks,
@@ -43,11 +43,6 @@ struct SoftMPEG4Encoder : public SimpleSoftOMXComponent {
virtual void onQueueFilled(OMX_U32 portIndex);
- // Override SoftOMXComponent methods
-
- virtual OMX_ERRORTYPE getExtensionIndex(
- const char *name, OMX_INDEXTYPE *index);
-
protected:
virtual ~SoftMPEG4Encoder();
@@ -56,10 +51,6 @@ private:
kNumBuffers = 2,
};
- enum {
- kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1
- };
-
// OMX input buffer's timestamp and flags
typedef struct {
int64_t mTimeUs;
@@ -90,9 +81,6 @@ private:
OMX_ERRORTYPE initEncoder();
OMX_ERRORTYPE releaseEncoder();
- uint8_t* extractGrallocData(void *data, buffer_handle_t *buffer);
- void releaseGrallocData(buffer_handle_t buffer);
-
DISALLOW_EVIL_CONSTRUCTORS(SoftMPEG4Encoder);
};
diff --git a/media/libstagefright/codecs/m4v_h263/enc/include/mp4enc_api.h b/media/libstagefright/codecs/m4v_h263/enc/include/mp4enc_api.h
index a54fd8b..9451479 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/include/mp4enc_api.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/include/mp4enc_api.h
@@ -29,7 +29,7 @@ typedef int Int;
typedef unsigned short UShort;
typedef short Short;
typedef unsigned int Bool;
-typedef unsigned long ULong;
+typedef uint32_t ULong;
#define PV_CODEC_INIT 0
#define PV_CODEC_STOP 1
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/dct.cpp b/media/libstagefright/codecs/m4v_h263/enc/src/dct.cpp
index fa50eeb..fa4ae23 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/dct.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/dct.cpp
@@ -250,7 +250,7 @@ extern "C"
out[40] = k4 ; /* row 5 */
out++;
}
- while ((UInt)out < (UInt)dst) ;
+ while ((uintptr_t)out < (uintptr_t)dst) ;
return ;
}
@@ -455,7 +455,7 @@ extern "C"
out[8] = k5 ; /* row 1 */
out++;
}
- while ((UInt)out < (UInt)dst) ;
+ while ((uintptr_t)out < (uintptr_t)dst) ;
return ;
}
@@ -635,7 +635,7 @@ extern "C"
out[8] = k5 ; /* row 1 */
out++;
}
- while ((UInt)out < (UInt)dst) ;
+ while ((uintptr_t)out < (uintptr_t)dst) ;
return ;
}
@@ -846,7 +846,7 @@ extern "C"
out[40] = k4 ; /* row 5 */
out++;
}
- while ((UInt)out < (UInt)dst) ;
+ while ((uintptr_t)out < (uintptr_t)dst) ;
return ;
}
@@ -1033,7 +1033,7 @@ extern "C"
out[8] = k5 ; /* row 1 */
out++;
}
- while ((UInt)out < (UInt)dst) ;
+ while ((uintptr_t)out < (uintptr_t)dst) ;
return ;
}
@@ -1195,7 +1195,7 @@ extern "C"
out[8] = k5 ; /* row 1 */
out++;
}
- while ((UInt)out < (UInt)dst) ;
+ while ((uintptr_t)out < (uintptr_t)dst) ;
return ;
}
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp b/media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp
index 6fd41c3..0ad39a6 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp
@@ -572,7 +572,7 @@ Int Sad8x8(UChar *cur, UChar *prev, Int width)
cur2 = cur2 & (mask << 8); /* mask first and third bytes */
sum2 = sum2 + ((UInt)cur2 >> 8);
}
- while ((UInt)curInt < (UInt)end);
+ while ((uintptr_t)curInt < (uintptr_t)end);
cur1 = sum4 - (sum2 << 8); /* get even-sum */
cur1 = cur1 + sum2; /* add 16 bit even-sum and odd-sum*/
@@ -611,7 +611,7 @@ Int getBlockSum(UChar *cur, Int width)
load2 = load2 & (mask << 8); /* even bytes */
sum2 += ((UInt)load2 >> 8); /* sum even bytes, 16 bit */
}
- while ((UInt)curInt < (UInt)end);
+ while ((uintptr_t)curInt < (uintptr_t)end);
load1 = sum4 - (sum2 << 8); /* get even-sum */
load1 = load1 + sum2; /* add 16 bit even-sum and odd-sum*/
load1 = load1 + (load1 << 16); /* add upper and lower 16 bit sum */
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/motion_comp.cpp b/media/libstagefright/codecs/m4v_h263/enc/src/motion_comp.cpp
index b81d278..9a967c2 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/motion_comp.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/motion_comp.cpp
@@ -363,7 +363,7 @@ Int GetPredAdvBy0x0(
/* initialize offset to adjust pixel counter */
/* the next row; full-pel resolution */
- tmp = (ULong)prev & 0x3;
+ tmp = (uintptr_t)prev & 0x3;
if (tmp == 0) /* word-aligned */
{
@@ -466,7 +466,7 @@ Int GetPredAdvBy0x1(
/* Branch based on pixel location (half-pel or full-pel) for x and y */
rec -= 12; /* preset */
- tmp = (ULong)prev & 3;
+ tmp = (uintptr_t)prev & 3;
mask = 254;
mask |= (mask << 8);
mask |= (mask << 16); /* 0xFEFEFEFE */
@@ -791,7 +791,7 @@ Int GetPredAdvBy1x0(
/* Branch based on pixel location (half-pel or full-pel) for x and y */
rec -= 12; /* preset */
- tmp = (ULong)prev & 3;
+ tmp = (uintptr_t)prev & 3;
mask = 254;
mask |= (mask << 8);
mask |= (mask << 16); /* 0xFEFEFEFE */
@@ -1140,7 +1140,7 @@ Int GetPredAdvBy1x1(
mask |= (mask << 8);
mask |= (mask << 16); /* 0x3f3f3f3f */
- tmp = (ULong)prev & 3;
+ tmp = (uintptr_t)prev & 3;
rec -= 4; /* preset */
@@ -1959,7 +1959,7 @@ void PutSkippedBlock(UChar *rec, UChar *prev, Int lx)
dst += offset;
src += offset;
}
- while ((UInt)src < (UInt)end);
+ while ((uintptr_t)src < (uintptr_t)end);
return ;
}
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/mp4def.h b/media/libstagefright/codecs/m4v_h263/enc/src/mp4def.h
index 0d5a3e8..2d44482 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/mp4def.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/mp4def.h
@@ -60,7 +60,7 @@ typedef unsigned short UShort;
typedef short Short;
typedef short int SInt;
typedef unsigned int Bool;
-typedef unsigned long ULong;
+typedef uint32_t ULong;
typedef void Void;
#define PV_CODEC_INIT 0
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/sad_inline.h b/media/libstagefright/codecs/m4v_h263/enc/src/sad_inline.h
index ba77dfd..b865f23 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/sad_inline.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/sad_inline.h
@@ -85,7 +85,7 @@ extern "C"
x9 = 0x80808080; /* const. */
- x8 = (uint32)ref & 0x3;
+ x8 = (uintptr_t)ref & 0x3;
if (x8 == 3)
goto SadMBOffset3;
if (x8 == 2)
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index 135c715..8284490 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -50,6 +50,8 @@ LOCAL_C_INCLUDES := \
LOCAL_CFLAGS := \
-DOSCL_UNUSED_ARG=
+LOCAL_CFLAGS += -Werror
+
LOCAL_MODULE := libstagefright_mp3dec
LOCAL_ARM_MODE := arm
@@ -69,6 +71,8 @@ LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/src \
$(LOCAL_PATH)/include
+LOCAL_CFLAGS += -Werror
+
LOCAL_SHARED_LIBRARIES := \
libstagefright libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 7c382fb..5396022 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -49,6 +49,8 @@ SoftMP3::SoftMP3(
mNumChannels(2),
mSamplingRate(44100),
mSignalledError(false),
+ mSawInputEos(false),
+ mSignalledOutputEos(false),
mOutputPortSettingsChange(NONE) {
initPorts();
initDecoder();
@@ -144,6 +146,23 @@ OMX_ERRORTYPE SoftMP3::internalGetParameter(
return OMX_ErrorNone;
}
+ case OMX_IndexParamAudioMp3:
+ {
+ OMX_AUDIO_PARAM_MP3TYPE *mp3Params =
+ (OMX_AUDIO_PARAM_MP3TYPE *)params;
+
+ if (mp3Params->nPortIndex > 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ mp3Params->nChannels = mNumChannels;
+ mp3Params->nBitRate = 0 /* unknown */;
+ mp3Params->nSampleRate = mSamplingRate;
+ // other fields are encoder-only
+
+ return OMX_ErrorNone;
+ }
+
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
@@ -186,7 +205,7 @@ OMX_ERRORTYPE SoftMP3::internalSetParameter(
}
}
-void SoftMP3::onQueueFilled(OMX_U32 portIndex) {
+void SoftMP3::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError || mOutputPortSettingsChange != NONE) {
return;
}
@@ -194,48 +213,36 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) {
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
- while (!inQueue.empty() && !outQueue.empty()) {
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
+ BufferInfo *inInfo = NULL;
+ OMX_BUFFERHEADERTYPE *inHeader = NULL;
+ if (!inQueue.empty()) {
+ inInfo = *inQueue.begin();
+ inHeader = inInfo->mHeader;
+ }
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+ outHeader->nFlags = 0;
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
-
- if (!mIsFirst) {
- // pad the end of the stream with 529 samples, since that many samples
- // were trimmed off the beginning when decoding started
- outHeader->nFilledLen =
- kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t);
-
- memset(outHeader->pBuffer, 0, outHeader->nFilledLen);
- } else {
- // Since we never discarded frames from the start, we won't have
- // to add any padding at the end either.
- outHeader->nFilledLen = 0;
+ if (inHeader) {
+ if (inHeader->nOffset == 0 && inHeader->nFilledLen) {
+ mAnchorTimeUs = inHeader->nTimeStamp;
+ mNumFramesOutput = 0;
}
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ mSawInputEos = true;
+ }
- outQueue.erase(outQueue.begin());
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
- return;
- }
+ mConfig->pInputBuffer =
+ inHeader->pBuffer + inHeader->nOffset;
- if (inHeader->nOffset == 0) {
- mAnchorTimeUs = inHeader->nTimeStamp;
- mNumFramesOutput = 0;
+ mConfig->inputBufferCurrentLength = inHeader->nFilledLen;
+ } else {
+ mConfig->pInputBuffer = NULL;
+ mConfig->inputBufferCurrentLength = 0;
}
-
- mConfig->pInputBuffer =
- inHeader->pBuffer + inHeader->nOffset;
-
- mConfig->inputBufferCurrentLength = inHeader->nFilledLen;
mConfig->inputBufferMaxLength = 0;
mConfig->inputBufferUsedLength = 0;
@@ -262,13 +269,28 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) {
mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t);
}
- // This is recoverable, just ignore the current frame and
- // play silence instead.
- memset(outHeader->pBuffer,
- 0,
- mConfig->outputFrameSize * sizeof(int16_t));
-
- mConfig->inputBufferUsedLength = inHeader->nFilledLen;
+ if (decoderErr == NO_ENOUGH_MAIN_DATA_ERROR && mSawInputEos) {
+ if (!mIsFirst) {
+ // pad the end of the stream with 529 samples, since that many samples
+ // were trimmed off the beginning when decoding started
+ outHeader->nOffset = 0;
+ outHeader->nFilledLen = kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t);
+
+ memset(outHeader->pBuffer, 0, outHeader->nFilledLen);
+ }
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ mSignalledOutputEos = true;
+ } else {
+ // This is recoverable, just ignore the current frame and
+ // play silence instead.
+ memset(outHeader->pBuffer,
+ 0,
+ mConfig->outputFrameSize * sizeof(int16_t));
+
+ if (inHeader) {
+ mConfig->inputBufferUsedLength = inHeader->nFilledLen;
+ }
+ }
} else if (mConfig->samplingRate != mSamplingRate
|| mConfig->num_channels != mNumChannels) {
mSamplingRate = mConfig->samplingRate;
@@ -289,7 +311,7 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) {
outHeader->nFilledLen =
mConfig->outputFrameSize * sizeof(int16_t) - outHeader->nOffset;
- } else {
+ } else if (!mSignalledOutputEos) {
outHeader->nOffset = 0;
outHeader->nFilledLen = mConfig->outputFrameSize * sizeof(int16_t);
}
@@ -298,23 +320,24 @@ void SoftMP3::onQueueFilled(OMX_U32 portIndex) {
mAnchorTimeUs
+ (mNumFramesOutput * 1000000ll) / mConfig->samplingRate;
- outHeader->nFlags = 0;
-
- CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength);
+ if (inHeader) {
+ CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength);
- inHeader->nOffset += mConfig->inputBufferUsedLength;
- inHeader->nFilledLen -= mConfig->inputBufferUsedLength;
+ inHeader->nOffset += mConfig->inputBufferUsedLength;
+ inHeader->nFilledLen -= mConfig->inputBufferUsedLength;
- mNumFramesOutput += mConfig->outputFrameSize / mNumChannels;
- if (inHeader->nFilledLen == 0) {
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
+ if (inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
}
+ mNumFramesOutput += mConfig->outputFrameSize / mNumChannels;
+
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
@@ -329,6 +352,9 @@ void SoftMP3::onPortFlushCompleted(OMX_U32 portIndex) {
// depend on fragments from the last one decoded.
pvmp3_InitDecoder(mConfig, mDecoderBuf);
mIsFirst = true;
+ mSignalledError = false;
+ mSawInputEos = false;
+ mSignalledOutputEos = false;
}
}
@@ -362,6 +388,8 @@ void SoftMP3::onReset() {
pvmp3_InitDecoder(mConfig, mDecoderBuf);
mIsFirst = true;
mSignalledError = false;
+ mSawInputEos = false;
+ mSignalledOutputEos = false;
mOutputPortSettingsChange = NONE;
}
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.h b/media/libstagefright/codecs/mp3dec/SoftMP3.h
index 4af91ea..f9e7b53 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.h
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.h
@@ -61,6 +61,8 @@ private:
bool mIsFirst;
bool mSignalledError;
+ bool mSawInputEos;
+ bool mSignalledOutputEos;
enum {
NONE,
diff --git a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_dct_9_arm.s b/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_dct_9_arm.s
deleted file mode 100644
index 3a6dd4f..0000000
--- a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_dct_9_arm.s
+++ /dev/null
@@ -1,210 +0,0 @@
-; ------------------------------------------------------------------
-; Copyright (C) 1998-2009 PacketVideo
-;
-; Licensed under the Apache License, Version 2.0 (the "License");
-; you may not use this file except in compliance with the License.
-; You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing, software
-; distributed under the License is distributed on an "AS IS" BASIS,
-; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-; express or implied.
-; See the License for the specific language governing permissions
-; and limitations under the License.
-; -------------------------------------------------------------------
-
-;
-;
-; Filename: pvmp3_dct_9.s
-;
-;------------------------------------------------------------------------------
-; REVISION HISTORY
-;
-;
-; Who: Date: MM/DD/YYYY
-; Description:
-;
-;------------------------------------------------------------------------------
-
- AREA |.drectve|, DRECTVE
-
- DCB "-defaultlib:coredll.lib "
- DCB "-defaultlib:corelibc.lib "
-
- IMPORT pvmp3_mdct_18 ; pvmp3_mdct_18.cpp
-
-;------------------------------------------------------------------------------
-
- AREA |.rdata|, DATA, READONLY
- % 4
-
-
-;------------------------------------------------------------------------------
-
- AREA |.text|, CODE, READONLY
-
-
-;------------------------------------------------------------------------------
-
- EXPORT |pvmp3_dct_9|
-
-|pvmp3_dct_9| PROC
- stmfd sp!,{r4-r10,lr}
- ldr r2, [r0, #0x20]
- ldr r3, [r0]
- ldr r12,[r0, #4]
- add r1,r2,r3
- sub lr,r2,r3
- ldr r3,[r0, #0x1c]
- ldr r4,[r0, #0x18]
- add r2,r3,r12
- ldr r5,[r0,#8]
- sub r3,r3,r12
- add r12,r4,r5
- sub r4,r4,r5
- ldr r5,[r0, #0x14]
- ldr r7,[r0, #0xc]
- ldr r9,[r0, #0x10]
- add r6,r5,r7
- sub r5,r5,r7
- add r7,r1,r12
- add r8,r9,r2
- add r7,r7,r6
- add r10,r7,r8
- rsb r7,r8,r7,asr #1
- str r7,[r0, #0x18]
- rsb r2,r9,r2,asr #1
- str r10,[r0]
- ldr r11,|cos_2pi_9|
- rsb r7,r2,#0
-
- mov r9,r1,lsl #1
- mov r1,r9 ;;;;;; !!!!!!
- mov r8,r7
-
-; vec[4] = fxp_mac32_Q32( vec[4], tmp0<<1, cos_2pi_9);
-
- smlal r1,r8,r11,r9
- ldr r10,|cos_4pi_9|
- ldr r11,|cos_pi_9|
-
-; vec[8] = fxp_mac32_Q32( vec[8], tmp0<<1, cos_4pi_9);
-
- smlal r1,r7,r10,r9
-
-
-
-; vec[2] = fxp_mac32_Q32( vec[2], tmp0<<1, cos_pi_9);
-
- smlal r9,r2,r11,r9
- mov r1,r12,lsl #1
- rsb r9,r10,#0
- ldr r11,|cos_5pi_9|
-
- smlal r12,r2,r9,r1
-
-
-
-; vec[2] = fxp_mac32_Q32( vec[2], tmp2<<1, cos_5pi_9);
-
- ldr r9,|cos_2pi_9|
- mov r12,r1 ;;;;;; !!!!!!
- smlal r12,r8,r11,r1
-
-
-; vec[8] = fxp_mac32_Q32( vec[8], tmp2<<1, cos_2pi_9);
-
- smlal r1,r7,r9,r1
- mov r1,r6,lsl #1
- smlal r12,r7,r11,r1
- and r6,r10,r11,asr #14
- smlal r12,r8,r6,r1
- ldr r10,|cos_11pi_18|
- add r12,r11,r6
- smlal r1,r2,r12,r1
- ldr r9,|cos_8pi_9|
- str r2,[r0,#8]
- mov r1,r5,lsl #1
-
-; vec[8] = fxp_mac32_Q32( vec[8], tmp3<<1, cos_8pi_9);
-
- smull r2,r6,r9,r1
- str r7,[r0,#0x20]
- mov r2,r4,lsl #1
- ldr r7,|cos_13pi_18|
- smlal r12,r6,r10,r2
-
- mov r3,r3,lsl #1
-
-; vec[5] = fxp_mac32_Q32( vec[5], tmp8<<1, cos_13pi_18);
-
- smlal r12,r6,r7,r3
- add r4,r5,r4
- mov r12,lr,lsl #1
- sub lr,r4,lr
- ldr r7,|cos_17pi_18|
- str r8,[r0, #0x10]
- ldr r4,|cos_pi_6|
-
- mov lr,lr,lsl #1
-
-; vec[1] = fxp_mac32_Q32( vec[1], tmp8<<1, cos_17pi_18);
-
- smlal r8,r6,r7,r12
-
-; vec[3] = fxp_mul32_Q32((tmp5 + tmp6 - tmp8)<<1, cos_pi_6);
-
- smull r5,lr,r4,lr
- str r6,[r0, #4]
- str lr,[r0, #0xc]
-
-
-; vec[5] = fxp_mul32_Q32(tmp5<<1, cos_17pi_18);
- smull r5,lr,r7,r1
- rsb r6,r9,#0
-; vec[5] = fxp_mac32_Q32( vec[5], tmp6<<1, cos_7pi_18);
- smlal r5,lr,r6,r2
-; vec[5] = fxp_mac32_Q32( vec[5], tmp7<<1, cos_pi_6);
- smlal r5,lr,r4,r3
-; vec[5] = fxp_mac32_Q32( vec[5], tmp8<<1, cos_13pi_18);
- smlal r5,lr,r10,r12
- str lr,[r0, #0x14]
- rsb lr,r10,#0
-
-; vec[7] = fxp_mul32_Q32(tmp5<<1, cos_5pi_18);
- smull r5,r1,lr,r1
-; vec[7] = fxp_mac32_Q32( vec[7], tmp6<<1, cos_17pi_18);
- smlal r2,r1,r7,r2
-; vec[7] = fxp_mac32_Q32( vec[7], tmp7<<1, cos_pi_6);
- smlal r3,r1,r4,r3
-; vec[7] = fxp_mac32_Q32( vec[7], tmp8<<1, cos_11pi_18);
- smlal r12,r1,r9,r12
- str r1,[r0, #0x1c]
- ldmfd sp!,{r4-r10,pc}
-|cos_2pi_9|
- DCD 0x620dbe80
-|cos_4pi_9|
- DCD 0x163a1a80
-|cos_pi_9|
- DCD 0x7847d900
-|cos_5pi_9|
- DCD 0x87b82700
-|cos_8pi_9|
- DCD 0xd438af00
-|cos_11pi_18|
- DCD 0xadb92280
-|cos_13pi_18|
- DCD 0x91261480
-|cos_17pi_18|
- DCD 0x81f1d200
-|cos_pi_6|
- DCD 0x6ed9eb80
- ENDP
-
-
-
-
-
- END
diff --git a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_mdct_18_arm.s b/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_mdct_18_arm.s
deleted file mode 100644
index 9401d8c..0000000
--- a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_mdct_18_arm.s
+++ /dev/null
@@ -1,369 +0,0 @@
-; ------------------------------------------------------------------
-; Copyright (C) 1998-2009 PacketVideo
-;
-; Licensed under the Apache License, Version 2.0 (the "License");
-; you may not use this file except in compliance with the License.
-; You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing, software
-; distributed under the License is distributed on an "AS IS" BASIS,
-; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-; express or implied.
-; See the License for the specific language governing permissions
-; and limitations under the License.
-; -------------------------------------------------------------------
-
-;
-;
-; Filename: pvmp3_dct_18.s
-;
-;------------------------------------------------------------------------------
-; REVISION HISTORY
-;
-;
-; Who: Date: MM/DD/YYYY
-; Description:
-;
-;------------------------------------------------------------------------------
-
- EXPORT pvmp3_mdct_18
-
- IMPORT ||Lib$$Request$$armlib|| [WEAK]
- IMPORT ||Lib$$Request$$cpplib|| [WEAK]
- IMPORT pvmp3_dct_9
-
-
-;------------------------------------------------------------------------------
-
- AREA |.text|, CODE, READONLY, ALIGN=2
-
-
-;------------------------------------------------------------------------------
-
-|pvmp3_mdct_18| PROC
- stmfd sp!,{r4-r10,lr}
- mov r7,r2
- ldr r2,table
- mov r6,r1
- add r3,r2,#0x24
- add r12,r3,#0x44
- add r1,r0,#0x44
- mov r5,r0
-
-; for ( i=9; i!=0; i--)
-; {
-
- mov r4,#9
-Loop_1
-
-; tmp = *(pt_vec);
-; tmp1 = *(pt_vec_o);
-
- ldr lr,[r0] ;; tmp == lr
- ldr r8,[r3],#4 ;; tmp1 == r8
-
-; tmp = fxp_mul32_Q32( tmp<<1, *(pt_cos++ ));
-; tmp1 = fxp_mul32_Q27( tmp1, *(pt_cos_x--));
-
- mov lr,lr,lsl #1
- smull r10,lr,r8,lr
- ldr r8,[r12],#-4
- ldr r9,[r1]
- subs r4,r4,#1
- smull r9,r10,r8,r9
- mov r8,r9,lsr #27
- add r8,r8,r10,lsl #5
-
-; *(pt_vec++) = tmp + tmp1 ;
-; *(pt_vec_o--) = fxp_mul32_Q28( (tmp - tmp1), *(pt_cos_split++));
-
- add r9,lr,r8
- sub r8,lr,r8
- ldr lr,[r2],#4
- str r9,[r0],#4
- smull r8,r9,lr,r8
- mov lr,r8,lsr #28
- add lr,lr,r9,lsl #4
- str lr,[r1],#-4
- bne Loop_1
-
-; }
-
- mov r0,r5 ;; r0 = vec
- bl pvmp3_dct_9
- add r0,r5,#0x24 ;; r0 = &vec[9]
- bl pvmp3_dct_9
-
- ldr r0,[r5,#0x20]
- ldr r2,[r5,#0x40]
- str r0,[r5,#0x40]
- ldr r0,[r5,#0x1c]
- ldr r3,[r5,#0x38]
- str r0,[r5,#0x38]
- ldr r1,[r5,#0x18]
- ldr r0,[r5,#0x30]
- str r1,[r5,#0x30]
- ldr r12,[r5,#0x14]
- ldr r1,[r5,#0x28]
- str r12,[r5,#0x28]
- ldr r12,[r5,#0x10]
- str r12,[r5,#0x20]
- ldr r12,[r5,#0xc]
- str r12,[r5,#0x18]
- ldr r12,[r5,#8]
- str r12,[r5,#0x10]
- ldr r12,[r5,#4]
- str r12,[r5,#8]
- ldr r12,[r5,#0x24]
- sub r12,r12,r1
- str r12,[r5,#4]
- ldr r12,[r5,#0x2c]
- sub r1,r12,r1
- str r1,[r5,#0xc]
- sub r1,r12,r0
- str r1,[r5,#0x14]
- ldr r1,[r5,#0x34]
- sub r0,r1,r0
- str r0,[r5,#0x1c]
- sub r0,r1,r3
- str r0,[r5,#0x24]
- ldr r1,[r5,#0x3c]
- sub r3,r1,r3
- sub r1,r1,r2
- str r1,[r5,#0x34]
- str r3,[r5,#0x2c]
- ldr r1,[r5,#0x44]
- sub r1,r1,r2
- str r1,[r5,#0x3c]
- ldr r12,[r5,#0]
-
-Loop_2
- add r1,r5,r4,lsl #2
- ldr r2,[r1,#0x28]
- ldr r3,[r6,r4,lsl #2]
- add r0,r0,r2
- str r0,[r1,#0x28]
- ldr lr,[r7,r4,lsl #2]
- ldr r1,[r1,#4]
- smlal r0,r3,lr,r0
- mov r0,r2
- add r2,r12,r1
- rsb r2,r2,#0
- str r3,[r5,r4,lsl #2]
- str r2,[r6,r4,lsl #2]
- add r4,r4,#1
- cmp r4,#6
- mov r12,r1
-
- blt Loop_2
-
- ldr r1,[r5,#0x40]
- ldr r2,[r6,#0x18]
- add r3,r0,r1
- str r3,[r5,#0x40]
- ldr lr,[r7,r4,lsl #2]
- mov r3,r3,lsl #1
- ldr r0,[r5,#0x1c]
- smlal r3,r2,lr,r3
- add r3,r12,r0
- str r2,[r5,#0x18]
- ldr r2,[r6,#0x1c]
- rsb r3,r3,#0
- str r3,[r6,#0x18]
- ldr r3,[r5,#0x20]
- add r0,r3,r0
- rsb r0,r0,#0
- str r0,[r6,#0x1c]
- ldr r3,[r5,#0x44]
- ldr r0,[r6,#0x20]
- add r3,r3,r1
- mov r1,r2
- ldr r10,[r7,#0x1c]
- mov r2,r3,lsl #1
- smlal r12,r1,r10,r2
- str r1,[r5,#0x1c]
- ldr r1,[r5,#0x20]
- ldr r3,[r5,#0x24]
- add r1,r1,r3
- rsb r1,r1,#0
- str r1,[r6,#0x20]
- ldr r1,[r5,#0x44]
- ldr r3,[r7,#0x20]
- mov r1,r1,lsl #1
- smlal r12,r0,r3,r1
- ldr lr,[r7,#0x24]
- ldr r3,[r6,#0x24]
- str r0,[r5,#0x20]
- smlal r1,r3,lr,r1
- ldr r0,[r6,#0x40]
- ldr r12,[r6,#0x44]
- str r3,[r5,#0x24]
- ldr r1,[r5,#0x28]
- ldr r3,[r7,#0x44]
- mov r1,r1,lsl #1
- smlal r1,r12,r3,r1
- ldr r1,[r5,#0x40]
- str r12,[r5,#0x44]
- rsb r8,r1,#0
- str r8,[r5,#0x28]
- ldr r1,[r5,#0x2c]
- ldr r3,[r7,#0x40]
- mov r1,r1,lsl #1
- smlal r1,r0,r3,r1
- str r0,[r5,#0x40]
- ldr r0,[r5,#0x3c]
- ldr r1,[r6,#0x38]
- ldr r3,[r6,#0x3c]
- rsb r9,r0,#0
- str r9,[r5,#0x2c]
- ldr r0,[r5,#0x30]
- ldr r12,[r7,#0x3c]
- mov r0,r0,lsl #1
- smlal r0,r3,r12,r0
- str r3,[r5,#0x3c]
- ldr r0,[r5,#0x38]
- rsb r0,r0,#0
- str r0,[r5,#0x30]
- ldr r3,[r5,#0x34]
- ldr r12,[r7,#0x38]
- mov r3,r3,lsl #1
- smlal r3,r1,r12,r3
- mov r0,r0,lsl #1
- str r1,[r5,#0x38]
- ldr r4,[r7,#0x34]
- ldr r1,[r6,#0x34]
- ldr r3,[r6,#0x30]
- smlal r0,r1,r4,r0
- ldr r12,[r6,#0x2c]
- ldr lr,[r6,#0x28]
- str r1,[r5,#0x34]
- ldr r1,[r7,#0x30]
- mov r0,r9,lsl #1
- smlal r0,r3,r1,r0
- mov r0,r8,lsl #1
- ldr r1,[r7,#0x2c]
- str r3,[r5,#0x30]
- smlal r0,r12,r1,r0
- ldr r0,[r7,#0x28]
- str r12,[r5,#0x2c]
- smlal r2,lr,r0,r2
- str lr,[r5,#0x28]
- ldr r1,[r6,#4]
- ldr r12,[r7,#0x48]
- mov r2,r1,lsl #1
- ldr r1,[r6,#0x20]
- ldr r0,[r6]
- mov r1,r1,lsl #1
- smull r4,lr,r12,r1
- ldr r3,[r6,#0x1c]
- str lr,[r6]
- ldr r12,[r7,#0x4c]
- mov r3,r3,lsl #1
- smull r4,lr,r12,r3
- mov r0,r0,lsl #1
- ldr r12,[r7,#0x64]
- str lr,[r6,#4]
- smull r4,lr,r12,r2
- ldr r12,[r7,#0x68]
- str lr,[r6,#0x1c]
- smull r4,lr,r12,r0
- ldr r12,[r7,#0x6c]
- str lr,[r6,#0x20]
- smull lr,r0,r12,r0
- ldr r12,[r7,#0x70]
- str r0,[r6,#0x24]
- smull r0,r2,r12,r2
- ldr r0,[r7,#0x88]
- str r2,[r6,#0x28]
- smull r3,r2,r0,r3
- ldr r0,[r7,#0x8c]
- str r2,[r6,#0x40]
- smull r2,r1,r0,r1
- str r1,[r6,#0x44]
- ldr r0,[r6,#0x18]
- ldr lr,[r7,#0x50]
- mov r1,r0,lsl #1
- ldr r0,[r6,#0x14]
- smull r5,r4,lr,r1
- ldr r12,[r6,#0x10]
- mov r3,r0,lsl #1
- ldr r0,[r6,#0xc]
- mov r12,r12,lsl #1
- mov r2,r0,lsl #1
- ldr r0,[r6,#8]
- str r4,[r6,#8]
- ldr lr,[r7,#0x54]
- mov r0,r0,lsl #1
- smull r5,r4,lr,r3
- ldr lr,[r7,#0x58]
- str r4,[r6,#0xc]
- smull r5,r4,lr,r12
- ldr lr,[r7,#0x5c]
- str r4,[r6,#0x10]
- smull r5,r4,lr,r2
- ldr lr,[r7,#0x60]
- str r4,[r6,#0x14]
- smull r5,r4,lr,r0
- ldr lr,[r7,#0x74]
- str r4,[r6,#0x18]
- smull r4,r0,lr,r0
- ldr lr,[r7,#0x78]
- str r0,[r6,#0x2c]
- smull r0,r2,lr,r2
- ldr r0,[r7,#0x7c]
- str r2,[r6,#0x30]
- smull r12,r2,r0,r12
- ldr r0,[r7,#0x80]
- str r2,[r6,#0x34]
- smull r3,r2,r0,r3
- ldr r0,[r7,#0x84]
- str r2,[r6,#0x38]
- smull r2,r1,r0,r1
- str r1,[r6,#0x3c]
- ldmfd sp!,{r4-r10,pc}
-table
- DCD ||.constdata$1||
- ENDP
-
-;------------------------------------------------------------------------------
-
- AREA |.constdata|, DATA, READONLY, ALIGN=2
-
-;------------------------------------------------------------------------------
-
-||.constdata$1||
-cosTerms_dct18
- DCD 0x0807d2b0
- DCD 0x08483ee0
- DCD 0x08d3b7d0
- DCD 0x09c42570
- DCD 0x0b504f30
- DCD 0x0df29440
- DCD 0x12edfb20
- DCD 0x1ee8dd40
- DCD 0x5bca2a00
-cosTerms_1_ov_cos_phi
- DCD 0x400f9c00
- DCD 0x408d6080
- DCD 0x418dcb80
- DCD 0x431b1a00
- DCD 0x4545ea00
- DCD 0x48270680
- DCD 0x4be25480
- DCD 0x50ab9480
- DCD 0x56ce4d80
- DCD 0x05ebb630
- DCD 0x06921a98
- DCD 0x0771d3a8
- DCD 0x08a9a830
- DCD 0x0a73d750
- DCD 0x0d4d5260
- DCD 0x127b1ca0
- DCD 0x1ea52b40
- DCD 0x5bb3cc80
-
-
-
- END
diff --git a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_mdct_18_wm.asm b/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_mdct_18_wm.asm
deleted file mode 100644
index 5be75d4..0000000
--- a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_mdct_18_wm.asm
+++ /dev/null
@@ -1,366 +0,0 @@
-; ------------------------------------------------------------------
-; Copyright (C) 1998-2009 PacketVideo
-;
-; Licensed under the Apache License, Version 2.0 (the "License");
-; you may not use this file except in compliance with the License.
-; You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing, software
-; distributed under the License is distributed on an "AS IS" BASIS,
-; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-; express or implied.
-; See the License for the specific language governing permissions
-; and limitations under the License.
-; -------------------------------------------------------------------
-
-;
-;
-; Filename: pvmp3_dct_18.s
-;
-;------------------------------------------------------------------------------
-; REVISION HISTORY
-;
-;
-; Who: Date: MM/DD/YYYY
-; Description:
-;
-;------------------------------------------------------------------------------
-
- EXPORT |pvmp3_mdct_18|
-
- IMPORT pvmp3_dct_9
-
-
-;------------------------------------------------------------------------------
-
- AREA |.text|, CODE, READONLY, ALIGN=2
-
-
-;------------------------------------------------------------------------------
-
-|pvmp3_mdct_18| PROC
- stmfd sp!,{r4-r10,lr}
- mov r7,r2
- ldr r2,table
- mov r6,r1
- add r3,r2,#0x24
- add r12,r3,#0x44
- add r1,r0,#0x44
- mov r5,r0
-
-; for ( i=9; i!=0; i--)
-; {
-
- mov r4,#9
-Loop_1
-
-; tmp = *(pt_vec);
-; tmp1 = *(pt_vec_o);
-
- ldr lr,[r0] ;; tmp == lr
- ldr r8,[r3],#4 ;; tmp1 == r8
-
-; tmp = fxp_mul32_Q32( tmp<<1, *(pt_cos++ ));
-; tmp1 = fxp_mul32_Q27( tmp1, *(pt_cos_x--));
-
- mov lr,lr,lsl #1
- smull r10,lr,r8,lr
- ldr r8,[r12],#-4
- ldr r9,[r1]
- subs r4,r4,#1
- smull r9,r10,r8,r9
- mov r8,r9,lsr #27
- add r8,r8,r10,lsl #5
-
-; *(pt_vec++) = tmp + tmp1 ;
-; *(pt_vec_o--) = fxp_mul32_Q28( (tmp - tmp1), *(pt_cos_split++));
-
- add r9,lr,r8
- sub r8,lr,r8
- ldr lr,[r2],#4
- str r9,[r0],#4
- smull r8,r9,lr,r8
- mov lr,r8,lsr #28
- add lr,lr,r9,lsl #4
- str lr,[r1],#-4
- bne Loop_1
-
-; }
-
- mov r0,r5 ;; r0 = vec
- bl pvmp3_dct_9
- add r0,r5,#0x24 ;; r0 = &vec[9]
- bl pvmp3_dct_9
-
- ldr r0,[r5,#0x20]
- ldr r2,[r5,#0x40]
- str r0,[r5,#0x40]
- ldr r0,[r5,#0x1c]
- ldr r3,[r5,#0x38]
- str r0,[r5,#0x38]
- ldr r1,[r5,#0x18]
- ldr r0,[r5,#0x30]
- str r1,[r5,#0x30]
- ldr r12,[r5,#0x14]
- ldr r1,[r5,#0x28]
- str r12,[r5,#0x28]
- ldr r12,[r5,#0x10]
- str r12,[r5,#0x20]
- ldr r12,[r5,#0xc]
- str r12,[r5,#0x18]
- ldr r12,[r5,#8]
- str r12,[r5,#0x10]
- ldr r12,[r5,#4]
- str r12,[r5,#8]
- ldr r12,[r5,#0x24]
- sub r12,r12,r1
- str r12,[r5,#4]
- ldr r12,[r5,#0x2c]
- sub r1,r12,r1
- str r1,[r5,#0xc]
- sub r1,r12,r0
- str r1,[r5,#0x14]
- ldr r1,[r5,#0x34]
- sub r0,r1,r0
- str r0,[r5,#0x1c]
- sub r0,r1,r3
- str r0,[r5,#0x24]
- ldr r1,[r5,#0x3c]
- sub r3,r1,r3
- sub r1,r1,r2
- str r1,[r5,#0x34]
- str r3,[r5,#0x2c]
- ldr r1,[r5,#0x44]
- sub r1,r1,r2
- str r1,[r5,#0x3c]
- ldr r12,[r5,#0]
-
-Loop_2
- add r1,r5,r4,lsl #2
- ldr r2,[r1,#0x28]
- ldr r3,[r6,r4,lsl #2]
- add r0,r0,r2
- str r0,[r1,#0x28]
- ldr lr,[r7,r4,lsl #2]
- ldr r1,[r1,#4]
- smlal r0,r3,lr,r0
- mov r0,r2
- add r2,r12,r1
- rsb r2,r2,#0
- str r3,[r5,r4,lsl #2]
- str r2,[r6,r4,lsl #2]
- add r4,r4,#1
- cmp r4,#6
- mov r12,r1
-
- blt Loop_2
-
- ldr r1,[r5,#0x40]
- ldr r2,[r6,#0x18]
- add r3,r0,r1
- str r3,[r5,#0x40]
- ldr lr,[r7,r4,lsl #2]
- mov r3,r3,lsl #1
- ldr r0,[r5,#0x1c]
- smlal r3,r2,lr,r3
- add r3,r12,r0
- str r2,[r5,#0x18]
- ldr r2,[r6,#0x1c]
- rsb r3,r3,#0
- str r3,[r6,#0x18]
- ldr r3,[r5,#0x20]
- add r0,r3,r0
- rsb r0,r0,#0
- str r0,[r6,#0x1c]
- ldr r3,[r5,#0x44]
- ldr r0,[r6,#0x20]
- add r3,r3,r1
- mov r1,r2
- ldr r10,[r7,#0x1c]
- mov r2,r3,lsl #1
- smlal r12,r1,r10,r2
- str r1,[r5,#0x1c]
- ldr r1,[r5,#0x20]
- ldr r3,[r5,#0x24]
- add r1,r1,r3
- rsb r1,r1,#0
- str r1,[r6,#0x20]
- ldr r1,[r5,#0x44]
- ldr r3,[r7,#0x20]
- mov r1,r1,lsl #1
- smlal r12,r0,r3,r1
- ldr lr,[r7,#0x24]
- ldr r3,[r6,#0x24]
- str r0,[r5,#0x20]
- smlal r1,r3,lr,r1
- ldr r0,[r6,#0x40]
- ldr r12,[r6,#0x44]
- str r3,[r5,#0x24]
- ldr r1,[r5,#0x28]
- ldr r3,[r7,#0x44]
- mov r1,r1,lsl #1
- smlal r1,r12,r3,r1
- ldr r1,[r5,#0x40]
- str r12,[r5,#0x44]
- rsb r8,r1,#0
- str r8,[r5,#0x28]
- ldr r1,[r5,#0x2c]
- ldr r3,[r7,#0x40]
- mov r1,r1,lsl #1
- smlal r1,r0,r3,r1
- str r0,[r5,#0x40]
- ldr r0,[r5,#0x3c]
- ldr r1,[r6,#0x38]
- ldr r3,[r6,#0x3c]
- rsb r9,r0,#0
- str r9,[r5,#0x2c]
- ldr r0,[r5,#0x30]
- ldr r12,[r7,#0x3c]
- mov r0,r0,lsl #1
- smlal r0,r3,r12,r0
- str r3,[r5,#0x3c]
- ldr r0,[r5,#0x38]
- rsb r0,r0,#0
- str r0,[r5,#0x30]
- ldr r3,[r5,#0x34]
- ldr r12,[r7,#0x38]
- mov r3,r3,lsl #1
- smlal r3,r1,r12,r3
- mov r0,r0,lsl #1
- str r1,[r5,#0x38]
- ldr r4,[r7,#0x34]
- ldr r1,[r6,#0x34]
- ldr r3,[r6,#0x30]
- smlal r0,r1,r4,r0
- ldr r12,[r6,#0x2c]
- ldr lr,[r6,#0x28]
- str r1,[r5,#0x34]
- ldr r1,[r7,#0x30]
- mov r0,r9,lsl #1
- smlal r0,r3,r1,r0
- mov r0,r8,lsl #1
- ldr r1,[r7,#0x2c]
- str r3,[r5,#0x30]
- smlal r0,r12,r1,r0
- ldr r0,[r7,#0x28]
- str r12,[r5,#0x2c]
- smlal r2,lr,r0,r2
- str lr,[r5,#0x28]
- ldr r1,[r6,#4]
- ldr r12,[r7,#0x48]
- mov r2,r1,lsl #1
- ldr r1,[r6,#0x20]
- ldr r0,[r6]
- mov r1,r1,lsl #1
- smull r4,lr,r12,r1
- ldr r3,[r6,#0x1c]
- str lr,[r6]
- ldr r12,[r7,#0x4c]
- mov r3,r3,lsl #1
- smull r4,lr,r12,r3
- mov r0,r0,lsl #1
- ldr r12,[r7,#0x64]
- str lr,[r6,#4]
- smull r4,lr,r12,r2
- ldr r12,[r7,#0x68]
- str lr,[r6,#0x1c]
- smull r4,lr,r12,r0
- ldr r12,[r7,#0x6c]
- str lr,[r6,#0x20]
- smull lr,r0,r12,r0
- ldr r12,[r7,#0x70]
- str r0,[r6,#0x24]
- smull r0,r2,r12,r2
- ldr r0,[r7,#0x88]
- str r2,[r6,#0x28]
- smull r3,r2,r0,r3
- ldr r0,[r7,#0x8c]
- str r2,[r6,#0x40]
- smull r2,r1,r0,r1
- str r1,[r6,#0x44]
- ldr r0,[r6,#0x18]
- ldr lr,[r7,#0x50]
- mov r1,r0,lsl #1
- ldr r0,[r6,#0x14]
- smull r5,r4,lr,r1
- ldr r12,[r6,#0x10]
- mov r3,r0,lsl #1
- ldr r0,[r6,#0xc]
- mov r12,r12,lsl #1
- mov r2,r0,lsl #1
- ldr r0,[r6,#8]
- str r4,[r6,#8]
- ldr lr,[r7,#0x54]
- mov r0,r0,lsl #1
- smull r5,r4,lr,r3
- ldr lr,[r7,#0x58]
- str r4,[r6,#0xc]
- smull r5,r4,lr,r12
- ldr lr,[r7,#0x5c]
- str r4,[r6,#0x10]
- smull r5,r4,lr,r2
- ldr lr,[r7,#0x60]
- str r4,[r6,#0x14]
- smull r5,r4,lr,r0
- ldr lr,[r7,#0x74]
- str r4,[r6,#0x18]
- smull r4,r0,lr,r0
- ldr lr,[r7,#0x78]
- str r0,[r6,#0x2c]
- smull r0,r2,lr,r2
- ldr r0,[r7,#0x7c]
- str r2,[r6,#0x30]
- smull r12,r2,r0,r12
- ldr r0,[r7,#0x80]
- str r2,[r6,#0x34]
- smull r3,r2,r0,r3
- ldr r0,[r7,#0x84]
- str r2,[r6,#0x38]
- smull r2,r1,r0,r1
- str r1,[r6,#0x3c]
- ldmfd sp!,{r4-r10,pc}
-table
- DCD cosTerms_dct18
- ENDP
-
-;------------------------------------------------------------------------------
-
- AREA |.constdata|, DATA, READONLY, ALIGN=2
-
-;------------------------------------------------------------------------------
-
-cosTerms_dct18
- DCD 0x0807d2b0
- DCD 0x08483ee0
- DCD 0x08d3b7d0
- DCD 0x09c42570
- DCD 0x0b504f30
- DCD 0x0df29440
- DCD 0x12edfb20
- DCD 0x1ee8dd40
- DCD 0x5bca2a00
-cosTerms_1_ov_cos_phi
- DCD 0x400f9c00
- DCD 0x408d6080
- DCD 0x418dcb80
- DCD 0x431b1a00
- DCD 0x4545ea00
- DCD 0x48270680
- DCD 0x4be25480
- DCD 0x50ab9480
- DCD 0x56ce4d80
- DCD 0x05ebb630
- DCD 0x06921a98
- DCD 0x0771d3a8
- DCD 0x08a9a830
- DCD 0x0a73d750
- DCD 0x0d4d5260
- DCD 0x127b1ca0
- DCD 0x1ea52b40
- DCD 0x5bb3cc80
-
-
-
- END
diff --git a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_arm.s b/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_arm.s
deleted file mode 100644
index abec599..0000000
--- a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_arm.s
+++ /dev/null
@@ -1,237 +0,0 @@
-; ------------------------------------------------------------------
-; Copyright (C) 1998-2009 PacketVideo
-;
-; Licensed under the Apache License, Version 2.0 (the "License");
-; you may not use this file except in compliance with the License.
-; You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing, software
-; distributed under the License is distributed on an "AS IS" BASIS,
-; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-; express or implied.
-; See the License for the specific language governing permissions
-; and limitations under the License.
-; -------------------------------------------------------------------
-
-;
-;
-; Filename: pvmp3_polyphase_filter_window.s
-;
-;------------------------------------------------------------------------------
-; REVISION HISTORY
-;
-;
-; Who: Date: MM/DD/YYYY
-; Description:
-;
-;------------------------------------------------------------------------------
-
- EXPORT pvmp3_polyphase_filter_window
-
- IMPORT ||Lib$$Request$$armlib|| [WEAK]
- IMPORT ||Lib$$Request$$cpplib|| [WEAK]
- IMPORT pqmfSynthWin
-
-
-
-;------------------------------------------------------------------------------
-
- AREA |.text|, CODE, READONLY, ALIGN=2
-
-
-;------------------------------------------------------------------------------
-
-|pvmp3_polyphase_filter_window| PROC
-
- stmfd sp!,{r0-r2,r4-r11,lr}
-
- sub sp,sp,#4
- ldr r2,[sp,#0xc]
- ldr r1,PolyPh_filter_coeff
-
- sub r2,r2,#1
- mov r10,#1
- str r2,[sp]
-
-; Accumulators r9, r11::> Initialization
-
-Loop_j
- mov r9, #0x20
- mov r11, #0x20
- mov r4, #0x10
-Loop_i
- add r2,r4,r10
- add r3,r0,r2,lsl #2
- sub r2,r4,r10
- ldr r5,[r3]
- ldr lr,[r1]
- add r12,r0,r2,lsl #2
- ldr r6,[r12,#0x780]
- smlal r2,r9,lr,r5
- smlal r2,r11,lr,r6
- ldr r2,[r1,#4]
- ldr r7,[r12,#0x80]
- smlal r5,r11,r2,r5
- smull r6,r5,r2,r6
- sub r9,r9,r5
- ldr r5,[r1,#8]
- ldr r8,[r3,#0x700]
- add r4,r4,#0x200
- smlal r6,r9,r5,r7
- smull r6,r2,r5,r8
- ldr r5,[r1,#0xc]
- sub r11,r11,r2
- smlal r8,r9,r5,r8
- smlal r7,r11,r5,r7
- ldr r5,[r3,#0x100]
- ldr r2,[r1,#0x10]
- ldr r6,[r12,#0x680]
- smlal lr,r9,r2,r5
- smlal lr,r11,r2,r6
- ldr r2,[r1,#0x14]
- ldr r7,[r12,#0x180]
- smlal r5,r11,r2,r5
- smull r6,r5,r2,r6
- ldr r6,[r1,#0x18]
- ldr r8,[r3,#0x600]
- sub r9,r9,r5
- smlal r5,r9,r6,r7
- smull r2,r5,r6,r8
- ldr r6,[r1,#0x1c]
- sub r11,r11,r5
- smlal r8,r9,r6,r8
- ldr r2,[r1,#0x20]
- ldr r5,[r3,#0x200]
- smlal r7,r11,r6,r7
- ldr r6,[r12,#0x580]
- smlal lr,r9,r2,r5
- smlal lr,r11,r2,r6
- ldr r2,[r1,#0x24]
- ldr r7,[r12,#0x280]
- smlal r5,r11,r2,r5
- smull r6,r5,r2,r6
- ldr r6,[r1,#0x28]
- ldr r8,[r3,#0x500]
- sub r9,r9,r5
- smlal r5,r9,r6,r7
- smull r2,r5,r6,r8
- ldr r6,[r1,#0x2c]
- sub r11,r11,r5
-
- smlal r8,r9,r6,r8
- smlal r7,r11,r6,r7
- ldr r5,[r3,#0x300]
- ldr r8,[r1,#0x30]
- ldr r6,[r12,#0x480]
- smlal r7,r9,r8,r5
- smlal r7,r11,r8,r6
- ldr r8,[r1,#0x34]
- ldr r12,[r12,#0x380]
- smlal r5,r11,r8,r5
- smull r6,r5,r8,r6
- ldr r6,[r1,#0x38]
-
-
- ldr r3,[r3,#0x400]
- sub r9,r9,r5
- smlal r7,r9,r6,r12
- smull r8,r7,r6,r3
- cmp r4,#0x210
- sub r11,r11,r7
-
- ldr r2,[r1,#0x3c]
- add r1,r1,#0x40
- smlal r3,r9,r2,r3
- smlal r12,r11,r2,r12
-
- blt Loop_i
-
- mov r3,r9, asr #6
- mov r4,r3, asr #15
- teq r4,r3, asr #31
- ldr r12,LOW_16BITS
- ldr r2,[sp]
- eorne r3,r12,r3,asr #31
- ldr r4,[sp,#8]
- mov r2,r10,lsl r2
- add r4,r4,r2,lsl #1
- strh r3,[r4]
-
- mov r3,r11,asr #6
- mov r4,r3,asr #15
- teq r4,r3,asr #31
- eorne r3,r12,r3,asr #31
- ldr r12,[sp,#0xc]
- ldr r11,[sp,#8]
- rsb r2,r2,r12,lsl #5
- add r2,r11,r2,lsl #1
- strh r3,[r2]
-
- add r10,r10,#1
- cmp r10,#0x10
- blt Loop_j
-
-; Accumulators r4, r5 Initialization
-
- mov r4,#0x20
- mov r5,#0x20
- mov r3,#0x10
-PolyPh_filter_loop2
- add r2,r0,r3,lsl #2
- ldr r12,[r2]
- ldr r8,[r1]
- ldr r6,[r2,#0x80]
- smlal r12,r4,r8,r12
- ldr r12,[r1,#4]
- ldr r7,[r2,#0x40]
- smlal r6,r4,r12,r6
-
- ldr r12,[r1,#8]
- ldr r6,[r2,#0x180]
- smlal r7,r5,r12,r7
- ldr r12,[r2,#0x100]
- ldr r7,[r1,#0xc]
- ldr r2,[r2,#0x140]
- smlal r12,r4,r7,r12
- ldr r12,[r1,#0x10]
- add r3,r3,#0x80
- smlal r6,r4,r12,r6
- ldr r6,[r1,#0x14]
- cmp r3,#0x210
- smlal r2,r5,r6,r2
- add r1,r1,#0x18
-
- blt PolyPh_filter_loop2
- mov r0,r4,asr #6
- mov r2,r0,asr #15
- teq r2,r0,asr #31
- ldrne r12,LOW_16BITS
- ldr r1,[sp,#8]
- eorne r0,r12,r0,asr #31
- strh r0,[r1,#0]
- mov r0,r5,asr #6
- mov r2,r0,asr #15
- teq r2,r0,asr #31
- ldrne r12,LOW_16BITS
- ldr r2,[sp]
- mov r1,#0x10
- eorne r0,r12,r0,asr #31
- ldr r12,[sp,#8]
- mov r1,r1,lsl r2
- add r1,r12,r1,lsl #1
- strh r0,[r1]
- add sp,sp,#0x10
- ldmfd sp!,{r4-r11,pc}
-
-
-PolyPh_filter_coeff
- DCD pqmfSynthWin
-LOW_16BITS
- DCD 0x00007fff
-
- ENDP
-
-
- END
diff --git a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_gcc.s b/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_gcc.s
index b74c849..1140ed7 100644
--- a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_gcc.s
+++ b/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_gcc.s
@@ -35,6 +35,7 @@
.text
.extern pqmfSynthWin
+.hidden pqmfSynthWin
diff --git a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_wm.asm b/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_wm.asm
deleted file mode 100644
index f957267..0000000
--- a/media/libstagefright/codecs/mp3dec/src/asm/pvmp3_polyphase_filter_window_wm.asm
+++ /dev/null
@@ -1,231 +0,0 @@
-; ------------------------------------------------------------------
-; Copyright (C) 1998-2009 PacketVideo
-;
-; Licensed under the Apache License, Version 2.0 (the "License");
-; you may not use this file except in compliance with the License.
-; You may obtain a copy of the License at
-;
-; http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing, software
-; distributed under the License is distributed on an "AS IS" BASIS,
-; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-; express or implied.
-; See the License for the specific language governing permissions
-; and limitations under the License.
-; -------------------------------------------------------------------
-
-;
-;
-; Filename: pvmp3_polyphase_filter_window.s
-;
-;------------------------------------------------------------------------------
-; REVISION HISTORY
-;
-;
-; Who: Date: MM/DD/YYYY
-; Description:
-;
-;------------------------------------------------------------------------------
-
- CODE32
-
- AREA |.drectve|, DRECTVE
-
- EXPORT |pvmp3_polyphase_filter_window|
- IMPORT |pqmfSynthWin|
-
- AREA |.pdata|, PDATA
-
- AREA |.text|, CODE, ARM
-
-|pvmp3_polyphase_filter_window| PROC
- stmfd sp!,{r0-r2,r4-r11,lr}
-
- sub sp,sp,#4
- ldr r2,[sp,#0xc]
- ldr r1,PolyPh_filter_coeff
-
- sub r2,r2,#1
- mov r10,#1
- str r2,[sp]
-
-; Accumulators r9, r11::> Initialization
-
-Loop_j
- mov r9, #0x20
- mov r11, #0x20
- mov r4, #0x10
-Loop_i
- add r2,r4,r10
- add r3,r0,r2,lsl #2
- sub r2,r4,r10
- ldr r5,[r3]
- ldr lr,[r1]
- add r12,r0,r2,lsl #2
- ldr r6,[r12,#0x780]
- smlal r2,r9,lr,r5
- smlal r2,r11,lr,r6
- ldr r2,[r1,#4]
- ldr r7,[r12,#0x80]
- smlal r5,r11,r2,r5
- smull r6,r5,r2,r6
- sub r9,r9,r5
- ldr r5,[r1,#8]
- ldr r8,[r3,#0x700]
- add r4,r4,#0x200
- smlal r6,r9,r5,r7
- smull r6,r2,r5,r8
- ldr r5,[r1,#0xc]
- sub r11,r11,r2
- smlal r8,r9,r5,r8
- smlal r7,r11,r5,r7
- ldr r5,[r3,#0x100]
- ldr r2,[r1,#0x10]
- ldr r6,[r12,#0x680]
- smlal lr,r9,r2,r5
- smlal lr,r11,r2,r6
- ldr r2,[r1,#0x14]
- ldr r7,[r12,#0x180]
- smlal r5,r11,r2,r5
- smull r6,r5,r2,r6
- ldr r6,[r1,#0x18]
- ldr r8,[r3,#0x600]
- sub r9,r9,r5
- smlal r5,r9,r6,r7
- smull r2,r5,r6,r8
- ldr r6,[r1,#0x1c]
- sub r11,r11,r5
- smlal r8,r9,r6,r8
- ldr r2,[r1,#0x20]
- ldr r5,[r3,#0x200]
- smlal r7,r11,r6,r7
- ldr r6,[r12,#0x580]
- smlal lr,r9,r2,r5
- smlal lr,r11,r2,r6
- ldr r2,[r1,#0x24]
- ldr r7,[r12,#0x280]
- smlal r5,r11,r2,r5
- smull r6,r5,r2,r6
- ldr r6,[r1,#0x28]
- ldr r8,[r3,#0x500]
- sub r9,r9,r5
- smlal r5,r9,r6,r7
- smull r2,r5,r6,r8
- ldr r6,[r1,#0x2c]
- sub r11,r11,r5
-
- smlal r8,r9,r6,r8
- smlal r7,r11,r6,r7
- ldr r5,[r3,#0x300]
- ldr r8,[r1,#0x30]
- ldr r6,[r12,#0x480]
- smlal r7,r9,r8,r5
- smlal r7,r11,r8,r6
- ldr r8,[r1,#0x34]
- ldr r12,[r12,#0x380]
- smlal r5,r11,r8,r5
- smull r6,r5,r8,r6
- ldr r6,[r1,#0x38]
-
-
- ldr r3,[r3,#0x400]
- sub r9,r9,r5
- smlal r7,r9,r6,r12
- smull r8,r7,r6,r3
- cmp r4,#0x210
- sub r11,r11,r7
-
- ldr r2,[r1,#0x3c]
- add r1,r1,#0x40
- smlal r3,r9,r2,r3
- smlal r12,r11,r2,r12
-
- blt Loop_i
-
- mov r3,r9, asr #6
- mov r4,r3, asr #15
- teq r4,r3, asr #31
- ldr r12,LOW_16BITS
- ldr r2,[sp]
- eorne r3,r12,r3,asr #31
- ldr r4,[sp,#8]
- mov r2,r10,lsl r2
- add r4,r4,r2,lsl #1
- strh r3,[r4]
-
- mov r3,r11,asr #6
- mov r4,r3,asr #15
- teq r4,r3,asr #31
- eorne r3,r12,r3,asr #31
- ldr r12,[sp,#0xc]
- ldr r11,[sp,#8]
- rsb r2,r2,r12,lsl #5
- add r2,r11,r2,lsl #1
- strh r3,[r2]
-
- add r10,r10,#1
- cmp r10,#0x10
- blt Loop_j
-
-; Accumulators r4, r5 Initialization
-
- mov r4,#0x20
- mov r5,#0x20
- mov r3,#0x10
-PolyPh_filter_loop2
- add r2,r0,r3,lsl #2
- ldr r12,[r2]
- ldr r8,[r1]
- ldr r6,[r2,#0x80]
- smlal r12,r4,r8,r12
- ldr r12,[r1,#4]
- ldr r7,[r2,#0x40]
- smlal r6,r4,r12,r6
-
- ldr r12,[r1,#8]
- ldr r6,[r2,#0x180]
- smlal r7,r5,r12,r7
- ldr r12,[r2,#0x100]
- ldr r7,[r1,#0xc]
- ldr r2,[r2,#0x140]
- smlal r12,r4,r7,r12
- ldr r12,[r1,#0x10]
- add r3,r3,#0x80
- smlal r6,r4,r12,r6
- ldr r6,[r1,#0x14]
- cmp r3,#0x210
- smlal r2,r5,r6,r2
- add r1,r1,#0x18
-
- blt PolyPh_filter_loop2
- mov r0,r4,asr #6
- mov r2,r0,asr #15
- teq r2,r0,asr #31
- ldrne r12,LOW_16BITS
- ldr r1,[sp,#8]
- eorne r0,r12,r0,asr #31
- strh r0,[r1,#0]
- mov r0,r5,asr #6
- mov r2,r0,asr #15
- teq r2,r0,asr #31
- ldrne r12,LOW_16BITS
- ldr r2,[sp]
- mov r1,#0x10
- eorne r0,r12,r0,asr #31
- ldr r12,[sp,#8]
- mov r1,r1,lsl r2
- add r1,r12,r1,lsl #1
- strh r0,[r1]
- add sp,sp,#0x10
- ldmfd sp!,{r4-r11,pc}
-
-
-PolyPh_filter_coeff
- DCD pqmfSynthWin
-LOW_16BITS
- DCD 0x00007fff
-
- ENDP ; |pvmp3_polyphase_filter_window|
- END
-
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_mpeg2_get_scale_data.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_mpeg2_get_scale_data.cpp
index ee42dc5..499672b 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_mpeg2_get_scale_data.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_mpeg2_get_scale_data.cpp
@@ -139,7 +139,7 @@ void pvmp3_mpeg2_get_scale_data(mp3SideInfo *si,
int16 blocknumber = 0;
granuleInfo *gr_info = &(si->ch[ch].gran[gr]);
- uint32 scalefac_comp, int_scalefac_comp, new_slen[4];
+ uint32 scalefac_comp, int_scalefac_comp, new_slen[4] = { 0,0,0,0 };
scalefac_comp = gr_info->scalefac_compress;
diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk
index 7f2c46d..93ff64c 100644
--- a/media/libstagefright/codecs/on2/dec/Android.mk
+++ b/media/libstagefright/codecs/on2/dec/Android.mk
@@ -20,4 +20,6 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_MODULE := libstagefright_soft_vpxdec
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 476e986..828577a 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -23,9 +23,6 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
-#include "vpx/vpx_decoder.h"
-#include "vpx/vpx_codec.h"
-#include "vpx/vp8dx.h"
namespace android {
@@ -41,7 +38,8 @@ SoftVPX::SoftVPX(
NULL /* profileLevels */, 0 /* numProfileLevels */,
320 /* width */, 240 /* height */, callbacks, appData, component),
mMode(codingType == OMX_VIDEO_CodingVP8 ? MODE_VP8 : MODE_VP9),
- mCtx(NULL) {
+ mCtx(NULL),
+ mImg(NULL) {
initPorts(kNumBuffers, 768 * 1024 /* inputBufferSize */,
kNumBuffers,
codingType == OMX_VIDEO_CodingVP8 ? MEDIA_MIMETYPE_VIDEO_VP8 : MEDIA_MIMETYPE_VIDEO_VP9);
@@ -85,7 +83,7 @@ status_t SoftVPX::initDecoder() {
return OK;
}
-void SoftVPX::onQueueFilled(OMX_U32 portIndex) {
+void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) {
if (mOutputPortSettingsChange != NONE) {
return;
}
@@ -118,35 +116,30 @@ void SoftVPX::onQueueFilled(OMX_U32 portIndex) {
}
}
- if (vpx_codec_decode(
- (vpx_codec_ctx_t *)mCtx,
- inHeader->pBuffer + inHeader->nOffset,
- inHeader->nFilledLen,
- NULL,
- 0)) {
- ALOGE("on2 decoder failed to decode frame.");
+ if (mImg == NULL) {
+ if (vpx_codec_decode(
+ (vpx_codec_ctx_t *)mCtx,
+ inHeader->pBuffer + inHeader->nOffset,
+ inHeader->nFilledLen,
+ NULL,
+ 0)) {
+ ALOGE("on2 decoder failed to decode frame.");
- notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
- return;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ vpx_codec_iter_t iter = NULL;
+ mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
}
- vpx_codec_iter_t iter = NULL;
- vpx_image_t *img = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
-
- if (img != NULL) {
- CHECK_EQ(img->fmt, IMG_FMT_I420);
-
- uint32_t width = img->d_w;
- uint32_t height = img->d_h;
+ if (mImg != NULL) {
+ CHECK_EQ(mImg->fmt, IMG_FMT_I420);
- if (width != mWidth || height != mHeight) {
- mWidth = width;
- mHeight = height;
-
- updatePortDefinitions();
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
+ uint32_t width = mImg->d_w;
+ uint32_t height = mImg->d_h;
+ bool portWillReset = false;
+ handlePortSettingsChange(&portWillReset, width, height);
+ if (portWillReset) {
return;
}
@@ -155,31 +148,16 @@ void SoftVPX::onQueueFilled(OMX_U32 portIndex) {
outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0;
outHeader->nTimeStamp = inHeader->nTimeStamp;
- const uint8_t *srcLine = (const uint8_t *)img->planes[PLANE_Y];
uint8_t *dst = outHeader->pBuffer;
- for (size_t i = 0; i < img->d_h; ++i) {
- memcpy(dst, srcLine, img->d_w);
-
- srcLine += img->stride[PLANE_Y];
- dst += img->d_w;
- }
-
- srcLine = (const uint8_t *)img->planes[PLANE_U];
- for (size_t i = 0; i < img->d_h / 2; ++i) {
- memcpy(dst, srcLine, img->d_w / 2);
-
- srcLine += img->stride[PLANE_U];
- dst += img->d_w / 2;
- }
-
- srcLine = (const uint8_t *)img->planes[PLANE_V];
- for (size_t i = 0; i < img->d_h / 2; ++i) {
- memcpy(dst, srcLine, img->d_w / 2);
-
- srcLine += img->stride[PLANE_V];
- dst += img->d_w / 2;
- }
-
+ const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y];
+ const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U];
+ const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V];
+ size_t srcYStride = mImg->stride[PLANE_Y];
+ size_t srcUStride = mImg->stride[PLANE_U];
+ size_t srcVStride = mImg->stride[PLANE_V];
+ copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+
+ mImg = NULL;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index cd5eb28..8f68693 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -20,6 +20,10 @@
#include "SoftVideoDecoderOMXComponent.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8dx.h"
+
namespace android {
struct SoftVPX : public SoftVideoDecoderOMXComponent {
@@ -47,6 +51,8 @@ private:
void *mCtx;
+ vpx_image_t *mImg;
+
status_t initDecoder();
DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index 4060a0a..e265104 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -12,10 +12,6 @@ LOCAL_C_INCLUDES := \
frameworks/av/media/libstagefright/include \
frameworks/native/include/media/openmax \
-ifeq ($(TARGET_DEVICE), manta)
- LOCAL_CFLAGS += -DSURFACE_IS_BGR32
-endif
-
LOCAL_STATIC_LIBRARIES := \
libvpx
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 8375cac..eb621d5 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -27,7 +27,6 @@
namespace android {
-
template<class T>
static void InitOMXParams(T *params) {
params->nSize = sizeof(T);
@@ -51,107 +50,37 @@ static int GetCPUCoreCount() {
return cpuCoreCount;
}
-
-// This color conversion utility is copied from SoftMPEG4Encoder.cpp
-inline static void ConvertSemiPlanarToPlanar(uint8_t *inyuv,
- uint8_t* outyuv,
- int32_t width,
- int32_t height) {
- int32_t outYsize = width * height;
- uint32_t *outy = (uint32_t *) outyuv;
- uint16_t *outcb = (uint16_t *) (outyuv + outYsize);
- uint16_t *outcr = (uint16_t *) (outyuv + outYsize + (outYsize >> 2));
-
- /* Y copying */
- memcpy(outy, inyuv, outYsize);
-
- /* U & V copying */
- uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
- for (int32_t i = height >> 1; i > 0; --i) {
- for (int32_t j = width >> 2; j > 0; --j) {
- uint32_t temp = *inyuv_4++;
- uint32_t tempU = temp & 0xFF;
- tempU = tempU | ((temp >> 8) & 0xFF00);
-
- uint32_t tempV = (temp >> 8) & 0xFF;
- tempV = tempV | ((temp >> 16) & 0xFF00);
-
- // Flip U and V
- *outcb++ = tempV;
- *outcr++ = tempU;
- }
- }
-}
-
-static void ConvertRGB32ToPlanar(
- const uint8_t *src, uint8_t *dstY, int32_t width, int32_t height) {
- CHECK((width & 1) == 0);
- CHECK((height & 1) == 0);
-
- uint8_t *dstU = dstY + width * height;
- uint8_t *dstV = dstU + (width / 2) * (height / 2);
-
- for (int32_t y = 0; y < height; ++y) {
- for (int32_t x = 0; x < width; ++x) {
-#ifdef SURFACE_IS_BGR32
- unsigned blue = src[4 * x];
- unsigned green = src[4 * x + 1];
- unsigned red= src[4 * x + 2];
-#else
- unsigned red= src[4 * x];
- unsigned green = src[4 * x + 1];
- unsigned blue = src[4 * x + 2];
-#endif
-
- unsigned luma =
- ((red * 66 + green * 129 + blue * 25) >> 8) + 16;
-
- dstY[x] = luma;
-
- if ((x & 1) == 0 && (y & 1) == 0) {
- unsigned U =
- ((-red * 38 - green * 74 + blue * 112) >> 8) + 128;
-
- unsigned V =
- ((red * 112 - green * 94 - blue * 18) >> 8) + 128;
-
- dstU[x / 2] = U;
- dstV[x / 2] = V;
- }
- }
-
- if ((y & 1) == 0) {
- dstU += width / 2;
- dstV += width / 2;
- }
-
- src += 4 * width;
- dstY += width;
- }
-}
-
SoftVPXEncoder::SoftVPXEncoder(const char *name,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
OMX_COMPONENTTYPE **component)
- : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ : SoftVideoEncoderOMXComponent(name, callbacks, appData, component),
mCodecContext(NULL),
mCodecConfiguration(NULL),
mCodecInterface(NULL),
mWidth(176),
mHeight(144),
mBitrate(192000), // in bps
+ mFramerate(30 << 16), // in Q16 format
mBitrateUpdated(false),
mBitrateControlMode(VPX_VBR), // variable bitrate
- mFrameDurationUs(33333), // Defaults to 30 fps
mDCTPartitions(0),
mErrorResilience(OMX_FALSE),
mColorFormat(OMX_COLOR_FormatYUV420Planar),
mLevel(OMX_VIDEO_VP8Level_Version0),
+ mKeyFrameInterval(0),
+ mMinQuantizer(0),
+ mMaxQuantizer(0),
+ mTemporalLayers(0),
+ mTemporalPatternType(OMX_VIDEO_VPXTemporalLayerPatternNone),
+ mTemporalPatternLength(0),
+ mTemporalPatternIdx(0),
+ mLastTimestamp(0x7FFFFFFFFFFFFFFFLL),
mConversionBuffer(NULL),
mInputDataIsMeta(false),
- mGrallocModule(NULL),
mKeyFrameRequested(false) {
+ memset(mTemporalLayerBitrateRatio, 0, sizeof(mTemporalLayerBitrateRatio));
+ mTemporalLayerBitrateRatio[0] = 100;
initPorts();
}
@@ -180,9 +109,8 @@ void SoftVPXEncoder::initPorts() {
inputPort.format.video.nStride = inputPort.format.video.nFrameWidth;
inputPort.format.video.nSliceHeight = inputPort.format.video.nFrameHeight;
inputPort.format.video.nBitrate = 0;
- // frameRate is reciprocal of frameDuration, which is
- // in microseconds. It is also in Q16 format.
- inputPort.format.video.xFramerate = (1000000/mFrameDurationUs) << 16;
+ // frameRate is in Q16 format.
+ inputPort.format.video.xFramerate = mFramerate;
inputPort.format.video.bFlagErrorConcealment = OMX_FALSE;
inputPort.nPortIndex = kInputPortIndex;
inputPort.eDir = OMX_DirInput;
@@ -220,7 +148,7 @@ void SoftVPXEncoder::initPorts() {
outputPort.format.video.eCompressionFormat = OMX_VIDEO_CodingVP8;
outputPort.format.video.eColorFormat = OMX_COLOR_FormatUnused;
outputPort.format.video.pNativeWindow = NULL;
- outputPort.nBufferSize = 256 * 1024; // arbitrary
+ outputPort.nBufferSize = 1024 * 1024; // arbitrary
addPort(outputPort);
}
@@ -236,7 +164,9 @@ status_t SoftVPXEncoder::initEncoder() {
if (mCodecInterface == NULL) {
return UNKNOWN_ERROR;
}
-
+ ALOGD("VP8: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
+ (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
+ mMinQuantizer, mMaxQuantizer);
codec_return = vpx_codec_enc_config_default(mCodecInterface,
mCodecConfiguration,
0); // Codec specific flags
@@ -277,8 +207,120 @@ status_t SoftVPXEncoder::initEncoder() {
mCodecConfiguration->g_timebase.num = 1;
mCodecConfiguration->g_timebase.den = 1000000;
// rc_target_bitrate is in kbps, mBitrate in bps
- mCodecConfiguration->rc_target_bitrate = mBitrate/1000;
+ mCodecConfiguration->rc_target_bitrate = (mBitrate + 500) / 1000;
mCodecConfiguration->rc_end_usage = mBitrateControlMode;
+ // Disable frame drop - not allowed in MediaCodec now.
+ mCodecConfiguration->rc_dropframe_thresh = 0;
+ if (mBitrateControlMode == VPX_CBR) {
+ // Disable spatial resizing.
+ mCodecConfiguration->rc_resize_allowed = 0;
+ // Single-pass mode.
+ mCodecConfiguration->g_pass = VPX_RC_ONE_PASS;
+ // Maximum amount of bits that can be subtracted from the target
+ // bitrate - expressed as percentage of the target bitrate.
+ mCodecConfiguration->rc_undershoot_pct = 100;
+ // Maximum amount of bits that can be added to the target
+ // bitrate - expressed as percentage of the target bitrate.
+ mCodecConfiguration->rc_overshoot_pct = 15;
+ // Initial value of the buffer level in ms.
+ mCodecConfiguration->rc_buf_initial_sz = 500;
+ // Amount of data that the encoder should try to maintain in ms.
+ mCodecConfiguration->rc_buf_optimal_sz = 600;
+ // The amount of data that may be buffered by the decoding
+ // application in ms.
+ mCodecConfiguration->rc_buf_sz = 1000;
+ // Enable error resilience - needed for packet loss.
+ mCodecConfiguration->g_error_resilient = 1;
+ // Disable lagged encoding.
+ mCodecConfiguration->g_lag_in_frames = 0;
+ // Maximum key frame interval - for CBR boost to 3000
+ mCodecConfiguration->kf_max_dist = 3000;
+ // Encoder determines optimal key frame placement automatically.
+ mCodecConfiguration->kf_mode = VPX_KF_AUTO;
+ }
+
+ // Frames temporal pattern - for now WebRTC like pattern is only supported.
+ switch (mTemporalLayers) {
+ case 0:
+ {
+ mTemporalPatternLength = 0;
+ break;
+ }
+ case 1:
+ {
+ mCodecConfiguration->ts_number_layers = 1;
+ mCodecConfiguration->ts_rate_decimator[0] = 1;
+ mCodecConfiguration->ts_periodicity = 1;
+ mCodecConfiguration->ts_layer_id[0] = 0;
+ mTemporalPattern[0] = kTemporalUpdateLastRefAll;
+ mTemporalPatternLength = 1;
+ break;
+ }
+ case 2:
+ {
+ mCodecConfiguration->ts_number_layers = 2;
+ mCodecConfiguration->ts_rate_decimator[0] = 2;
+ mCodecConfiguration->ts_rate_decimator[1] = 1;
+ mCodecConfiguration->ts_periodicity = 2;
+ mCodecConfiguration->ts_layer_id[0] = 0;
+ mCodecConfiguration->ts_layer_id[1] = 1;
+ mTemporalPattern[0] = kTemporalUpdateLastAndGoldenRefAltRef;
+ mTemporalPattern[1] = kTemporalUpdateGoldenWithoutDependencyRefAltRef;
+ mTemporalPattern[2] = kTemporalUpdateLastRefAltRef;
+ mTemporalPattern[3] = kTemporalUpdateGoldenRefAltRef;
+ mTemporalPattern[4] = kTemporalUpdateLastRefAltRef;
+ mTemporalPattern[5] = kTemporalUpdateGoldenRefAltRef;
+ mTemporalPattern[6] = kTemporalUpdateLastRefAltRef;
+ mTemporalPattern[7] = kTemporalUpdateNone;
+ mTemporalPatternLength = 8;
+ break;
+ }
+ case 3:
+ {
+ mCodecConfiguration->ts_number_layers = 3;
+ mCodecConfiguration->ts_rate_decimator[0] = 4;
+ mCodecConfiguration->ts_rate_decimator[1] = 2;
+ mCodecConfiguration->ts_rate_decimator[2] = 1;
+ mCodecConfiguration->ts_periodicity = 4;
+ mCodecConfiguration->ts_layer_id[0] = 0;
+ mCodecConfiguration->ts_layer_id[1] = 2;
+ mCodecConfiguration->ts_layer_id[2] = 1;
+ mCodecConfiguration->ts_layer_id[3] = 2;
+ mTemporalPattern[0] = kTemporalUpdateLastAndGoldenRefAltRef;
+ mTemporalPattern[1] = kTemporalUpdateNoneNoRefGoldenRefAltRef;
+ mTemporalPattern[2] = kTemporalUpdateGoldenWithoutDependencyRefAltRef;
+ mTemporalPattern[3] = kTemporalUpdateNone;
+ mTemporalPattern[4] = kTemporalUpdateLastRefAltRef;
+ mTemporalPattern[5] = kTemporalUpdateNone;
+ mTemporalPattern[6] = kTemporalUpdateGoldenRefAltRef;
+ mTemporalPattern[7] = kTemporalUpdateNone;
+ mTemporalPatternLength = 8;
+ break;
+ }
+ default:
+ {
+ ALOGE("Wrong number of temporal layers %zu", mTemporalLayers);
+ return UNKNOWN_ERROR;
+ }
+ }
+
+ // Set bitrate values for each layer
+ for (size_t i = 0; i < mCodecConfiguration->ts_number_layers; i++) {
+ mCodecConfiguration->ts_target_bitrate[i] =
+ mCodecConfiguration->rc_target_bitrate *
+ mTemporalLayerBitrateRatio[i] / 100;
+ }
+ if (mKeyFrameInterval > 0) {
+ mCodecConfiguration->kf_max_dist = mKeyFrameInterval;
+ mCodecConfiguration->kf_min_dist = mKeyFrameInterval;
+ mCodecConfiguration->kf_mode = VPX_KF_AUTO;
+ }
+ if (mMinQuantizer > 0) {
+ mCodecConfiguration->rc_min_quantizer = mMinQuantizer;
+ }
+ if (mMaxQuantizer > 0) {
+ mCodecConfiguration->rc_max_quantizer = mMaxQuantizer;
+ }
codec_return = vpx_codec_enc_init(mCodecContext,
mCodecInterface,
@@ -298,13 +340,39 @@ status_t SoftVPXEncoder::initEncoder() {
return UNKNOWN_ERROR;
}
- if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar || mInputDataIsMeta) {
- if (mConversionBuffer == NULL) {
- mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
- if (mConversionBuffer == NULL) {
- ALOGE("Allocating conversion buffer failed.");
- return UNKNOWN_ERROR;
+ // Extra CBR settings
+ if (mBitrateControlMode == VPX_CBR) {
+ codec_return = vpx_codec_control(mCodecContext,
+ VP8E_SET_STATIC_THRESHOLD,
+ 1);
+ if (codec_return == VPX_CODEC_OK) {
+ uint32_t rc_max_intra_target =
+ mCodecConfiguration->rc_buf_optimal_sz * (mFramerate >> 17) / 10;
+ // Don't go below 3 times per frame bandwidth.
+ if (rc_max_intra_target < 300) {
+ rc_max_intra_target = 300;
}
+ codec_return = vpx_codec_control(mCodecContext,
+ VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target);
+ }
+ if (codec_return == VPX_CODEC_OK) {
+ codec_return = vpx_codec_control(mCodecContext,
+ VP8E_SET_CPUUSED,
+ -8);
+ }
+ if (codec_return != VPX_CODEC_OK) {
+ ALOGE("Error setting cbr parameters for vpx encoder.");
+ return UNKNOWN_ERROR;
+ }
+ }
+
+ if (mColorFormat != OMX_COLOR_FormatYUV420Planar || mInputDataIsMeta) {
+ free(mConversionBuffer);
+ mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
+ if (mConversionBuffer == NULL) {
+ ALOGE("Allocating conversion buffer failed.");
+ return UNKNOWN_ERROR;
}
}
return OK;
@@ -324,7 +392,7 @@ status_t SoftVPXEncoder::releaseEncoder() {
}
if (mConversionBuffer != NULL) {
- delete mConversionBuffer;
+ free(mConversionBuffer);
mConversionBuffer = NULL;
}
@@ -361,9 +429,7 @@ OMX_ERRORTYPE SoftVPXEncoder::internalGetParameter(OMX_INDEXTYPE index,
}
formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
- // Converting from microseconds
- // Also converting to Q16 format
- formatParams->xFramerate = (1000000/mFrameDurationUs) << 16;
+ formatParams->xFramerate = mFramerate;
return OMX_ErrorNone;
} else if (formatParams->nPortIndex == kOutputPortIndex) {
formatParams->eCompressionFormat = OMX_VIDEO_CodingVP8;
@@ -411,6 +477,24 @@ OMX_ERRORTYPE SoftVPXEncoder::internalGetParameter(OMX_INDEXTYPE index,
return OMX_ErrorNone;
}
+ case OMX_IndexParamVideoAndroidVp8Encoder: {
+ OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
+ (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param;
+
+ if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vp8AndroidParams->nKeyFrameInterval = mKeyFrameInterval;
+ vp8AndroidParams->eTemporalPattern = mTemporalPatternType;
+ vp8AndroidParams->nTemporalLayerCount = mTemporalLayers;
+ vp8AndroidParams->nMinQuantizer = mMinQuantizer;
+ vp8AndroidParams->nMaxQuantizer = mMaxQuantizer;
+ memcpy(vp8AndroidParams->nTemporalLayerBitrateRatio,
+ mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
+ return OMX_ErrorNone;
+ }
+
case OMX_IndexParamVideoProfileLevelQuerySupported: {
OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileAndLevel =
(OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param;
@@ -497,11 +581,15 @@ OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index,
return internalSetVp8Params(
(const OMX_VIDEO_PARAM_VP8TYPE *)param);
+ case OMX_IndexParamVideoAndroidVp8Encoder:
+ return internalSetAndroidVp8Params(
+ (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
+
case OMX_IndexParamVideoProfileLevelCurrent:
return internalSetProfileLevel(
(const OMX_VIDEO_PARAM_PROFILELEVELTYPE *)param);
- case OMX_IndexVendorStartUnused:
+ case kStoreMetaDataExtensionIndex:
{
// storeMetaDataInBuffers
const StoreMetaDataInBuffersParams *storeParam =
@@ -610,6 +698,50 @@ OMX_ERRORTYPE SoftVPXEncoder::internalSetVp8Params(
return OMX_ErrorNone;
}
+OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVp8Params(
+ const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams) {
+ if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+ if (vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
+ vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+ return OMX_ErrorBadParameter;
+ }
+ if (vp8AndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
+ return OMX_ErrorBadParameter;
+ }
+ if (vp8AndroidParams->nMinQuantizer > vp8AndroidParams->nMaxQuantizer) {
+ return OMX_ErrorBadParameter;
+ }
+
+ mTemporalPatternType = vp8AndroidParams->eTemporalPattern;
+ if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+ mTemporalLayers = vp8AndroidParams->nTemporalLayerCount;
+ } else if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
+ mTemporalLayers = 0;
+ }
+ // Check the bitrate distribution between layers is in increasing order
+ if (mTemporalLayers > 1) {
+ for (size_t i = 0; i < mTemporalLayers - 1; i++) {
+ if (vp8AndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
+ vp8AndroidParams->nTemporalLayerBitrateRatio[i]) {
+ ALOGE("Wrong bitrate ratio - should be in increasing order.");
+ return OMX_ErrorBadParameter;
+ }
+ }
+ }
+ mKeyFrameInterval = vp8AndroidParams->nKeyFrameInterval;
+ mMinQuantizer = vp8AndroidParams->nMinQuantizer;
+ mMaxQuantizer = vp8AndroidParams->nMaxQuantizer;
+ memcpy(mTemporalLayerBitrateRatio, vp8AndroidParams->nTemporalLayerBitrateRatio,
+ sizeof(mTemporalLayerBitrateRatio));
+ ALOGD("VP8: internalSetAndroidVp8Params. BRMode: %u. TS: %zu. KF: %u."
+ " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
+ (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
+ mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
+ mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
+ return OMX_ErrorNone;
+}
OMX_ERRORTYPE SoftVPXEncoder::internalSetFormatParams(
const OMX_VIDEO_PARAM_PORTFORMATTYPE* format) {
@@ -660,9 +792,7 @@ OMX_ERRORTYPE SoftVPXEncoder::internalSetPortParams(
mHeight = port->format.video.nFrameHeight;
// xFramerate comes in Q16 format, in frames per second unit
- const uint32_t framerate = port->format.video.xFramerate >> 16;
- // frame duration is in microseconds
- mFrameDurationUs = (1000000/framerate);
+ mFramerate = port->format.video.xFramerate;
if (port->format.video.eColorFormat == OMX_COLOR_FormatYUV420Planar ||
port->format.video.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
@@ -675,12 +805,22 @@ OMX_ERRORTYPE SoftVPXEncoder::internalSetPortParams(
OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
def->format.video.nFrameWidth = mWidth;
def->format.video.nFrameHeight = mHeight;
- def->format.video.xFramerate = port->format.video.xFramerate;
+ def->format.video.xFramerate = mFramerate;
def->format.video.eColorFormat = mColorFormat;
+ def = &editPortInfo(kOutputPortIndex)->mDef;
+ def->format.video.nFrameWidth = mWidth;
+ def->format.video.nFrameHeight = mHeight;
return OMX_ErrorNone;
} else if (port->nPortIndex == kOutputPortIndex) {
mBitrate = port->format.video.nBitrate;
+ mWidth = port->format.video.nFrameWidth;
+ mHeight = port->format.video.nFrameHeight;
+
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef;
+ def->format.video.nFrameWidth = mWidth;
+ def->format.video.nFrameHeight = mHeight;
+ def->format.video.nBitrate = mBitrate;
return OMX_ErrorNone;
} else {
return OMX_ErrorBadPortIndex;
@@ -707,6 +847,74 @@ OMX_ERRORTYPE SoftVPXEncoder::internalSetBitrateParams(
return OMX_ErrorNone;
}
+vpx_enc_frame_flags_t SoftVPXEncoder::getEncodeFlags() {
+ vpx_enc_frame_flags_t flags = 0;
+ int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
+ mTemporalPatternIdx++;
+ switch (mTemporalPattern[patternIdx]) {
+ case kTemporalUpdateLast:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ break;
+ case kTemporalUpdateGoldenWithoutDependency:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ // Deliberately no break here.
+ case kTemporalUpdateGolden:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateAltrefWithoutDependency:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ // Deliberately no break here.
+ case kTemporalUpdateAltref:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateNoneNoRefAltref:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ // Deliberately no break here.
+ case kTemporalUpdateNone:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+ break;
+ case kTemporalUpdateNoneNoRefGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+ break;
+ case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateLastRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ break;
+ case kTemporalUpdateGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateLastAndGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ break;
+ case kTemporalUpdateLastRefAll:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ break;
+ }
+ return flags;
+}
void SoftVPXEncoder::onQueueFilled(OMX_U32 portIndex) {
// Initialize encoder if not already
@@ -746,51 +954,33 @@ void SoftVPXEncoder::onQueueFilled(OMX_U32 portIndex) {
return;
}
- uint8_t *source =
+ const uint8_t *source =
inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
if (mInputDataIsMeta) {
- CHECK_GE(inputBufferHeader->nFilledLen,
- 4 + sizeof(buffer_handle_t));
-
- uint32_t bufferType = *(uint32_t *)source;
- CHECK_EQ(bufferType, kMetadataBufferTypeGrallocSource);
-
- if (mGrallocModule == NULL) {
- CHECK_EQ(0, hw_get_module(
- GRALLOC_HARDWARE_MODULE_ID, &mGrallocModule));
+ source = extractGraphicBuffer(
+ mConversionBuffer, mWidth * mHeight * 3 / 2,
+ source, inputBufferHeader->nFilledLen,
+ mWidth, mHeight);
+ if (source == NULL) {
+ ALOGE("Unable to extract gralloc buffer in metadata mode");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+ return;
}
-
- const gralloc_module_t *grmodule =
- (const gralloc_module_t *)mGrallocModule;
-
- buffer_handle_t handle = *(buffer_handle_t *)(source + 4);
-
- void *bits;
- CHECK_EQ(0,
- grmodule->lock(
- grmodule, handle,
- GRALLOC_USAGE_SW_READ_OFTEN
- | GRALLOC_USAGE_SW_WRITE_NEVER,
- 0, 0, mWidth, mHeight, &bits));
-
- ConvertRGB32ToPlanar(
- (const uint8_t *)bits, mConversionBuffer, mWidth, mHeight);
-
- source = mConversionBuffer;
-
- CHECK_EQ(0, grmodule->unlock(grmodule, handle));
} else if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
- ConvertSemiPlanarToPlanar(
+ ConvertYUV420SemiPlanarToYUV420Planar(
source, mConversionBuffer, mWidth, mHeight);
source = mConversionBuffer;
}
vpx_image_t raw_frame;
vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
- kInputBufferAlignment, source);
+ kInputBufferAlignment, (uint8_t *)source);
vpx_enc_frame_flags_t flags = 0;
+ if (mTemporalPatternLength > 0) {
+ flags = getEncodeFlags();
+ }
if (mKeyFrameRequested) {
flags |= VPX_EFLAG_FORCE_KF;
mKeyFrameRequested = false;
@@ -811,11 +1001,18 @@ void SoftVPXEncoder::onQueueFilled(OMX_U32 portIndex) {
mBitrateUpdated = false;
}
+ uint32_t frameDuration;
+ if (inputBufferHeader->nTimeStamp > mLastTimestamp) {
+ frameDuration = (uint32_t)(inputBufferHeader->nTimeStamp - mLastTimestamp);
+ } else {
+ frameDuration = (uint32_t)(((uint64_t)1000000 << 16) / mFramerate);
+ }
+ mLastTimestamp = inputBufferHeader->nTimeStamp;
codec_return = vpx_codec_encode(
mCodecContext,
&raw_frame,
inputBufferHeader->nTimeStamp, // in timebase units
- mFrameDurationUs, // frame duration in timebase units
+ frameDuration, // frame duration in timebase units
flags, // frame flags
VPX_DL_REALTIME); // encoding deadline
if (codec_return != VPX_CODEC_OK) {
@@ -854,16 +1051,6 @@ void SoftVPXEncoder::onQueueFilled(OMX_U32 portIndex) {
}
}
-OMX_ERRORTYPE SoftVPXEncoder::getExtensionIndex(
- const char *name, OMX_INDEXTYPE *index) {
- if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers")) {
- *index = OMX_IndexVendorStartUnused;
- return OMX_ErrorNone;
- }
-
- return SimpleSoftOMXComponent::getExtensionIndex(name, index);
-}
-
} // namespace android
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 076830f..f4c1564 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -18,7 +18,7 @@
#define SOFT_VPX_ENCODER_H_
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoEncoderOMXComponent.h"
#include <OMX_VideoExt.h>
#include <OMX_IndexExt.h>
@@ -59,7 +59,7 @@ namespace android {
// - OMX timestamps are in microseconds, therefore
// encoder timebase is fixed to 1/1000000
-struct SoftVPXEncoder : public SimpleSoftOMXComponent {
+struct SoftVPXEncoder : public SoftVideoEncoderOMXComponent {
SoftVPXEncoder(const char *name,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
@@ -87,10 +87,44 @@ protected:
// encoding of the frame
virtual void onQueueFilled(OMX_U32 portIndex);
- virtual OMX_ERRORTYPE getExtensionIndex(
- const char *name, OMX_INDEXTYPE *index);
-
private:
+ enum TemporalReferences {
+ // For 1 layer case: reference all (last, golden, and alt ref), but only
+ // update last.
+ kTemporalUpdateLastRefAll = 12,
+ // First base layer frame for 3 temporal layers, which updates last and
+ // golden with alt ref dependency.
+ kTemporalUpdateLastAndGoldenRefAltRef = 11,
+ // First enhancement layer with alt ref dependency.
+ kTemporalUpdateGoldenRefAltRef = 10,
+ // First enhancement layer with alt ref dependency.
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef = 9,
+ // Base layer with alt ref dependency.
+ kTemporalUpdateLastRefAltRef = 8,
+ // Highest enhacement layer without dependency on golden with alt ref
+ // dependency.
+ kTemporalUpdateNoneNoRefGoldenRefAltRef = 7,
+ // Second layer and last frame in cycle, for 2 layers.
+ kTemporalUpdateNoneNoRefAltref = 6,
+ // Highest enhancement layer.
+ kTemporalUpdateNone = 5,
+ // Second enhancement layer.
+ kTemporalUpdateAltref = 4,
+ // Second enhancement layer without dependency on previous frames in
+ // the second enhancement layer.
+ kTemporalUpdateAltrefWithoutDependency = 3,
+ // First enhancement layer.
+ kTemporalUpdateGolden = 2,
+ // First enhancement layer without dependency on previous frames in
+ // the first enhancement layer.
+ kTemporalUpdateGoldenWithoutDependency = 1,
+ // Base layer.
+ kTemporalUpdateLast = 0,
+ };
+ enum {
+ kMaxTemporalPattern = 8
+ };
+
// number of buffers allocated per port
static const uint32_t kNumBuffers = 4;
@@ -130,16 +164,15 @@ private:
// Target bitrate set for the encoder, in bits per second.
uint32_t mBitrate;
+ // Target framerate set for the encoder.
+ uint32_t mFramerate;
+
// If a request for a change it bitrate has been received.
bool mBitrateUpdated;
// Bitrate control mode, either constant or variable
vpx_rc_mode mBitrateControlMode;
- // Frame duration is the reciprocal of framerate, denoted
- // in microseconds
- uint64_t mFrameDurationUs;
-
// vp8 specific configuration parameter
// that enables token partitioning of
// the stream into substreams
@@ -160,6 +193,36 @@ private:
// something else.
OMX_VIDEO_VP8LEVELTYPE mLevel;
+ // Key frame interval in frames
+ uint32_t mKeyFrameInterval;
+
+ // Minimum (best quality) quantizer
+ uint32_t mMinQuantizer;
+
+ // Maximum (worst quality) quantizer
+ uint32_t mMaxQuantizer;
+
+ // Number of coding temporal layers to be used.
+ size_t mTemporalLayers;
+
+ // Temporal layer bitrare ratio in percentage
+ uint32_t mTemporalLayerBitrateRatio[OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS];
+
+ // Temporal pattern type
+ OMX_VIDEO_ANDROID_VPXTEMPORALLAYERPATTERNTYPE mTemporalPatternType;
+
+ // Temporal pattern length
+ size_t mTemporalPatternLength;
+
+ // Temporal pattern current index
+ size_t mTemporalPatternIdx;
+
+ // Frame type temporal pattern
+ TemporalReferences mTemporalPattern[kMaxTemporalPattern];
+
+ // Last input buffer timestamp
+ OMX_TICKS mLastTimestamp;
+
// Conversion buffer is needed to convert semi
// planar yuv420 to planar format
// It is only allocated if input format is
@@ -167,7 +230,6 @@ private:
uint8_t* mConversionBuffer;
bool mInputDataIsMeta;
- const hw_module_t *mGrallocModule;
bool mKeyFrameRequested;
@@ -185,6 +247,9 @@ private:
// dtor.
status_t releaseEncoder();
+ // Get current encode flags
+ vpx_enc_frame_flags_t getEncodeFlags();
+
// Handles port changes with respect to color formats
OMX_ERRORTYPE internalSetFormatParams(
const OMX_VIDEO_PARAM_PORTFORMATTYPE* format);
@@ -206,6 +271,10 @@ private:
OMX_ERRORTYPE internalSetVp8Params(
const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+ // Handles Android vp8 specific parameters.
+ OMX_ERRORTYPE internalSetAndroidVp8Params(
+ const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams);
+
// Updates encoder profile
OMX_ERRORTYPE internalSetProfileLevel(
const OMX_VIDEO_PARAM_PROFILELEVELTYPE* profileAndLevel);
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index 655b2ab..bf03ad9 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -84,8 +84,8 @@ MY_OMXDL_ASM_SRC := \
./omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S \
./omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S \
-
-ifeq ($(ARCH_ARM_HAVE_NEON),true)
+ifeq ($(TARGET_ARCH),arm)
+ ifeq ($(ARCH_ARM_HAVE_NEON),true)
LOCAL_ARM_NEON := true
# LOCAL_CFLAGS := -std=c99 -D._NEON -D._OMXDL
LOCAL_CFLAGS := -DH264DEC_NEON -DH264DEC_OMXDL
@@ -94,6 +94,7 @@ ifeq ($(ARCH_ARM_HAVE_NEON),true)
LOCAL_C_INCLUDES += $(LOCAL_PATH)/./omxdl/arm_neon/api \
$(LOCAL_PATH)/./omxdl/arm_neon/vc/api \
$(LOCAL_PATH)/./omxdl/arm_neon/vc/m4p10/api
+ endif
endif
LOCAL_SHARED_LIBRARIES := \
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
index 7ddb13c..168208f 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
@@ -58,7 +58,6 @@ SoftAVC::SoftAVC(
320 /* width */, 240 /* height */, callbacks, appData, component),
mHandle(NULL),
mInputBufferCount(0),
- mPictureSize(mWidth * mHeight * 3 / 2),
mFirstPicture(NULL),
mFirstPictureId(-1),
mPicId(0),
@@ -98,7 +97,7 @@ status_t SoftAVC::initDecoder() {
return UNKNOWN_ERROR;
}
-void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
+void SoftAVC::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError || mOutputPortSettingsChange != NONE) {
return;
}
@@ -118,7 +117,7 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
}
H264SwDecRet ret = H264SWDEC_PIC_RDY;
- bool portSettingsChanged = false;
+ bool portWillReset = false;
while ((mEOSStatus != INPUT_DATA_AVAILABLE || !inQueue.empty())
&& outQueue.size() == kNumOutputBuffers) {
@@ -161,17 +160,14 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
H264SwDecInfo decoderInfo;
CHECK(H264SwDecGetInfo(mHandle, &decoderInfo) == H264SWDEC_OK);
- if (handlePortSettingChangeEvent(&decoderInfo)) {
- portSettingsChanged = true;
- }
-
- if (decoderInfo.croppingFlag &&
- handleCropRectEvent(&decoderInfo.cropParams)) {
- portSettingsChanged = true;
- }
+ SoftVideoDecoderOMXComponent::CropSettingsMode cropSettingsMode =
+ handleCropParams(decoderInfo);
+ handlePortSettingsChange(
+ &portWillReset, decoderInfo.picWidth, decoderInfo.picHeight,
+ cropSettingsMode);
}
} else {
- if (portSettingsChanged) {
+ if (portWillReset) {
if (H264SwDecNextPicture(mHandle, &decodedPicture, 0)
== H264SWDEC_PIC_RDY) {
@@ -199,8 +195,7 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
- if (portSettingsChanged) {
- portSettingsChanged = false;
+ if (portWillReset) {
return;
}
@@ -215,44 +210,34 @@ void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
}
}
-bool SoftAVC::handlePortSettingChangeEvent(const H264SwDecInfo *info) {
- if (mWidth != info->picWidth || mHeight != info->picHeight) {
- mWidth = info->picWidth;
- mHeight = info->picHeight;
- mPictureSize = mWidth * mHeight * 3 / 2;
- updatePortDefinitions();
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
- return true;
+SoftVideoDecoderOMXComponent::CropSettingsMode SoftAVC::handleCropParams(
+ const H264SwDecInfo& decInfo) {
+ if (!decInfo.croppingFlag) {
+ return kCropUnSet;
}
- return false;
-}
-
-bool SoftAVC::handleCropRectEvent(const CropParams *crop) {
- if (mCropLeft != crop->cropLeftOffset ||
- mCropTop != crop->cropTopOffset ||
- mCropWidth != crop->cropOutWidth ||
- mCropHeight != crop->cropOutHeight) {
- mCropLeft = crop->cropLeftOffset;
- mCropTop = crop->cropTopOffset;
- mCropWidth = crop->cropOutWidth;
- mCropHeight = crop->cropOutHeight;
-
- notify(OMX_EventPortSettingsChanged, 1,
- OMX_IndexConfigCommonOutputCrop, NULL);
-
- return true;
+ const CropParams& crop = decInfo.cropParams;
+ if (mCropLeft == crop.cropLeftOffset &&
+ mCropTop == crop.cropTopOffset &&
+ mCropWidth == crop.cropOutWidth &&
+ mCropHeight == crop.cropOutHeight) {
+ return kCropSet;
}
- return false;
+
+ mCropLeft = crop.cropLeftOffset;
+ mCropTop = crop.cropTopOffset;
+ mCropWidth = crop.cropOutWidth;
+ mCropHeight = crop.cropOutHeight;
+ return kCropChanged;
}
void SoftAVC::saveFirstOutputBuffer(int32_t picId, uint8_t *data) {
CHECK(mFirstPicture == NULL);
mFirstPictureId = picId;
- mFirstPicture = new uint8_t[mPictureSize];
- memcpy(mFirstPicture, data, mPictureSize);
+ uint32_t pictureSize = mWidth * mHeight * 3 / 2;
+ mFirstPicture = new uint8_t[pictureSize];
+ memcpy(mFirstPicture, data, pictureSize);
}
void SoftAVC::drainOneOutputBuffer(int32_t picId, uint8_t* data) {
@@ -263,9 +248,17 @@ void SoftAVC::drainOneOutputBuffer(int32_t picId, uint8_t* data) {
OMX_BUFFERHEADERTYPE *header = mPicToHeaderMap.valueFor(picId);
outHeader->nTimeStamp = header->nTimeStamp;
outHeader->nFlags = header->nFlags;
- outHeader->nFilledLen = mPictureSize;
- memcpy(outHeader->pBuffer + outHeader->nOffset,
- data, mPictureSize);
+ outHeader->nFilledLen = mWidth * mHeight * 3 / 2;
+
+ uint8_t *dst = outHeader->pBuffer + outHeader->nOffset;
+ const uint8_t *srcY = data;
+ const uint8_t *srcU = srcY + mWidth * mHeight;
+ const uint8_t *srcV = srcU + mWidth * mHeight / 4;
+ size_t srcYStride = mWidth;
+ size_t srcUStride = mWidth / 2;
+ size_t srcVStride = srcUStride;
+ copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+
mPicToHeaderMap.removeItem(picId);
delete header;
outInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
index ee69926..069107d 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
@@ -55,8 +55,6 @@ private:
size_t mInputBufferCount;
- uint32_t mPictureSize;
-
uint8_t *mFirstPicture;
int32_t mFirstPictureId;
@@ -75,8 +73,7 @@ private:
void drainAllOutputBuffers(bool eos);
void drainOneOutputBuffer(int32_t picId, uint8_t *data);
void saveFirstOutputBuffer(int32_t pidId, uint8_t *data);
- bool handleCropRectEvent(const CropParams* crop);
- bool handlePortSettingChangeEvent(const H264SwDecInfo *info);
+ CropSettingsMode handleCropParams(const H264SwDecInfo& decInfo);
DISALLOW_EVIL_CONSTRUCTORS(SoftAVC);
};
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/api/omxtypes.h b/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/api/omxtypes.h
index 8b295a6..912cb0d 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/api/omxtypes.h
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm11/api/omxtypes.h
@@ -32,6 +32,7 @@
#define _OMXTYPES_H_
#include <limits.h>
+#include <stdint.h>
#define OMX_IN
#define OMX_OUT
@@ -75,64 +76,22 @@ typedef enum {
/* OMX_U8 */
-#if UCHAR_MAX == 0xff
-typedef unsigned char OMX_U8;
-#elif USHRT_MAX == 0xff
-typedef unsigned short int OMX_U8;
-#else
-#error OMX_U8 undefined
-#endif
-
+typedef uint8_t OMX_U8;
/* OMX_S8 */
-#if SCHAR_MAX == 0x7f
-typedef signed char OMX_S8;
-#elif SHRT_MAX == 0x7f
-typedef signed short int OMX_S8;
-#else
-#error OMX_S8 undefined
-#endif
-
+typedef int8_t OMX_S8;
/* OMX_U16 */
-#if USHRT_MAX == 0xffff
-typedef unsigned short int OMX_U16;
-#elif UINT_MAX == 0xffff
-typedef unsigned int OMX_U16;
-#else
-#error OMX_U16 undefined
-#endif
-
+typedef uint16_t OMX_U16;
/* OMX_S16 */
-#if SHRT_MAX == 0x7fff
-typedef signed short int OMX_S16;
-#elif INT_MAX == 0x7fff
-typedef signed int OMX_S16;
-#else
-#error OMX_S16 undefined
-#endif
-
+typedef int16_t OMX_S16;
/* OMX_U32 */
-#if UINT_MAX == 0xffffffff
-typedef unsigned int OMX_U32;
-#elif LONG_MAX == 0xffffffff
-typedef unsigned long int OMX_U32;
-#else
-#error OMX_U32 undefined
-#endif
-
+typedef uint32_t OMX_U32;
/* OMX_S32 */
-#if INT_MAX == 0x7fffffff
-typedef signed int OMX_S32;
-#elif LONG_MAX == 0x7fffffff
-typedef long signed int OMX_S32;
-#else
-#error OMX_S32 undefined
-#endif
-
+typedef int32_t OMX_S32;
/* OMX_U64 & OMX_S64 */
#if defined( _WIN32 ) || defined ( _WIN64 )
@@ -143,15 +102,14 @@ typedef long signed int OMX_S32;
#define OMX_MAX_S64 (0x7FFFFFFFFFFFFFFFi64)
#define OMX_MAX_U64 (0xFFFFFFFFFFFFFFFFi64)
#else
- typedef long long OMX_S64; /** Signed 64-bit integer */
- typedef unsigned long long OMX_U64; /** Unsigned 64-bit integer */
+ typedef int64_t OMX_S64; /** Signed 64-bit integer */
+ typedef uint64_t OMX_U64; /** Unsigned 64-bit integer */
#define OMX_MIN_S64 (0x8000000000000000LL)
#define OMX_MIN_U64 (0x0000000000000000LL)
#define OMX_MAX_S64 (0x7FFFFFFFFFFFFFFFLL)
#define OMX_MAX_U64 (0xFFFFFFFFFFFFFFFFLL)
#endif
-
/* OMX_SC8 */
typedef struct
{
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/omxtypes.h b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/omxtypes.h
index 8b295a6..912cb0d 100755
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/omxtypes.h
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/omxtypes.h
@@ -32,6 +32,7 @@
#define _OMXTYPES_H_
#include <limits.h>
+#include <stdint.h>
#define OMX_IN
#define OMX_OUT
@@ -75,64 +76,22 @@ typedef enum {
/* OMX_U8 */
-#if UCHAR_MAX == 0xff
-typedef unsigned char OMX_U8;
-#elif USHRT_MAX == 0xff
-typedef unsigned short int OMX_U8;
-#else
-#error OMX_U8 undefined
-#endif
-
+typedef uint8_t OMX_U8;
/* OMX_S8 */
-#if SCHAR_MAX == 0x7f
-typedef signed char OMX_S8;
-#elif SHRT_MAX == 0x7f
-typedef signed short int OMX_S8;
-#else
-#error OMX_S8 undefined
-#endif
-
+typedef int8_t OMX_S8;
/* OMX_U16 */
-#if USHRT_MAX == 0xffff
-typedef unsigned short int OMX_U16;
-#elif UINT_MAX == 0xffff
-typedef unsigned int OMX_U16;
-#else
-#error OMX_U16 undefined
-#endif
-
+typedef uint16_t OMX_U16;
/* OMX_S16 */
-#if SHRT_MAX == 0x7fff
-typedef signed short int OMX_S16;
-#elif INT_MAX == 0x7fff
-typedef signed int OMX_S16;
-#else
-#error OMX_S16 undefined
-#endif
-
+typedef int16_t OMX_S16;
/* OMX_U32 */
-#if UINT_MAX == 0xffffffff
-typedef unsigned int OMX_U32;
-#elif LONG_MAX == 0xffffffff
-typedef unsigned long int OMX_U32;
-#else
-#error OMX_U32 undefined
-#endif
-
+typedef uint32_t OMX_U32;
/* OMX_S32 */
-#if INT_MAX == 0x7fffffff
-typedef signed int OMX_S32;
-#elif LONG_MAX == 0x7fffffff
-typedef long signed int OMX_S32;
-#else
-#error OMX_S32 undefined
-#endif
-
+typedef int32_t OMX_S32;
/* OMX_U64 & OMX_S64 */
#if defined( _WIN32 ) || defined ( _WIN64 )
@@ -143,15 +102,14 @@ typedef long signed int OMX_S32;
#define OMX_MAX_S64 (0x7FFFFFFFFFFFFFFFi64)
#define OMX_MAX_U64 (0xFFFFFFFFFFFFFFFFi64)
#else
- typedef long long OMX_S64; /** Signed 64-bit integer */
- typedef unsigned long long OMX_U64; /** Unsigned 64-bit integer */
+ typedef int64_t OMX_S64; /** Signed 64-bit integer */
+ typedef uint64_t OMX_U64; /** Unsigned 64-bit integer */
#define OMX_MIN_S64 (0x8000000000000000LL)
#define OMX_MIN_U64 (0x0000000000000000LL)
#define OMX_MAX_S64 (0x7FFFFFFFFFFFFFFFLL)
#define OMX_MAX_U64 (0xFFFFFFFFFFFFFFFFLL)
#endif
-
/* OMX_SC8 */
typedef struct
{
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
index 073dbba..bcc6b6b 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
@@ -10,6 +10,22 @@
.fpu neon
.text
+ .extern armVCM4P10_CAVLCCoeffTokenTables
+ .extern armVCM4P10_SuffixToLevel
+ .extern armVCM4P10_CAVLCTotalZeros2x2Tables
+ .extern armVCM4P10_CAVLCTotalZeroTables
+ .extern armVCM4P10_CAVLCRunBeforeTables
+ .extern armVCM4P10_ZigZag_2x2
+ .extern armVCM4P10_ZigZag_4x4
+
+ .hidden armVCM4P10_CAVLCCoeffTokenTables
+ .hidden armVCM4P10_SuffixToLevel
+ .hidden armVCM4P10_CAVLCTotalZeros2x2Tables
+ .hidden armVCM4P10_CAVLCTotalZeroTables
+ .hidden armVCM4P10_CAVLCRunBeforeTables
+ .hidden armVCM4P10_ZigZag_2x2
+ .hidden armVCM4P10_ZigZag_4x4
+
.global armVCM4P10_DecodeCoeffsToPair
.func armVCM4P10_DecodeCoeffsToPair
armVCM4P10_DecodeCoeffsToPair:
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DequantTables_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DequantTables_s.S
index 44eb428..5bc7875 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DequantTables_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DequantTables_s.S
@@ -20,6 +20,14 @@
.global armVCM4P10_QPModuloTable
.global armVCM4P10_VMatrixU16
+ .hidden armVCM4P10_QPDivTable
+ .hidden armVCM4P10_VMatrixQPModTable
+ .hidden armVCM4P10_PosToVCol4x4
+ .hidden armVCM4P10_PosToVCol2x2
+ .hidden armVCM4P10_VMatrix
+ .hidden armVCM4P10_QPModuloTable
+ .hidden armVCM4P10_VMatrixU16
+
armVCM4P10_PosToVCol4x4:
.byte 0, 2, 0, 2
.byte 2, 1, 2, 1
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/omxtypes.h b/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/omxtypes.h
index 8b295a6..912cb0d 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/omxtypes.h
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/omxtypes.h
@@ -32,6 +32,7 @@
#define _OMXTYPES_H_
#include <limits.h>
+#include <stdint.h>
#define OMX_IN
#define OMX_OUT
@@ -75,64 +76,22 @@ typedef enum {
/* OMX_U8 */
-#if UCHAR_MAX == 0xff
-typedef unsigned char OMX_U8;
-#elif USHRT_MAX == 0xff
-typedef unsigned short int OMX_U8;
-#else
-#error OMX_U8 undefined
-#endif
-
+typedef uint8_t OMX_U8;
/* OMX_S8 */
-#if SCHAR_MAX == 0x7f
-typedef signed char OMX_S8;
-#elif SHRT_MAX == 0x7f
-typedef signed short int OMX_S8;
-#else
-#error OMX_S8 undefined
-#endif
-
+typedef int8_t OMX_S8;
/* OMX_U16 */
-#if USHRT_MAX == 0xffff
-typedef unsigned short int OMX_U16;
-#elif UINT_MAX == 0xffff
-typedef unsigned int OMX_U16;
-#else
-#error OMX_U16 undefined
-#endif
-
+typedef uint16_t OMX_U16;
/* OMX_S16 */
-#if SHRT_MAX == 0x7fff
-typedef signed short int OMX_S16;
-#elif INT_MAX == 0x7fff
-typedef signed int OMX_S16;
-#else
-#error OMX_S16 undefined
-#endif
-
+typedef int16_t OMX_S16;
/* OMX_U32 */
-#if UINT_MAX == 0xffffffff
-typedef unsigned int OMX_U32;
-#elif LONG_MAX == 0xffffffff
-typedef unsigned long int OMX_U32;
-#else
-#error OMX_U32 undefined
-#endif
-
+typedef uint32_t OMX_U32;
/* OMX_S32 */
-#if INT_MAX == 0x7fffffff
-typedef signed int OMX_S32;
-#elif LONG_MAX == 0x7fffffff
-typedef long signed int OMX_S32;
-#else
-#error OMX_S32 undefined
-#endif
-
+typedef int32_t OMX_S32;
/* OMX_U64 & OMX_S64 */
#if defined( _WIN32 ) || defined ( _WIN64 )
@@ -143,15 +102,14 @@ typedef long signed int OMX_S32;
#define OMX_MAX_S64 (0x7FFFFFFFFFFFFFFFi64)
#define OMX_MAX_U64 (0xFFFFFFFFFFFFFFFFi64)
#else
- typedef long long OMX_S64; /** Signed 64-bit integer */
- typedef unsigned long long OMX_U64; /** Unsigned 64-bit integer */
+ typedef int64_t OMX_S64; /** Signed 64-bit integer */
+ typedef uint64_t OMX_U64; /** Unsigned 64-bit integer */
#define OMX_MIN_S64 (0x8000000000000000LL)
#define OMX_MIN_U64 (0x0000000000000000LL)
#define OMX_MAX_S64 (0x7FFFFFFFFFFFFFFFLL)
#define OMX_MAX_U64 (0xFFFFFFFFFFFFFFFFLL)
#endif
-
/* OMX_SC8 */
typedef struct
{
diff --git a/media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c b/media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c
index 2bb4c4d..524a3f0 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/H264SwDecApi.c
@@ -42,6 +42,8 @@
#include "h264bsd_decoder.h"
#include "h264bsd_util.h"
+#define UNUSED(x) (void)(x)
+
/*------------------------------------------------------------------------------
Version Information
------------------------------------------------------------------------------*/
@@ -73,6 +75,7 @@ H264DEC_EVALUATION Compile evaluation version, restricts number of frames
#endif
void H264SwDecTrace(char *string) {
+ UNUSED(string);
}
void* H264SwDecMalloc(u32 size) {
diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_conceal.c b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_conceal.c
index 493fb9e..7a262ed 100755
--- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_conceal.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_conceal.c
@@ -267,7 +267,7 @@ u32 ConcealMb(mbStorage_t *pMb, image_t *currImage, u32 row, u32 col,
i32 firstPhase[16];
i32 *pTmp;
/* neighbours above, below, left and right */
- i32 a[4], b[4], l[4], r[4];
+ i32 a[4] = { 0,0,0,0 }, b[4], l[4] = { 0,0,0,0 }, r[4];
u32 A, B, L, R;
#ifdef H264DEC_OMXDL
u8 fillBuff[32*21 + 15 + 32];
diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_intra_prediction.c b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_intra_prediction.c
index 15eabfb..52c85e5 100755
--- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_intra_prediction.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_intra_prediction.c
@@ -1110,7 +1110,7 @@ void Intra16x16PlanePrediction(u8 *data, u8 *above, u8 *left)
/* Variables */
- u32 i, j;
+ i32 i, j;
i32 a, b, c;
i32 tmp;
@@ -1123,20 +1123,20 @@ void Intra16x16PlanePrediction(u8 *data, u8 *above, u8 *left)
a = 16 * (above[15] + left[15]);
for (i = 0, b = 0; i < 8; i++)
- b += ((i32)i + 1) * (above[8+i] - above[6-i]);
+ b += (i + 1) * (above[8+i] - above[6-i]);
b = (5 * b + 32) >> 6;
for (i = 0, c = 0; i < 7; i++)
- c += ((i32)i + 1) * (left[8+i] - left[6-i]);
+ c += (i + 1) * (left[8+i] - left[6-i]);
/* p[-1,-1] has to be accessed through above pointer */
- c += ((i32)i + 1) * (left[8+i] - above[-1]);
+ c += (i + 1) * (left[8+i] - above[-1]);
c = (5 * c + 32) >> 6;
for (i = 0; i < 16; i++)
{
for (j = 0; j < 16; j++)
{
- tmp = (a + b * ((i32)j - 7) + c * ((i32)i - 7) + 16) >> 5;
+ tmp = (a + b * (j - 7) + c * (i - 7) + 16) >> 5;
data[i*16+j] = (u8)CLIP1(tmp);
}
}
diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c
index c948776..b409a06 100755
--- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_reconstruct.c
@@ -42,6 +42,8 @@
#include "armVC.h"
#endif /* H264DEC_OMXDL */
+#define UNUSED(x) (void)(x)
+
/*------------------------------------------------------------------------------
2. External compiler flags
--------------------------------------------------------------------------------
@@ -2136,7 +2138,8 @@ static void FillRow1(
i32 center,
i32 right)
{
-
+ UNUSED(left);
+ UNUSED(right);
ASSERT(ref);
ASSERT(fill);
diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c
index a7c6f64..23401c6 100755
--- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_slice_header.c
@@ -47,6 +47,8 @@
#include "h264bsd_nal_unit.h"
#include "h264bsd_dpb.h"
+#define UNUSED(x) (void)(x)
+
/*------------------------------------------------------------------------------
2. External compiler flags
--------------------------------------------------------------------------------
@@ -1407,6 +1409,7 @@ u32 h264bsdCheckPriorPicsFlag(u32 * noOutputOfPriorPicsFlag,
u32 tmp, value, i;
i32 ivalue;
strmData_t tmpStrmData[1];
+ UNUSED(nalUnitType);
/* Code */
diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c
index cc838fd..fb97a28 100755
--- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c
+++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.c
@@ -186,7 +186,7 @@ u32 h264bsdMoreRbspData(strmData_t *pStrmData)
return(HANTRO_FALSE);
if ( (bits > 8) ||
- ((h264bsdShowBits32(pStrmData)>>(32-bits)) != (1 << (bits-1))) )
+ ((h264bsdShowBits32(pStrmData)>>(32-bits)) != (1ul << (bits-1))) )
return(HANTRO_TRUE);
else
return(HANTRO_FALSE);
diff --git a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.h b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.h
index cb3adda..216ad04 100755
--- a/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.h
+++ b/media/libstagefright/codecs/on2/h264dec/source/h264bsd_util.h
@@ -42,6 +42,7 @@
#include <stdio.h>
#endif
+#include <stdint.h>
#include "basetype.h"
#include "h264bsd_stream.h"
#include "h264bsd_image.h"
@@ -150,7 +151,7 @@
}
#define ALIGN(ptr, bytePos) \
- (ptr + ( ((bytePos - (int)ptr) & (bytePos - 1)) / sizeof(*ptr) ))
+ (ptr + ( ((bytePos - (uintptr_t)ptr) & (bytePos - 1)) / sizeof(*ptr) ))
extern const u32 h264bsdQpC[52];
diff --git a/media/libstagefright/codecs/opus/Android.mk b/media/libstagefright/codecs/opus/Android.mk
new file mode 100644
index 0000000..365b179
--- /dev/null
+++ b/media/libstagefright/codecs/opus/Android.mk
@@ -0,0 +1,4 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+include $(call all-makefiles-under,$(LOCAL_PATH)) \ No newline at end of file
diff --git a/media/libstagefright/codecs/opus/dec/Android.mk b/media/libstagefright/codecs/opus/dec/Android.mk
new file mode 100644
index 0000000..2379c5f
--- /dev/null
+++ b/media/libstagefright/codecs/opus/dec/Android.mk
@@ -0,0 +1,19 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ SoftOpus.cpp
+
+LOCAL_C_INCLUDES := \
+ external/libopus/include \
+ frameworks/av/media/libstagefright/include \
+ frameworks/native/include/media/openmax \
+
+LOCAL_SHARED_LIBRARIES := \
+ libopus libstagefright libstagefright_omx \
+ libstagefright_foundation libutils liblog
+
+LOCAL_MODULE := libstagefright_soft_opusdec
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY) \ No newline at end of file
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
new file mode 100644
index 0000000..b8084ae
--- /dev/null
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -0,0 +1,540 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftOpus"
+#include <utils/Log.h>
+
+#include "SoftOpus.h"
+#include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+extern "C" {
+ #include <opus.h>
+ #include <opus_multistream.h>
+}
+
+namespace android {
+
+static const int kRate = 48000;
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
+SoftOpus::SoftOpus(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mInputBufferCount(0),
+ mDecoder(NULL),
+ mHeader(NULL),
+ mCodecDelay(0),
+ mSeekPreRoll(0),
+ mAnchorTimeUs(0),
+ mNumFramesOutput(0),
+ mOutputPortSettingsChange(NONE) {
+ initPorts();
+ CHECK_EQ(initDecoder(), (status_t)OK);
+}
+
+SoftOpus::~SoftOpus() {
+ if (mDecoder != NULL) {
+ opus_multistream_decoder_destroy(mDecoder);
+ mDecoder = NULL;
+ }
+ if (mHeader != NULL) {
+ delete mHeader;
+ mHeader = NULL;
+ }
+}
+
+void SoftOpus::initPorts() {
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+
+ def.nPortIndex = 0;
+ def.eDir = OMX_DirInput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = 960 * 6;
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 1;
+
+ def.format.audio.cMIMEType =
+ const_cast<char *>(MEDIA_MIMETYPE_AUDIO_OPUS);
+
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding =
+ (OMX_AUDIO_CODINGTYPE)OMX_AUDIO_CodingAndroidOPUS;
+
+ addPort(def);
+
+ def.nPortIndex = 1;
+ def.eDir = OMX_DirOutput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = kMaxNumSamplesPerBuffer * sizeof(int16_t);
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 2;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ addPort(def);
+}
+
+status_t SoftOpus::initDecoder() {
+ return OK;
+}
+
+OMX_ERRORTYPE SoftOpus::internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params) {
+ switch ((int)index) {
+ case OMX_IndexParamAudioAndroidOpus:
+ {
+ OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *opusParams =
+ (OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *)params;
+
+ if (opusParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ opusParams->nAudioBandWidth = 0;
+ opusParams->nSampleRate = kRate;
+ opusParams->nBitRate = 0;
+
+ if (!isConfigured()) {
+ opusParams->nChannels = 1;
+ } else {
+ opusParams->nChannels = mHeader->channels;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ pcmParams->eNumData = OMX_NumericalDataSigned;
+ pcmParams->eEndian = OMX_EndianBig;
+ pcmParams->bInterleaved = OMX_TRUE;
+ pcmParams->nBitPerSample = 16;
+ pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+ pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
+ pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
+ pcmParams->nSamplingRate = kRate;
+
+ if (!isConfigured()) {
+ pcmParams->nChannels = 1;
+ } else {
+ pcmParams->nChannels = mHeader->channels;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalGetParameter(index, params);
+ }
+}
+
+OMX_ERRORTYPE SoftOpus::internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params) {
+ switch ((int)index) {
+ case OMX_IndexParamStandardComponentRole:
+ {
+ const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+ (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+ if (strncmp((const char *)roleParams->cRole,
+ "audio_decoder.opus",
+ OMX_MAX_STRINGNAME_SIZE - 1)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioAndroidOpus:
+ {
+ const OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *opusParams =
+ (const OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *)params;
+
+ if (opusParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalSetParameter(index, params);
+ }
+}
+
+bool SoftOpus::isConfigured() const {
+ return mInputBufferCount >= 1;
+}
+
+static uint16_t ReadLE16(const uint8_t *data, size_t data_size,
+ uint32_t read_offset) {
+ if (read_offset + 1 > data_size)
+ return 0;
+ uint16_t val;
+ val = data[read_offset];
+ val |= data[read_offset + 1] << 8;
+ return val;
+}
+
+// Opus uses Vorbis channel mapping, and Vorbis channel mapping specifies
+// mappings for up to 8 channels. This information is part of the Vorbis I
+// Specification:
+// http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
+static const int kMaxChannels = 8;
+
+// Maximum packet size used in Xiph's opusdec.
+static const int kMaxOpusOutputPacketSizeSamples = 960 * 6;
+
+// Default audio output channel layout. Used to initialize |stream_map| in
+// OpusHeader, and passed to opus_multistream_decoder_create() when the header
+// does not contain mapping information. The values are valid only for mono and
+// stereo output: Opus streams with more than 2 channels require a stream map.
+static const int kMaxChannelsWithDefaultLayout = 2;
+static const uint8_t kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = { 0, 1 };
+
+// Parses Opus Header. Header spec: http://wiki.xiph.org/OggOpus#ID_Header
+static bool ParseOpusHeader(const uint8_t *data, size_t data_size,
+ OpusHeader* header) {
+ // Size of the Opus header excluding optional mapping information.
+ const size_t kOpusHeaderSize = 19;
+
+ // Offset to the channel count byte in the Opus header.
+ const size_t kOpusHeaderChannelsOffset = 9;
+
+ // Offset to the pre-skip value in the Opus header.
+ const size_t kOpusHeaderSkipSamplesOffset = 10;
+
+ // Offset to the gain value in the Opus header.
+ const size_t kOpusHeaderGainOffset = 16;
+
+ // Offset to the channel mapping byte in the Opus header.
+ const size_t kOpusHeaderChannelMappingOffset = 18;
+
+ // Opus Header contains a stream map. The mapping values are in the header
+ // beyond the always present |kOpusHeaderSize| bytes of data. The mapping
+ // data contains stream count, coupling information, and per channel mapping
+ // values:
+ // - Byte 0: Number of streams.
+ // - Byte 1: Number coupled.
+ // - Byte 2: Starting at byte 2 are |header->channels| uint8 mapping
+ // values.
+ const size_t kOpusHeaderNumStreamsOffset = kOpusHeaderSize;
+ const size_t kOpusHeaderNumCoupledOffset = kOpusHeaderNumStreamsOffset + 1;
+ const size_t kOpusHeaderStreamMapOffset = kOpusHeaderNumStreamsOffset + 2;
+
+ if (data_size < kOpusHeaderSize) {
+ ALOGV("Header size is too small.");
+ return false;
+ }
+ header->channels = *(data + kOpusHeaderChannelsOffset);
+
+ if (header->channels <= 0 || header->channels > kMaxChannels) {
+ ALOGV("Invalid Header, wrong channel count: %d", header->channels);
+ return false;
+ }
+ header->skip_samples = ReadLE16(data, data_size,
+ kOpusHeaderSkipSamplesOffset);
+ header->gain_db = static_cast<int16_t>(
+ ReadLE16(data, data_size,
+ kOpusHeaderGainOffset));
+ header->channel_mapping = *(data + kOpusHeaderChannelMappingOffset);
+ if (!header->channel_mapping) {
+ if (header->channels > kMaxChannelsWithDefaultLayout) {
+ ALOGV("Invalid Header, missing stream map.");
+ return false;
+ }
+ header->num_streams = 1;
+ header->num_coupled = header->channels > 1;
+ header->stream_map[0] = 0;
+ header->stream_map[1] = 1;
+ return true;
+ }
+ if (data_size < kOpusHeaderStreamMapOffset + header->channels) {
+ ALOGV("Invalid stream map; insufficient data for current channel "
+ "count: %d", header->channels);
+ return false;
+ }
+ header->num_streams = *(data + kOpusHeaderNumStreamsOffset);
+ header->num_coupled = *(data + kOpusHeaderNumCoupledOffset);
+ if (header->num_streams + header->num_coupled != header->channels) {
+ ALOGV("Inconsistent channel mapping.");
+ return false;
+ }
+ for (int i = 0; i < header->channels; ++i)
+ header->stream_map[i] = *(data + kOpusHeaderStreamMapOffset + i);
+ return true;
+}
+
+// Convert nanoseconds to number of samples.
+static uint64_t ns_to_samples(uint64_t ns, int kRate) {
+ return static_cast<double>(ns) * kRate / 1000000000;
+}
+
+void SoftOpus::onQueueFilled(OMX_U32 portIndex) {
+ List<BufferInfo *> &inQueue = getPortQueue(0);
+ List<BufferInfo *> &outQueue = getPortQueue(1);
+
+ if (mOutputPortSettingsChange != NONE) {
+ return;
+ }
+
+ if (portIndex == 0 && mInputBufferCount < 3) {
+ BufferInfo *info = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *header = info->mHeader;
+
+ const uint8_t *data = header->pBuffer + header->nOffset;
+ size_t size = header->nFilledLen;
+
+ if (mInputBufferCount == 0) {
+ CHECK(mHeader == NULL);
+ mHeader = new OpusHeader();
+ memset(mHeader, 0, sizeof(*mHeader));
+ if (!ParseOpusHeader(data, size, mHeader)) {
+ ALOGV("Parsing Opus Header failed.");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ uint8_t channel_mapping[kMaxChannels] = {0};
+ memcpy(&channel_mapping,
+ kDefaultOpusChannelLayout,
+ kMaxChannelsWithDefaultLayout);
+
+ int status = OPUS_INVALID_STATE;
+ mDecoder = opus_multistream_decoder_create(kRate,
+ mHeader->channels,
+ mHeader->num_streams,
+ mHeader->num_coupled,
+ channel_mapping,
+ &status);
+ if (!mDecoder || status != OPUS_OK) {
+ ALOGV("opus_multistream_decoder_create failed status=%s",
+ opus_strerror(status));
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ status =
+ opus_multistream_decoder_ctl(mDecoder,
+ OPUS_SET_GAIN(mHeader->gain_db));
+ if (status != OPUS_OK) {
+ ALOGV("Failed to set OPUS header gain; status=%s",
+ opus_strerror(status));
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ } else if (mInputBufferCount == 1) {
+ mCodecDelay = ns_to_samples(
+ *(reinterpret_cast<int64_t*>(header->pBuffer +
+ header->nOffset)),
+ kRate);
+ mSamplesToDiscard = mCodecDelay;
+ } else {
+ mSeekPreRoll = ns_to_samples(
+ *(reinterpret_cast<int64_t*>(header->pBuffer +
+ header->nOffset)),
+ kRate);
+ notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+ }
+
+ inQueue.erase(inQueue.begin());
+ info->mOwnedByUs = false;
+ notifyEmptyBufferDone(header);
+ ++mInputBufferCount;
+ return;
+ }
+
+ while (!inQueue.empty() && !outQueue.empty()) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ inQueue.erase(inQueue.begin());
+ inInfo->mOwnedByUs = false;
+ notifyEmptyBufferDone(inHeader);
+
+ outHeader->nFilledLen = 0;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+
+ outQueue.erase(outQueue.begin());
+ outInfo->mOwnedByUs = false;
+ notifyFillBufferDone(outHeader);
+ return;
+ }
+
+ if (inHeader->nOffset == 0) {
+ mAnchorTimeUs = inHeader->nTimeStamp;
+ mNumFramesOutput = 0;
+ }
+
+ // When seeking to zero, |mCodecDelay| samples has to be discarded
+ // instead of |mSeekPreRoll| samples (as we would when seeking to any
+ // other timestamp).
+ if (inHeader->nTimeStamp == 0) {
+ mSamplesToDiscard = mCodecDelay;
+ }
+
+ const uint8_t *data = inHeader->pBuffer + inHeader->nOffset;
+ const uint32_t size = inHeader->nFilledLen;
+
+ int numFrames = opus_multistream_decode(mDecoder,
+ data,
+ size,
+ (int16_t *)outHeader->pBuffer,
+ kMaxOpusOutputPacketSizeSamples,
+ 0);
+ if (numFrames < 0) {
+ ALOGE("opus_multistream_decode returned %d", numFrames);
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ outHeader->nOffset = 0;
+ if (mSamplesToDiscard > 0) {
+ if (mSamplesToDiscard > numFrames) {
+ mSamplesToDiscard -= numFrames;
+ numFrames = 0;
+ } else {
+ numFrames -= mSamplesToDiscard;
+ outHeader->nOffset = mSamplesToDiscard * sizeof(int16_t) *
+ mHeader->channels;
+ mSamplesToDiscard = 0;
+ }
+ }
+
+ outHeader->nFilledLen = numFrames * sizeof(int16_t) * mHeader->channels;
+ outHeader->nFlags = 0;
+
+ outHeader->nTimeStamp = mAnchorTimeUs +
+ (mNumFramesOutput * 1000000ll) /
+ kRate;
+
+ mNumFramesOutput += numFrames;
+
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+
+ ++mInputBufferCount;
+ }
+}
+
+void SoftOpus::onPortFlushCompleted(OMX_U32 portIndex) {
+ if (portIndex == 0 && mDecoder != NULL) {
+ // Make sure that the next buffer output does not still
+ // depend on fragments from the last one decoded.
+ mNumFramesOutput = 0;
+ opus_multistream_decoder_ctl(mDecoder, OPUS_RESET_STATE);
+ mAnchorTimeUs = 0;
+ mSamplesToDiscard = mSeekPreRoll;
+ }
+}
+
+void SoftOpus::onReset() {
+ mInputBufferCount = 0;
+ mNumFramesOutput = 0;
+ if (mDecoder != NULL) {
+ opus_multistream_decoder_destroy(mDecoder);
+ mDecoder = NULL;
+ }
+ if (mHeader != NULL) {
+ delete mHeader;
+ mHeader = NULL;
+ }
+
+ mOutputPortSettingsChange = NONE;
+}
+
+void SoftOpus::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
+ if (portIndex != 1) {
+ return;
+ }
+
+ switch (mOutputPortSettingsChange) {
+ case NONE:
+ break;
+
+ case AWAITING_DISABLED:
+ {
+ CHECK(!enabled);
+ mOutputPortSettingsChange = AWAITING_ENABLED;
+ break;
+ }
+
+ default:
+ {
+ CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
+ CHECK(enabled);
+ mOutputPortSettingsChange = NONE;
+ break;
+ }
+ }
+}
+
+} // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+ const char *name, const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+ return new android::SoftOpus(name, callbacks, appData, component);
+}
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.h b/media/libstagefright/codecs/opus/dec/SoftOpus.h
new file mode 100644
index 0000000..97f6561
--- /dev/null
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * The Opus specification is part of IETF RFC 6716:
+ * http://tools.ietf.org/html/rfc6716
+ */
+
+#ifndef SOFT_OPUS_H_
+
+#define SOFT_OPUS_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+struct OpusMSDecoder;
+
+namespace android {
+
+struct OpusHeader {
+ int channels;
+ int skip_samples;
+ int channel_mapping;
+ int num_streams;
+ int num_coupled;
+ int16_t gain_db;
+ uint8_t stream_map[8];
+};
+
+struct SoftOpus : public SimpleSoftOMXComponent {
+ SoftOpus(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ virtual ~SoftOpus();
+
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params);
+
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params);
+
+ virtual void onQueueFilled(OMX_U32 portIndex);
+ virtual void onPortFlushCompleted(OMX_U32 portIndex);
+ virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+ virtual void onReset();
+
+private:
+ enum {
+ kNumBuffers = 4,
+ kMaxNumSamplesPerBuffer = 960 * 6
+ };
+
+ size_t mInputBufferCount;
+
+ OpusMSDecoder *mDecoder;
+ OpusHeader *mHeader;
+
+ int64_t mCodecDelay;
+ int64_t mSeekPreRoll;
+ int64_t mSamplesToDiscard;
+ int64_t mAnchorTimeUs;
+ int64_t mNumFramesOutput;
+
+ enum {
+ NONE,
+ AWAITING_DISABLED,
+ AWAITING_ENABLED
+ } mOutputPortSettingsChange;
+
+ void initPorts();
+ status_t initDecoder();
+ bool isConfigured() const;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftOpus);
+};
+
+} // namespace android
+
+#endif // SOFT_OPUS_H_
diff --git a/media/libstagefright/codecs/raw/Android.mk b/media/libstagefright/codecs/raw/Android.mk
index fe90a03..87080e7 100644
--- a/media/libstagefright/codecs/raw/Android.mk
+++ b/media/libstagefright/codecs/raw/Android.mk
@@ -8,6 +8,8 @@ LOCAL_C_INCLUDES := \
frameworks/av/media/libstagefright/include \
frameworks/native/include/media/openmax
+LOCAL_CFLAGS += -Werror
+
LOCAL_SHARED_LIBRARIES := \
libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
index 19d6f13..9d514a6 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.cpp
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -163,7 +163,7 @@ OMX_ERRORTYPE SoftRaw::internalSetParameter(
}
}
-void SoftRaw::onQueueFilled(OMX_U32 portIndex) {
+void SoftRaw::onQueueFilled(OMX_U32 /* portIndex */) {
if (mSignalledError) {
return;
}
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.mk b/media/libstagefright/codecs/vorbis/dec/Android.mk
index 2232353..217a6d2 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.mk
+++ b/media/libstagefright/codecs/vorbis/dec/Android.mk
@@ -16,4 +16,6 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_MODULE := libstagefright_soft_vorbisdec
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 51bb958..8f356b6 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -54,6 +54,8 @@ SoftVorbis::SoftVorbis(
mAnchorTimeUs(0),
mNumFramesOutput(0),
mNumFramesLeftOnPage(-1),
+ mSawInputEos(false),
+ mSignalledOutputEos(false),
mOutputPortSettingsChange(NONE) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
@@ -290,48 +292,47 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) {
return;
}
- while (!inQueue.empty() && !outQueue.empty()) {
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
+ BufferInfo *inInfo = NULL;
+ OMX_BUFFERHEADERTYPE *inHeader = NULL;
+ if (!inQueue.empty()) {
+ inInfo = *inQueue.begin();
+ inHeader = inInfo->mHeader;
+ }
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
+ int32_t numPageSamples = 0;
- outHeader->nFilledLen = 0;
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ if (inHeader) {
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ mSawInputEos = true;
+ }
- outQueue.erase(outQueue.begin());
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
- return;
- }
+ if (inHeader->nFilledLen || !mSawInputEos) {
+ CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
+ memcpy(&numPageSamples,
+ inHeader->pBuffer
+ + inHeader->nOffset + inHeader->nFilledLen - 4,
+ sizeof(numPageSamples));
- int32_t numPageSamples;
- CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
- memcpy(&numPageSamples,
- inHeader->pBuffer
- + inHeader->nOffset + inHeader->nFilledLen - 4,
- sizeof(numPageSamples));
+ if (inHeader->nOffset == 0) {
+ mAnchorTimeUs = inHeader->nTimeStamp;
+ mNumFramesOutput = 0;
+ }
- if (numPageSamples >= 0) {
- mNumFramesLeftOnPage = numPageSamples;
+ inHeader->nFilledLen -= sizeof(numPageSamples);;
+ }
}
- if (inHeader->nOffset == 0) {
- mAnchorTimeUs = inHeader->nTimeStamp;
- mNumFramesOutput = 0;
+ if (numPageSamples >= 0) {
+ mNumFramesLeftOnPage = numPageSamples;
}
- inHeader->nFilledLen -= sizeof(numPageSamples);;
-
ogg_buffer buf;
- buf.data = inHeader->pBuffer + inHeader->nOffset;
- buf.size = inHeader->nFilledLen;
+ buf.data = inHeader ? inHeader->pBuffer + inHeader->nOffset : NULL;
+ buf.size = inHeader ? inHeader->nFilledLen : 0;
buf.refcount = 1;
buf.ptr.owner = NULL;
@@ -351,9 +352,15 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) {
int numFrames = 0;
+ outHeader->nFlags = 0;
int err = vorbis_dsp_synthesis(mState, &pack, 1);
if (err != 0) {
+ // FIXME temporary workaround for log spam
+#if !defined(__arm__) && !defined(__aarch64__)
+ ALOGV("vorbis_dsp_synthesis returned %d", err);
+#else
ALOGW("vorbis_dsp_synthesis returned %d", err);
+#endif
} else {
numFrames = vorbis_dsp_pcmout(
mState, (int16_t *)outHeader->pBuffer,
@@ -370,13 +377,16 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) {
ALOGV("discarding %d frames at end of page",
numFrames - mNumFramesLeftOnPage);
numFrames = mNumFramesLeftOnPage;
+ if (mSawInputEos) {
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ mSignalledOutputEos = true;
+ }
}
mNumFramesLeftOnPage -= numFrames;
}
outHeader->nFilledLen = numFrames * sizeof(int16_t) * mVi->channels;
outHeader->nOffset = 0;
- outHeader->nFlags = 0;
outHeader->nTimeStamp =
mAnchorTimeUs
@@ -384,11 +394,13 @@ void SoftVorbis::onQueueFilled(OMX_U32 portIndex) {
mNumFramesOutput += numFrames;
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
+ if (inHeader) {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
@@ -425,6 +437,8 @@ void SoftVorbis::onReset() {
mVi = NULL;
}
+ mSawInputEos = false;
+ mSignalledOutputEos = false;
mOutputPortSettingsChange = NONE;
}
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
index cb628a0..1d00816 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
@@ -59,6 +59,8 @@ private:
int64_t mAnchorTimeUs;
int64_t mNumFramesOutput;
int32_t mNumFramesLeftOnPage;
+ bool mSawInputEos;
+ bool mSignalledOutputEos;
enum {
NONE,
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 77f21b7..1899b40 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -21,7 +21,7 @@
#include <cutils/properties.h> // for property_get
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/AMessage.h>
#include <system/window.h>
#include <ui/GraphicBufferMapper.h>
#include <gui/IGraphicBufferProducer.h>
@@ -33,34 +33,71 @@ static bool runningInEmulator() {
return (property_get("ro.kernel.qemu", prop, NULL) > 0);
}
-SoftwareRenderer::SoftwareRenderer(
- const sp<ANativeWindow> &nativeWindow, const sp<MetaData> &meta)
- : mConverter(NULL),
+static int ALIGN(int x, int y) {
+ // y must be a power of 2.
+ return (x + y - 1) & ~(y - 1);
+}
+
+SoftwareRenderer::SoftwareRenderer(const sp<ANativeWindow> &nativeWindow)
+ : mColorFormat(OMX_COLOR_FormatUnused),
+ mConverter(NULL),
mYUVMode(None),
- mNativeWindow(nativeWindow) {
- int32_t tmp;
- CHECK(meta->findInt32(kKeyColorFormat, &tmp));
- mColorFormat = (OMX_COLOR_FORMATTYPE)tmp;
-
- CHECK(meta->findInt32(kKeyWidth, &mWidth));
- CHECK(meta->findInt32(kKeyHeight, &mHeight));
-
- if (!meta->findRect(
- kKeyCropRect,
- &mCropLeft, &mCropTop, &mCropRight, &mCropBottom)) {
- mCropLeft = mCropTop = 0;
- mCropRight = mWidth - 1;
- mCropBottom = mHeight - 1;
+ mNativeWindow(nativeWindow),
+ mWidth(0),
+ mHeight(0),
+ mCropLeft(0),
+ mCropTop(0),
+ mCropRight(0),
+ mCropBottom(0),
+ mCropWidth(0),
+ mCropHeight(0) {
+}
+
+SoftwareRenderer::~SoftwareRenderer() {
+ delete mConverter;
+ mConverter = NULL;
+}
+
+void SoftwareRenderer::resetFormatIfChanged(const sp<AMessage> &format) {
+ CHECK(format != NULL);
+
+ int32_t colorFormatNew;
+ CHECK(format->findInt32("color-format", &colorFormatNew));
+
+ int32_t widthNew, heightNew;
+ CHECK(format->findInt32("stride", &widthNew));
+ CHECK(format->findInt32("slice-height", &heightNew));
+
+ int32_t cropLeftNew, cropTopNew, cropRightNew, cropBottomNew;
+ if (!format->findRect(
+ "crop", &cropLeftNew, &cropTopNew, &cropRightNew, &cropBottomNew)) {
+ cropLeftNew = cropTopNew = 0;
+ cropRightNew = widthNew - 1;
+ cropBottomNew = heightNew - 1;
}
+ if (static_cast<int32_t>(mColorFormat) == colorFormatNew &&
+ mWidth == widthNew &&
+ mHeight == heightNew &&
+ mCropLeft == cropLeftNew &&
+ mCropTop == cropTopNew &&
+ mCropRight == cropRightNew &&
+ mCropBottom == cropBottomNew) {
+ // Nothing changed, no need to reset renderer.
+ return;
+ }
+
+ mColorFormat = static_cast<OMX_COLOR_FORMATTYPE>(colorFormatNew);
+ mWidth = widthNew;
+ mHeight = heightNew;
+ mCropLeft = cropLeftNew;
+ mCropTop = cropTopNew;
+ mCropRight = cropRightNew;
+ mCropBottom = cropBottomNew;
+
mCropWidth = mCropRight - mCropLeft + 1;
mCropHeight = mCropBottom - mCropTop + 1;
- int32_t rotationDegrees;
- if (!meta->findInt32(kKeyRotation, &rotationDegrees)) {
- rotationDegrees = 0;
- }
-
int halFormat;
size_t bufWidth, bufHeight;
@@ -106,12 +143,29 @@ SoftwareRenderer::SoftwareRenderer(
NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW));
// Width must be multiple of 32???
- CHECK_EQ(0, native_window_set_buffers_geometry(
+ CHECK_EQ(0, native_window_set_buffers_dimensions(
mNativeWindow.get(),
bufWidth,
- bufHeight,
+ bufHeight));
+ CHECK_EQ(0, native_window_set_buffers_format(
+ mNativeWindow.get(),
halFormat));
+ // NOTE: native window uses extended right-bottom coordinate
+ android_native_rect_t crop;
+ crop.left = mCropLeft;
+ crop.top = mCropTop;
+ crop.right = mCropRight + 1;
+ crop.bottom = mCropBottom + 1;
+ ALOGV("setting crop: [%d, %d, %d, %d] for size [%zu, %zu]",
+ crop.left, crop.top, crop.right, crop.bottom, bufWidth, bufHeight);
+
+ CHECK_EQ(0, native_window_set_crop(mNativeWindow.get(), &crop));
+
+ int32_t rotationDegrees;
+ if (!format->findInt32("rotation-degrees", &rotationDegrees)) {
+ rotationDegrees = 0;
+ }
uint32_t transform;
switch (rotationDegrees) {
case 0: transform = 0; break;
@@ -121,24 +175,15 @@ SoftwareRenderer::SoftwareRenderer(
default: transform = 0; break;
}
- if (transform) {
- CHECK_EQ(0, native_window_set_buffers_transform(
- mNativeWindow.get(), transform));
- }
-}
-
-SoftwareRenderer::~SoftwareRenderer() {
- delete mConverter;
- mConverter = NULL;
-}
-
-static int ALIGN(int x, int y) {
- // y must be a power of 2.
- return (x + y - 1) & ~(y - 1);
+ CHECK_EQ(0, native_window_set_buffers_transform(
+ mNativeWindow.get(), transform));
}
void SoftwareRenderer::render(
- const void *data, size_t size, void *platformPrivate) {
+ const void *data, size_t /*size*/, int64_t timestampNs,
+ void* /*platformPrivate*/, const sp<AMessage>& format) {
+ resetFormatIfChanged(format);
+
ANativeWindowBuffer *buf;
int err;
if ((err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(),
@@ -230,6 +275,11 @@ void SoftwareRenderer::render(
CHECK_EQ(0, mapper.unlock(buf->handle));
+ if ((err = native_window_set_buffers_timestamp(mNativeWindow.get(),
+ timestampNs)) != 0) {
+ ALOGW("Surface::set_buffers_timestamp returned error %d", err);
+ }
+
if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf,
-1)) != 0) {
ALOGW("Surface::queueBuffer returned error %d", err);
diff --git a/media/libstagefright/data/media_codecs_google_audio.xml b/media/libstagefright/data/media_codecs_google_audio.xml
new file mode 100644
index 0000000..f599004
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_audio.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<Included>
+ <Decoders>
+ <MediaCodec name="OMX.google.mp3.decoder" type="audio/mpeg">
+ <Limit name="channel-count" max="2" />
+ <Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
+ <Limit name="bitrate" range="8000-320000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.amrnb.decoder" type="audio/3gpp">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000" />
+ <Limit name="bitrate" range="4750-12200" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.amrwb.decoder" type="audio/amr-wb">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="16000" />
+ <Limit name="bitrate" range="6600-23850" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.aac.decoder" type="audio/mp4a-latm">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="7350,8000,11025,12000,16000,22050,24000,32000,44100,48000" />
+ <Limit name="bitrate" range="8000-960000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.g711.alaw.decoder" type="audio/g711-alaw">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000" />
+ <Limit name="bitrate" range="64000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.g711.mlaw.decoder" type="audio/g711-mlaw">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000" />
+ <Limit name="bitrate" range="64000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vorbis.decoder" type="audio/vorbis">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000,96000" />
+ <Limit name="bitrate" range="32000-500000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.opus.decoder" type="audio/opus">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="48000" />
+ <Limit name="bitrate" range="6000-510000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.raw.decoder" type="audio/raw">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="8000-96000" />
+ <Limit name="bitrate" range="1-10000000" />
+ </MediaCodec>
+ </Decoders>
+ <Encoders>
+ <MediaCodec name="OMX.google.aac.encoder" type="audio/mp4a-latm">
+ <Limit name="channel-count" max="6" />
+ <Limit name="sample-rate" ranges="11025,12000,16000,22050,24000,32000,44100,48000" />
+ <Limit name="bitrate" range="8000-960000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.amrnb.encoder" type="audio/3gpp">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000" />
+ <Limit name="bitrate" range="4750-12200" />
+ <Feature name="bitrate-modes" value="CBR" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.amrwb.encoder" type="audio/amr-wb">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="16000" />
+ <Limit name="bitrate" range="6600-23850" />
+ <Feature name="bitrate-modes" value="CBR" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.flac.encoder" type="audio/flac">
+ <Limit name="channel-count" max="2" />
+ <Limit name="sample-rate" ranges="1-655350" />
+ <Limit name="bitrate" range="1-21000000" />
+ <Limit name="complexity" range="0-8" default="5" />
+ <Feature name="bitrate-modes" value="CQ" />
+ </MediaCodec>
+ </Encoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_telephony.xml b/media/libstagefright/data/media_codecs_google_telephony.xml
new file mode 100644
index 0000000..5ad90d9
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_telephony.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<Included>
+ <Decoders>
+ <MediaCodec name="OMX.google.gsm.decoder" type="audio/gsm">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000" />
+ <Limit name="bitrate" range="13000" />
+ </MediaCodec>
+ </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
new file mode 100644
index 0000000..1cbef39
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<Included>
+ <Decoders>
+ <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileSimple : Level3 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-11880" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level30, ProfileBaseline : Level45
+ ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.decoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-983040" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc">
+ <!-- profiles and levels: ProfileMain : MainTierLevel51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="8x8" />
+ <Limit name="block-count" range="1-139264" />
+ <Limit name="blocks-per-second" range="1-2000000" />
+ <Limit name="bitrate" range="1-10000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-1000000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-500000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ </Decoders>
+
+ <Encoders>
+ <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level45 -->
+ <Limit name="size" min="16x16" max="176x144" />
+ <Limit name="alignment" value="16x16" />
+ <Limit name="bitrate" range="1-128000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.encoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level2 -->
+ <Limit name="size" min="16x16" max="896x896" />
+ <Limit name="alignment" value="16x16" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-11880" />
+ <Limit name="bitrate" range="1-2000000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileCore : Level2 -->
+ <Limit name="size" min="16x16" max="176x144" />
+ <Limit name="alignment" value="16x16" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-1485" />
+ <Limit name="bitrate" range="1-64000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+ <!-- profiles and levels: ProfileMain : Level_Version0-3 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
+ </Encoders>
+</Included>
diff --git a/media/libstagefright/foundation/ABitReader.cpp b/media/libstagefright/foundation/ABitReader.cpp
index 5499c32..beb5cc0 100644
--- a/media/libstagefright/foundation/ABitReader.cpp
+++ b/media/libstagefright/foundation/ABitReader.cpp
@@ -27,6 +27,9 @@ ABitReader::ABitReader(const uint8_t *data, size_t size)
mNumBitsLeft(0) {
}
+ABitReader::~ABitReader() {
+}
+
void ABitReader::fillReservoir() {
CHECK_GT(mSize, 0u);
@@ -99,4 +102,69 @@ const uint8_t *ABitReader::data() const {
return mData - (mNumBitsLeft + 7) / 8;
}
+NALBitReader::NALBitReader(const uint8_t *data, size_t size)
+ : ABitReader(data, size),
+ mNumZeros(0) {
+}
+
+bool NALBitReader::atLeastNumBitsLeft(size_t n) const {
+ // check against raw size and reservoir bits first
+ size_t numBits = numBitsLeft();
+ if (n > numBits) {
+ return false;
+ }
+
+ ssize_t numBitsRemaining = n - mNumBitsLeft;
+
+ size_t size = mSize;
+ const uint8_t *data = mData;
+ int32_t numZeros = mNumZeros;
+ while (size > 0 && numBitsRemaining > 0) {
+ bool isEmulationPreventionByte = (numZeros >= 2 && *data == 3);
+
+ if (*data == 0) {
+ ++numZeros;
+ } else {
+ numZeros = 0;
+ }
+
+ if (!isEmulationPreventionByte) {
+ numBitsRemaining -= 8;
+ }
+
+ ++data;
+ --size;
+ }
+
+ return (numBitsRemaining <= 0);
+}
+
+void NALBitReader::fillReservoir() {
+ CHECK_GT(mSize, 0u);
+
+ mReservoir = 0;
+ size_t i = 0;
+ while (mSize > 0 && i < 4) {
+ bool isEmulationPreventionByte = (mNumZeros >= 2 && *mData == 3);
+
+ if (*mData == 0) {
+ ++mNumZeros;
+ } else {
+ mNumZeros = 0;
+ }
+
+ // skip emulation_prevention_three_byte
+ if (!isEmulationPreventionByte) {
+ mReservoir = (mReservoir << 8) | *mData;
+ ++i;
+ }
+
+ ++mData;
+ --mSize;
+ }
+
+ mNumBitsLeft = 8 * i;
+ mReservoir <<= 32 - mNumBitsLeft;
+}
+
} // namespace android
diff --git a/media/libstagefright/foundation/ABuffer.cpp b/media/libstagefright/foundation/ABuffer.cpp
index 6173db4..b214870 100644
--- a/media/libstagefright/foundation/ABuffer.cpp
+++ b/media/libstagefright/foundation/ABuffer.cpp
@@ -19,11 +19,13 @@
#include "ADebug.h"
#include "ALooper.h"
#include "AMessage.h"
+#include "MediaBufferBase.h"
namespace android {
ABuffer::ABuffer(size_t capacity)
- : mData(malloc(capacity)),
+ : mMediaBufferBase(NULL),
+ mData(malloc(capacity)),
mCapacity(capacity),
mRangeOffset(0),
mRangeLength(capacity),
@@ -32,7 +34,8 @@ ABuffer::ABuffer(size_t capacity)
}
ABuffer::ABuffer(void *data, size_t capacity)
- : mData(data),
+ : mMediaBufferBase(NULL),
+ mData(data),
mCapacity(capacity),
mRangeOffset(0),
mRangeLength(capacity),
@@ -40,6 +43,14 @@ ABuffer::ABuffer(void *data, size_t capacity)
mOwnsData(false) {
}
+// static
+sp<ABuffer> ABuffer::CreateAsCopy(const void *data, size_t capacity)
+{
+ sp<ABuffer> res = new ABuffer(capacity);
+ memcpy(res->data(), data, capacity);
+ return res;
+}
+
ABuffer::~ABuffer() {
if (mOwnsData) {
if (mData != NULL) {
@@ -51,6 +62,8 @@ ABuffer::~ABuffer() {
if (mFarewell != NULL) {
mFarewell->post();
}
+
+ setMediaBufferBase(NULL);
}
void ABuffer::setRange(size_t offset, size_t size) {
@@ -72,5 +85,19 @@ sp<AMessage> ABuffer::meta() {
return mMeta;
}
+MediaBufferBase *ABuffer::getMediaBufferBase() {
+ if (mMediaBufferBase != NULL) {
+ mMediaBufferBase->add_ref();
+ }
+ return mMediaBufferBase;
+}
+
+void ABuffer::setMediaBufferBase(MediaBufferBase *mediaBuffer) {
+ if (mMediaBufferBase != NULL) {
+ mMediaBufferBase->release();
+ }
+ mMediaBufferBase = mediaBuffer;
+}
+
} // namespace android
diff --git a/media/libstagefright/foundation/AHierarchicalStateMachine.cpp b/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
index f7a00d8..5f7c70d 100644
--- a/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
+++ b/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
@@ -51,7 +51,7 @@ AHierarchicalStateMachine::AHierarchicalStateMachine() {
AHierarchicalStateMachine::~AHierarchicalStateMachine() {
}
-void AHierarchicalStateMachine::onMessageReceived(const sp<AMessage> &msg) {
+void AHierarchicalStateMachine::handleMessage(const sp<AMessage> &msg) {
sp<AState> save = mState;
sp<AState> cur = mState;
diff --git a/media/libstagefright/foundation/ALooper.cpp b/media/libstagefright/foundation/ALooper.cpp
index ebf9d8d..88b1c92 100644
--- a/media/libstagefright/foundation/ALooper.cpp
+++ b/media/libstagefright/foundation/ALooper.cpp
@@ -68,14 +68,14 @@ int64_t ALooper::GetNowUs() {
ALooper::ALooper()
: mRunningLocally(false) {
+ // clean up stale AHandlers. Doing it here instead of in the destructor avoids
+ // the side effect of objects being deleted from the unregister function recursively.
+ gLooperRoster.unregisterStaleHandlers();
}
ALooper::~ALooper() {
stop();
-
- // Since this looper is "dead" (or as good as dead by now),
- // have ALooperRoster unregister any handlers still registered for it.
- gLooperRoster.unregisterStaleHandlers();
+ // stale AHandlers are now cleaned up in the constructor of the next ALooper to come along
}
void ALooper::setName(const char *name) {
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 0c181ff..e0dc768 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -72,50 +72,40 @@ void ALooperRoster::unregisterHandler(ALooper::handler_id handlerID) {
}
void ALooperRoster::unregisterStaleHandlers() {
- Mutex::Autolock autoLock(mLock);
- for (size_t i = mHandlers.size(); i-- > 0;) {
- const HandlerInfo &info = mHandlers.valueAt(i);
+ Vector<sp<ALooper> > activeLoopers;
+ {
+ Mutex::Autolock autoLock(mLock);
- sp<ALooper> looper = info.mLooper.promote();
- if (looper == NULL) {
- ALOGV("Unregistering stale handler %d", mHandlers.keyAt(i));
- mHandlers.removeItemsAt(i);
+ for (size_t i = mHandlers.size(); i-- > 0;) {
+ const HandlerInfo &info = mHandlers.valueAt(i);
+
+ sp<ALooper> looper = info.mLooper.promote();
+ if (looper == NULL) {
+ ALOGV("Unregistering stale handler %d", mHandlers.keyAt(i));
+ mHandlers.removeItemsAt(i);
+ } else {
+ // At this point 'looper' might be the only sp<> keeping
+ // the object alive. To prevent it from going out of scope
+ // and having ~ALooper call this method again recursively
+ // and then deadlocking because of the Autolock above, add
+ // it to a Vector which will go out of scope after the lock
+ // has been released.
+ activeLoopers.add(looper);
+ }
}
}
}
status_t ALooperRoster::postMessage(
const sp<AMessage> &msg, int64_t delayUs) {
- Mutex::Autolock autoLock(mLock);
- return postMessage_l(msg, delayUs);
-}
-
-status_t ALooperRoster::postMessage_l(
- const sp<AMessage> &msg, int64_t delayUs) {
- ssize_t index = mHandlers.indexOfKey(msg->target());
- if (index < 0) {
- ALOGW("failed to post message '%s'. Target handler not registered.",
- msg->debugString().c_str());
- return -ENOENT;
- }
-
- const HandlerInfo &info = mHandlers.valueAt(index);
-
- sp<ALooper> looper = info.mLooper.promote();
+ sp<ALooper> looper = findLooper(msg->target());
if (looper == NULL) {
- ALOGW("failed to post message. "
- "Target handler %d still registered, but object gone.",
- msg->target());
-
- mHandlers.removeItemsAt(index);
return -ENOENT;
}
-
looper->post(msg, delayUs);
-
return OK;
}
@@ -169,18 +159,23 @@ sp<ALooper> ALooperRoster::findLooper(ALooper::handler_id handlerID) {
status_t ALooperRoster::postAndAwaitResponse(
const sp<AMessage> &msg, sp<AMessage> *response) {
+ sp<ALooper> looper = findLooper(msg->target());
+
+ if (looper == NULL) {
+ ALOGW("failed to post message. "
+ "Target handler %d still registered, but object gone.",
+ msg->target());
+ response->clear();
+ return -ENOENT;
+ }
+
Mutex::Autolock autoLock(mLock);
uint32_t replyID = mNextReplyID++;
msg->setInt32("replyID", replyID);
- status_t err = postMessage_l(msg, 0 /* delayUs */);
-
- if (err != OK) {
- response->clear();
- return err;
- }
+ looper->post(msg, 0 /* delayUs */);
ssize_t index;
while ((index = mReplies.indexOfKey(replyID)) < 0) {
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index dc42f91..795e8a6 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -14,6 +14,11 @@
* limitations under the License.
*/
+#define LOG_TAG "AMessage"
+//#define LOG_NDEBUG 0
+//#define DUMP_STATS
+#include <cutils/log.h>
+
#include "AMessage.h"
#include <ctype.h>
@@ -60,12 +65,14 @@ ALooper::handler_id AMessage::target() const {
void AMessage::clear() {
for (size_t i = 0; i < mNumItems; ++i) {
Item *item = &mItems[i];
- freeItem(item);
+ delete[] item->mName;
+ item->mName = NULL;
+ freeItemValue(item);
}
mNumItems = 0;
}
-void AMessage::freeItem(Item *item) {
+void AMessage::freeItemValue(Item *item) {
switch (item->mType) {
case kTypeString:
{
@@ -88,25 +95,85 @@ void AMessage::freeItem(Item *item) {
}
}
-AMessage::Item *AMessage::allocateItem(const char *name) {
- name = AAtomizer::Atomize(name);
+#ifdef DUMP_STATS
+#include <utils/Mutex.h>
+
+Mutex gLock;
+static int32_t gFindItemCalls = 1;
+static int32_t gDupCalls = 1;
+static int32_t gAverageNumItems = 0;
+static int32_t gAverageNumChecks = 0;
+static int32_t gAverageNumMemChecks = 0;
+static int32_t gAverageDupItems = 0;
+static int32_t gLastChecked = -1;
+
+static void reportStats() {
+ int32_t time = (ALooper::GetNowUs() / 1000);
+ if (time / 1000 != gLastChecked / 1000) {
+ gLastChecked = time;
+ ALOGI("called findItemIx %zu times (for len=%.1f i=%.1f/%.1f mem) dup %zu times (for len=%.1f)",
+ gFindItemCalls,
+ gAverageNumItems / (float)gFindItemCalls,
+ gAverageNumChecks / (float)gFindItemCalls,
+ gAverageNumMemChecks / (float)gFindItemCalls,
+ gDupCalls,
+ gAverageDupItems / (float)gDupCalls);
+ gFindItemCalls = gDupCalls = 1;
+ gAverageNumItems = gAverageNumChecks = gAverageNumMemChecks = gAverageDupItems = 0;
+ gLastChecked = time;
+ }
+}
+#endif
+inline size_t AMessage::findItemIndex(const char *name, size_t len) const {
+#ifdef DUMP_STATS
+ size_t memchecks = 0;
+#endif
size_t i = 0;
- while (i < mNumItems && mItems[i].mName != name) {
- ++i;
+ for (; i < mNumItems; i++) {
+ if (len != mItems[i].mNameLength) {
+ continue;
+ }
+#ifdef DUMP_STATS
+ ++memchecks;
+#endif
+ if (!memcmp(mItems[i].mName, name, len)) {
+ break;
+ }
}
+#ifdef DUMP_STATS
+ {
+ Mutex::Autolock _l(gLock);
+ ++gFindItemCalls;
+ gAverageNumItems += mNumItems;
+ gAverageNumMemChecks += memchecks;
+ gAverageNumChecks += i;
+ reportStats();
+ }
+#endif
+ return i;
+}
+
+// assumes item's name was uninitialized or NULL
+void AMessage::Item::setName(const char *name, size_t len) {
+ mNameLength = len;
+ mName = new char[len + 1];
+ memcpy((void*)mName, name, len + 1);
+}
+AMessage::Item *AMessage::allocateItem(const char *name) {
+ size_t len = strlen(name);
+ size_t i = findItemIndex(name, len);
Item *item;
if (i < mNumItems) {
item = &mItems[i];
- freeItem(item);
+ freeItemValue(item);
} else {
CHECK(mNumItems < kMaxNumItems);
i = mNumItems++;
item = &mItems[i];
-
- item->mName = name;
+ item->setName(name, len);
}
return item;
@@ -114,19 +181,20 @@ AMessage::Item *AMessage::allocateItem(const char *name) {
const AMessage::Item *AMessage::findItem(
const char *name, Type type) const {
- name = AAtomizer::Atomize(name);
-
- for (size_t i = 0; i < mNumItems; ++i) {
+ size_t i = findItemIndex(name, strlen(name));
+ if (i < mNumItems) {
const Item *item = &mItems[i];
+ return item->mType == type ? item : NULL;
- if (item->mName == name) {
- return item->mType == type ? item : NULL;
- }
}
-
return NULL;
}
+bool AMessage::contains(const char *name) const {
+ size_t i = findItemIndex(name, strlen(name));
+ return i < mNumItems;
+}
+
#define BASIC_TYPE(NAME,FIELDNAME,TYPENAME) \
void AMessage::set##NAME(const char *name, TYPENAME value) { \
Item *item = allocateItem(name); \
@@ -160,6 +228,11 @@ void AMessage::setString(
item->u.stringValue = new AString(s, len < 0 ? strlen(s) : len);
}
+void AMessage::setString(
+ const char *name, const AString &s) {
+ setString(name, s.c_str(), s.size());
+}
+
void AMessage::setObjectInternal(
const char *name, const sp<RefBase> &obj, Type type) {
Item *item = allocateItem(name);
@@ -278,11 +351,20 @@ sp<AMessage> AMessage::dup() const {
sp<AMessage> msg = new AMessage(mWhat, mTarget);
msg->mNumItems = mNumItems;
+#ifdef DUMP_STATS
+ {
+ Mutex::Autolock _l(gLock);
+ ++gDupCalls;
+ gAverageDupItems += mNumItems;
+ reportStats();
+ }
+#endif
+
for (size_t i = 0; i < mNumItems; ++i) {
const Item *from = &mItems[i];
Item *to = &msg->mItems[i];
- to->mName = from->mName;
+ to->setName(from->mName, from->mNameLength);
to->mType = from->mType;
switch (from->mType) {
@@ -403,7 +485,7 @@ AString AMessage::debugString(int32_t indent) const {
{
sp<ABuffer> buffer = static_cast<ABuffer *>(item.u.refValue);
- if (buffer != NULL && buffer->size() <= 64) {
+ if (buffer != NULL && buffer->data() != NULL && buffer->size() <= 64) {
tmp = StringPrintf("Buffer %s = {\n", item.mName);
hexdump(buffer->data(), buffer->size(), indent + 4, &tmp);
appendIndent(&tmp, indent + 2);
@@ -453,11 +535,11 @@ sp<AMessage> AMessage::FromParcel(const Parcel &parcel) {
sp<AMessage> msg = new AMessage(what);
msg->mNumItems = static_cast<size_t>(parcel.readInt32());
-
for (size_t i = 0; i < msg->mNumItems; ++i) {
Item *item = &msg->mItems[i];
- item->mName = AAtomizer::Atomize(parcel.readCString());
+ const char *name = parcel.readCString();
+ item->setName(name, strlen(name));
item->mType = static_cast<Type>(parcel.readInt32());
switch (item->mType) {
diff --git a/media/libstagefright/foundation/ANetworkSession.cpp b/media/libstagefright/foundation/ANetworkSession.cpp
index e629588..4504c2b 100644
--- a/media/libstagefright/foundation/ANetworkSession.cpp
+++ b/media/libstagefright/foundation/ANetworkSession.cpp
@@ -521,7 +521,7 @@ status_t ANetworkSession::Session::readMore() {
return err;
}
-void ANetworkSession::Session::dumpFragmentStats(const Fragment &frag) {
+void ANetworkSession::Session::dumpFragmentStats(const Fragment & /* frag */) {
#if 0
int64_t nowUs = ALooper::GetNowUs();
int64_t delayMs = (nowUs - frag.mTimeUs) / 1000ll;
@@ -579,7 +579,7 @@ status_t ANetworkSession::Session::writeMore() {
if (err == -EAGAIN) {
if (!mOutFragments.empty()) {
- ALOGI("%d datagrams remain queued.", mOutFragments.size());
+ ALOGI("%zu datagrams remain queued.", mOutFragments.size());
}
err = OK;
}
@@ -623,7 +623,7 @@ status_t ANetworkSession::Session::writeMore() {
CHECK_EQ(mState, CONNECTED);
CHECK(!mOutFragments.empty());
- ssize_t n;
+ ssize_t n = -1;
while (!mOutFragments.empty()) {
const Fragment &frag = *mOutFragments.begin();
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index dee786d..9835ca3 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -20,6 +20,8 @@
#include <stdlib.h>
#include <string.h>
+#include <binder/Parcel.h>
+#include <utils/String8.h>
#include "ADebug.h"
#include "AString.h"
@@ -48,6 +50,13 @@ AString::AString(const char *s, size_t size)
setTo(s, size);
}
+AString::AString(const String8 &from)
+ : mData(NULL),
+ mSize(0),
+ mAllocSize(1) {
+ setTo(from.string(), from.length());
+}
+
AString::AString(const AString &from)
: mData(NULL),
mSize(0),
@@ -189,64 +198,64 @@ void AString::append(const AString &from, size_t offset, size_t n) {
void AString::append(int x) {
char s[16];
- sprintf(s, "%d", x);
-
+ int result = snprintf(s, sizeof(s), "%d", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
void AString::append(unsigned x) {
char s[16];
- sprintf(s, "%u", x);
-
+ int result = snprintf(s, sizeof(s), "%u", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
void AString::append(long x) {
- char s[16];
- sprintf(s, "%ld", x);
-
+ char s[32];
+ int result = snprintf(s, sizeof(s), "%ld", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
void AString::append(unsigned long x) {
- char s[16];
- sprintf(s, "%lu", x);
-
+ char s[32];
+ int result = snprintf(s, sizeof(s), "%lu", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
void AString::append(long long x) {
char s[32];
- sprintf(s, "%lld", x);
-
+ int result = snprintf(s, sizeof(s), "%lld", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
void AString::append(unsigned long long x) {
char s[32];
- sprintf(s, "%llu", x);
-
+ int result = snprintf(s, sizeof(s), "%llu", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
void AString::append(float x) {
char s[16];
- sprintf(s, "%f", x);
-
+ int result = snprintf(s, sizeof(s), "%f", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
void AString::append(double x) {
char s[16];
- sprintf(s, "%f", x);
-
+ int result = snprintf(s, sizeof(s), "%f", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
void AString::append(void *x) {
- char s[16];
- sprintf(s, "%p", x);
-
+ char s[32];
+ int result = snprintf(s, sizeof(s), "%p", x);
+ CHECK((result > 0) && ((size_t) result) < sizeof(s));
append(s);
}
@@ -298,6 +307,14 @@ int AString::compare(const AString &other) const {
return strcmp(mData, other.mData);
}
+int AString::compareIgnoreCase(const AString &other) const {
+ return strcasecmp(mData, other.mData);
+}
+
+bool AString::equalsIgnoreCase(const AString &other) const {
+ return compareIgnoreCase(other) == 0;
+}
+
void AString::tolower() {
makeMutable();
@@ -320,6 +337,35 @@ bool AString::endsWith(const char *suffix) const {
return !strcmp(mData + mSize - suffixLen, suffix);
}
+bool AString::startsWithIgnoreCase(const char *prefix) const {
+ return !strncasecmp(mData, prefix, strlen(prefix));
+}
+
+bool AString::endsWithIgnoreCase(const char *suffix) const {
+ size_t suffixLen = strlen(suffix);
+
+ if (mSize < suffixLen) {
+ return false;
+ }
+
+ return !strcasecmp(mData + mSize - suffixLen, suffix);
+}
+
+// static
+AString AString::FromParcel(const Parcel &parcel) {
+ size_t size = static_cast<size_t>(parcel.readInt32());
+ return AString(static_cast<const char *>(parcel.readInplace(size)), size);
+}
+
+status_t AString::writeToParcel(Parcel *parcel) const {
+ CHECK_LE(mSize, static_cast<size_t>(INT32_MAX));
+ status_t err = parcel->writeInt32(mSize);
+ if (err == OK) {
+ err = parcel->write(mData, mSize);
+ }
+ return err;
+}
+
AString StringPrintf(const char *format, ...) {
va_list ap;
va_start(ap, format);
diff --git a/media/libstagefright/foundation/Android.mk b/media/libstagefright/foundation/Android.mk
index ad2dab5..90a6a23 100644
--- a/media/libstagefright/foundation/Android.mk
+++ b/media/libstagefright/foundation/Android.mk
@@ -24,7 +24,7 @@ LOCAL_SHARED_LIBRARIES := \
libutils \
liblog
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror
LOCAL_MODULE:= libstagefright_foundation
diff --git a/media/libstagefright/foundation/base64.cpp b/media/libstagefright/foundation/base64.cpp
index d5fb4e0..dcf5bef 100644
--- a/media/libstagefright/foundation/base64.cpp
+++ b/media/libstagefright/foundation/base64.cpp
@@ -33,6 +33,10 @@ sp<ABuffer> decodeBase64(const AString &s) {
if (n >= 2 && s.c_str()[n - 2] == '=') {
padding = 2;
+
+ if (n >= 3 && s.c_str()[n - 3] == '=') {
+ padding = 3;
+ }
}
}
@@ -71,7 +75,7 @@ sp<ABuffer> decodeBase64(const AString &s) {
if (((i + 1) % 4) == 0) {
out[j++] = (accum >> 16);
- if (j < outLen) { out[j++] = (accum >> 8) & 0xff; }
+ if (j < outLen) { out[j++] = (accum >> 8) & 0xff; }
if (j < outLen) { out[j++] = accum & 0xff; }
accum = 0;
diff --git a/media/libstagefright/http/Android.mk b/media/libstagefright/http/Android.mk
new file mode 100644
index 0000000..7f3307d
--- /dev/null
+++ b/media/libstagefright/http/Android.mk
@@ -0,0 +1,28 @@
+LOCAL_PATH:= $(call my-dir)
+
+ifneq ($(TARGET_BUILD_PDK), true)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ HTTPHelper.cpp \
+
+LOCAL_C_INCLUDES:= \
+ $(TOP)/frameworks/av/media/libstagefright \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/base/core/jni \
+
+LOCAL_SHARED_LIBRARIES := \
+ libstagefright liblog libutils libbinder libstagefright_foundation \
+ libandroid_runtime \
+ libmedia
+
+LOCAL_MODULE:= libstagefright_http_support
+
+LOCAL_CFLAGS += -Wno-multichar
+
+LOCAL_CFLAGS += -Werror
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif
diff --git a/media/libstagefright/http/HTTPHelper.cpp b/media/libstagefright/http/HTTPHelper.cpp
new file mode 100644
index 0000000..77845e2
--- /dev/null
+++ b/media/libstagefright/http/HTTPHelper.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "HTTPHelper"
+#include <utils/Log.h>
+
+#include "HTTPHelper.h"
+
+#include "android_runtime/AndroidRuntime.h"
+#include "android_util_Binder.h"
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <nativehelper/ScopedLocalRef.h>
+#include "jni.h"
+
+namespace android {
+
+sp<IMediaHTTPService> CreateHTTPServiceInCurrentJavaContext() {
+ if (AndroidRuntime::getJavaVM() == NULL) {
+ ALOGE("CreateHTTPServiceInCurrentJavaContext called outside "
+ "JAVA environment.");
+ return NULL;
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ ScopedLocalRef<jclass> clazz(
+ env, env->FindClass("android/media/MediaHTTPService"));
+ CHECK(clazz.get() != NULL);
+
+ jmethodID constructID = env->GetMethodID(clazz.get(), "<init>", "()V");
+ CHECK(constructID != NULL);
+
+ ScopedLocalRef<jobject> httpServiceObj(
+ env, env->NewObject(clazz.get(), constructID));
+
+ sp<IMediaHTTPService> httpService;
+ if (httpServiceObj.get() != NULL) {
+ jmethodID asBinderID =
+ env->GetMethodID(clazz.get(), "asBinder", "()Landroid/os/IBinder;");
+ CHECK(asBinderID != NULL);
+
+ ScopedLocalRef<jobject> httpServiceBinderObj(
+ env, env->CallObjectMethod(httpServiceObj.get(), asBinderID));
+ CHECK(httpServiceBinderObj.get() != NULL);
+
+ sp<IBinder> binder =
+ ibinderForJavaObject(env, httpServiceBinderObj.get());
+
+ httpService = interface_cast<IMediaHTTPService>(binder);
+ }
+
+ return httpService;
+}
+
+} // namespace android
diff --git a/media/libstagefright/chromium_http/chromium_http_stub.cpp b/media/libstagefright/http/HTTPHelper.h
index 289f6de..8aef115 100644
--- a/media/libstagefright/chromium_http/chromium_http_stub.cpp
+++ b/media/libstagefright/http/HTTPHelper.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,25 +14,18 @@
* limitations under the License.
*/
-#include <dlfcn.h>
+#ifndef HTTP_HELPER_H_
-#include <include/chromium_http_stub.h>
-#include <include/ChromiumHTTPDataSource.h>
-#include <include/DataUriSource.h>
+#define HTTP_HELPER_H_
+
+#include <utils/RefBase.h>
namespace android {
-HTTPBase *createChromiumHTTPDataSource(uint32_t flags) {
- return new ChromiumHTTPDataSource(flags);
-}
+struct IMediaHTTPService;
-status_t UpdateChromiumHTTPDataSourceProxyConfig(
- const char *host, int32_t port, const char *exclusionList) {
- return ChromiumHTTPDataSource::UpdateProxyConfig(host, port, exclusionList);
-}
+sp<IMediaHTTPService> CreateHTTPServiceInCurrentJavaContext();
-DataSource *createDataUriSource(const char *uri) {
- return new DataUriSource(uri);
-}
+} // namespace android
-}
+#endif // HTTP_HELPER_H_
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
new file mode 100644
index 0000000..2d29913
--- /dev/null
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaHTTP"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaHTTP.h>
+
+#include <binder/IServiceManager.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/Utils.h>
+
+#include <media/IMediaHTTPConnection.h>
+
+namespace android {
+
+MediaHTTP::MediaHTTP(const sp<IMediaHTTPConnection> &conn)
+ : mInitCheck(NO_INIT),
+ mHTTPConnection(conn),
+ mCachedSizeValid(false),
+ mCachedSize(0ll),
+ mDrmManagerClient(NULL) {
+ mInitCheck = OK;
+}
+
+MediaHTTP::~MediaHTTP() {
+ clearDRMState_l();
+}
+
+status_t MediaHTTP::connect(
+ const char *uri,
+ const KeyedVector<String8, String8> *headers,
+ off64_t /* offset */) {
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ KeyedVector<String8, String8> extHeaders;
+ if (headers != NULL) {
+ extHeaders = *headers;
+ }
+ extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
+
+ bool success = mHTTPConnection->connect(uri, &extHeaders);
+
+ mLastHeaders = extHeaders;
+ mLastURI = uri;
+
+ mCachedSizeValid = false;
+
+ return success ? OK : UNKNOWN_ERROR;
+}
+
+void MediaHTTP::disconnect() {
+ if (mInitCheck != OK) {
+ return;
+ }
+
+ mHTTPConnection->disconnect();
+}
+
+status_t MediaHTTP::initCheck() const {
+ return mInitCheck;
+}
+
+ssize_t MediaHTTP::readAt(off64_t offset, void *data, size_t size) {
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ int64_t startTimeUs = ALooper::GetNowUs();
+
+ size_t numBytesRead = 0;
+ while (numBytesRead < size) {
+ size_t copy = size - numBytesRead;
+
+ if (copy > 64 * 1024) {
+ // limit the buffer sizes transferred across binder boundaries
+ // to avoid spurious transaction failures.
+ copy = 64 * 1024;
+ }
+
+ ssize_t n = mHTTPConnection->readAt(
+ offset + numBytesRead, (uint8_t *)data + numBytesRead, copy);
+
+ if (n < 0) {
+ return n;
+ } else if (n == 0) {
+ break;
+ }
+
+ numBytesRead += n;
+ }
+
+ int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
+
+ addBandwidthMeasurement(numBytesRead, delayUs);
+
+ return numBytesRead;
+}
+
+status_t MediaHTTP::getSize(off64_t *size) {
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ // Caching the returned size so that it stays valid even after a
+ // disconnect. NuCachedSource2 relies on this.
+
+ if (!mCachedSizeValid) {
+ mCachedSize = mHTTPConnection->getSize();
+ mCachedSizeValid = true;
+ }
+
+ *size = mCachedSize;
+
+ return *size < 0 ? *size : OK;
+}
+
+uint32_t MediaHTTP::flags() {
+ return kWantsPrefetching | kIsHTTPBasedSource;
+}
+
+status_t MediaHTTP::reconnectAtOffset(off64_t offset) {
+ return connect(mLastURI.c_str(), &mLastHeaders, offset);
+}
+
+// DRM...
+
+sp<DecryptHandle> MediaHTTP::DrmInitialization(const char* mime) {
+ if (mDrmManagerClient == NULL) {
+ mDrmManagerClient = new DrmManagerClient();
+ }
+
+ if (mDrmManagerClient == NULL) {
+ return NULL;
+ }
+
+ if (mDecryptHandle == NULL) {
+ mDecryptHandle = mDrmManagerClient->openDecryptSession(
+ String8(mLastURI.c_str()), mime);
+ }
+
+ if (mDecryptHandle == NULL) {
+ delete mDrmManagerClient;
+ mDrmManagerClient = NULL;
+ }
+
+ return mDecryptHandle;
+}
+
+void MediaHTTP::getDrmInfo(
+ sp<DecryptHandle> &handle, DrmManagerClient **client) {
+ handle = mDecryptHandle;
+ *client = mDrmManagerClient;
+}
+
+String8 MediaHTTP::getUri() {
+ String8 uri;
+ if (OK == mHTTPConnection->getUri(&uri)) {
+ return uri;
+ }
+ return String8(mLastURI.c_str());
+}
+
+String8 MediaHTTP::getMIMEType() const {
+ if (mInitCheck != OK) {
+ return String8("application/octet-stream");
+ }
+
+ String8 mimeType;
+ status_t err = mHTTPConnection->getMIMEType(&mimeType);
+
+ if (err != OK) {
+ return String8("application/octet-stream");
+ }
+
+ return mimeType;
+}
+
+void MediaHTTP::clearDRMState_l() {
+ if (mDecryptHandle != NULL) {
+ // To release mDecryptHandle
+ CHECK(mDrmManagerClient);
+ mDrmManagerClient->closeDecryptSession(mDecryptHandle);
+ mDecryptHandle = NULL;
+ }
+}
+
+} // namespace android
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index f3529f9..e8d558c 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -13,6 +13,8 @@ LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/openssl/include
+LOCAL_CFLAGS += -Werror
+
LOCAL_SHARED_LIBRARIES := \
libbinder \
libcrypto \
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index e53e35a..fba6b09 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -27,6 +27,8 @@
#include "mpeg2ts/AnotherPacketSource.h"
#include <cutils/properties.h>
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -34,52 +36,53 @@
#include <media/stagefright/DataSource.h>
#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaHTTP.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <utils/Mutex.h>
#include <ctype.h>
+#include <inttypes.h>
#include <openssl/aes.h>
#include <openssl/md5.h>
namespace android {
LiveSession::LiveSession(
- const sp<AMessage> &notify, uint32_t flags, bool uidValid, uid_t uid)
+ const sp<AMessage> &notify, uint32_t flags,
+ const sp<IMediaHTTPService> &httpService)
: mNotify(notify),
mFlags(flags),
- mUIDValid(uidValid),
- mUID(uid),
+ mHTTPService(httpService),
mInPreparationPhase(true),
- mHTTPDataSource(
- HTTPBase::Create(
- (mFlags & kFlagIncognito)
- ? HTTPBase::kFlagIncognito
- : 0)),
- mPrevBandwidthIndex(-1),
+ mHTTPDataSource(new MediaHTTP(mHTTPService->makeHTTPConnection())),
+ mCurBandwidthIndex(-1),
mStreamMask(0),
mNewStreamMask(0),
mSwapMask(0),
mCheckBandwidthGeneration(0),
mSwitchGeneration(0),
+ mSubtitleGeneration(0),
mLastDequeuedTimeUs(0ll),
mRealTimeBaseUs(0ll),
mReconfigurationInProgress(false),
mSwitchInProgress(false),
mDisconnectReplyID(0),
- mSeekReplyID(0) {
- if (mUIDValid) {
- mHTTPDataSource->setUID(mUID);
- }
+ mSeekReplyID(0),
+ mFirstTimeUsValid(false),
+ mFirstTimeUs(0),
+ mLastSeekTimeUs(0) {
mStreams[kAudioIndex] = StreamItem("audio");
mStreams[kVideoIndex] = StreamItem("video");
mStreams[kSubtitleIndex] = StreamItem("subtitles");
for (size_t i = 0; i < kMaxStreams; ++i) {
+ mDiscontinuities.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
mPacketSources.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
mPacketSources2.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
+ mBuffering[i] = false;
}
}
@@ -112,31 +115,90 @@ status_t LiveSession::dequeueAccessUnit(
return -EWOULDBLOCK;
}
+ status_t finalResult;
+ sp<AnotherPacketSource> discontinuityQueue = mDiscontinuities.valueFor(stream);
+ if (discontinuityQueue->hasBufferAvailable(&finalResult)) {
+ discontinuityQueue->dequeueAccessUnit(accessUnit);
+ // seeking, track switching
+ sp<AMessage> extra;
+ int64_t timeUs;
+ if ((*accessUnit)->meta()->findMessage("extra", &extra)
+ && extra != NULL
+ && extra->findInt64("timeUs", &timeUs)) {
+ // seeking only
+ mLastSeekTimeUs = timeUs;
+ mDiscontinuityOffsetTimesUs.clear();
+ mDiscontinuityAbsStartTimesUs.clear();
+ }
+ return INFO_DISCONTINUITY;
+ }
+
sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
- status_t finalResult;
+ ssize_t idx = typeToIndex(stream);
if (!packetSource->hasBufferAvailable(&finalResult)) {
+ if (finalResult == OK) {
+ mBuffering[idx] = true;
+ return -EAGAIN;
+ } else {
+ return finalResult;
+ }
+ }
+
+ if (mBuffering[idx]) {
+ if (mSwitchInProgress
+ || packetSource->isFinished(0)
+ || packetSource->getEstimatedDurationUs() > 10000000ll) {
+ mBuffering[idx] = false;
+ }
+ }
+
+ if (mBuffering[idx]) {
+ return -EAGAIN;
+ }
+
+ // wait for counterpart
+ sp<AnotherPacketSource> otherSource;
+ uint32_t mask = mNewStreamMask & mStreamMask;
+ uint32_t fetchersMask = 0;
+ for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+ uint32_t fetcherMask = mFetcherInfos.valueAt(i).mFetcher->getStreamTypeMask();
+ fetchersMask |= fetcherMask;
+ }
+ mask &= fetchersMask;
+ if (stream == STREAMTYPE_AUDIO && (mask & STREAMTYPE_VIDEO)) {
+ otherSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
+ } else if (stream == STREAMTYPE_VIDEO && (mask & STREAMTYPE_AUDIO)) {
+ otherSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
+ }
+ if (otherSource != NULL && !otherSource->hasBufferAvailable(&finalResult)) {
return finalResult == OK ? -EAGAIN : finalResult;
}
status_t err = packetSource->dequeueAccessUnit(accessUnit);
+ size_t streamIdx;
const char *streamStr;
switch (stream) {
case STREAMTYPE_AUDIO:
+ streamIdx = kAudioIndex;
streamStr = "audio";
break;
case STREAMTYPE_VIDEO:
+ streamIdx = kVideoIndex;
streamStr = "video";
break;
case STREAMTYPE_SUBTITLES:
+ streamIdx = kSubtitleIndex;
streamStr = "subs";
break;
default:
TRESPASS();
}
+ StreamItem& strm = mStreams[streamIdx];
if (err == INFO_DISCONTINUITY) {
+ // adaptive streaming, discontinuities in the playlist
int32_t type;
CHECK((*accessUnit)->meta()->findInt32("discontinuity", &type));
@@ -151,10 +213,7 @@ status_t LiveSession::dequeueAccessUnit(
extra == NULL ? "NULL" : extra->debugString().c_str());
int32_t swap;
- if (type == ATSParser::DISCONTINUITY_FORMATCHANGE
- && (*accessUnit)->meta()->findInt32("swapPacketSource", &swap)
- && swap) {
-
+ if ((*accessUnit)->meta()->findInt32("swapPacketSource", &swap) && swap) {
int32_t switchGeneration;
CHECK((*accessUnit)->meta()->findInt32("switchGeneration", &switchGeneration));
{
@@ -167,16 +226,75 @@ status_t LiveSession::dequeueAccessUnit(
msg->post();
}
}
+ } else {
+ size_t seq = strm.mCurDiscontinuitySeq;
+ int64_t offsetTimeUs;
+ if (mDiscontinuityOffsetTimesUs.indexOfKey(seq) >= 0) {
+ offsetTimeUs = mDiscontinuityOffsetTimesUs.valueFor(seq);
+ } else {
+ offsetTimeUs = 0;
+ }
+
+ seq += 1;
+ if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+ int64_t firstTimeUs;
+ firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+ offsetTimeUs += strm.mLastDequeuedTimeUs - firstTimeUs;
+ offsetTimeUs += strm.mLastSampleDurationUs;
+ } else {
+ offsetTimeUs += strm.mLastSampleDurationUs;
+ }
+
+ mDiscontinuityOffsetTimesUs.add(seq, offsetTimeUs);
}
} else if (err == OK) {
+
if (stream == STREAMTYPE_AUDIO || stream == STREAMTYPE_VIDEO) {
int64_t timeUs;
+ int32_t discontinuitySeq = 0;
CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
- ALOGV("[%s] read buffer at time %lld us", streamStr, timeUs);
+ (*accessUnit)->meta()->findInt32("discontinuitySeq", &discontinuitySeq);
+ strm.mCurDiscontinuitySeq = discontinuitySeq;
+
+ int32_t discard = 0;
+ int64_t firstTimeUs;
+ if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+ int64_t durUs; // approximate sample duration
+ if (timeUs > strm.mLastDequeuedTimeUs) {
+ durUs = timeUs - strm.mLastDequeuedTimeUs;
+ } else {
+ durUs = strm.mLastDequeuedTimeUs - timeUs;
+ }
+ strm.mLastSampleDurationUs = durUs;
+ firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+ } else if ((*accessUnit)->meta()->findInt32("discard", &discard) && discard) {
+ firstTimeUs = timeUs;
+ } else {
+ mDiscontinuityAbsStartTimesUs.add(strm.mCurDiscontinuitySeq, timeUs);
+ firstTimeUs = timeUs;
+ }
+
+ strm.mLastDequeuedTimeUs = timeUs;
+ if (timeUs >= firstTimeUs) {
+ timeUs -= firstTimeUs;
+ } else {
+ timeUs = 0;
+ }
+ timeUs += mLastSeekTimeUs;
+ if (mDiscontinuityOffsetTimesUs.indexOfKey(discontinuitySeq) >= 0) {
+ timeUs += mDiscontinuityOffsetTimesUs.valueFor(discontinuitySeq);
+ }
+ ALOGV("[%s] read buffer at time %" PRId64 " us", streamStr, timeUs);
+ (*accessUnit)->meta()->setInt64("timeUs", timeUs);
mLastDequeuedTimeUs = timeUs;
mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
} else if (stream == STREAMTYPE_SUBTITLES) {
+ int32_t subtitleGeneration;
+ if ((*accessUnit)->meta()->findInt32("subtitleGeneration", &subtitleGeneration)
+ && subtitleGeneration != mSubtitleGeneration) {
+ return -EAGAIN;
+ };
(*accessUnit)->meta()->setInt32(
"trackIndex", mPlaylist->getSelectedIndex());
(*accessUnit)->meta()->setInt64("baseUs", mRealTimeBaseUs);
@@ -235,10 +353,6 @@ status_t LiveSession::seekTo(int64_t timeUs) {
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
- uint32_t replyID;
- CHECK(response == mSeekReply && 0 != mSeekReplyID);
- mSeekReply.clear();
- mSeekReplyID = 0;
return err;
}
@@ -264,12 +378,16 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
case kWhatSeek:
{
- CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
+ uint32_t seekReplyID;
+ CHECK(msg->senderAwaitsResponse(&seekReplyID));
+ mSeekReplyID = seekReplyID;
+ mSeekReply = new AMessage;
status_t err = onSeek(msg);
- mSeekReply = new AMessage;
- mSeekReply->setInt32("err", err);
+ if (err != OK) {
+ msg->post(50000);
+ }
break;
}
@@ -292,7 +410,9 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- tryToFinishBandwidthSwitch();
+ if (mSwitchInProgress) {
+ tryToFinishBandwidthSwitch();
+ }
}
if (mContinuation != NULL) {
@@ -302,7 +422,10 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
if (mSeekReplyID != 0) {
CHECK(mSeekReply != NULL);
+ mSeekReply->setInt32("err", OK);
mSeekReply->postReply(mSeekReplyID);
+ mSeekReplyID = 0;
+ mSeekReply.clear();
}
}
}
@@ -329,6 +452,23 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
ALOGE("XXX Received error %d from PlaylistFetcher.", err);
+ // handle EOS on subtitle tracks independently
+ AString uri;
+ if (err == ERROR_END_OF_STREAM && msg->findString("uri", &uri)) {
+ ssize_t i = mFetcherInfos.indexOfKey(uri);
+ if (i >= 0) {
+ const sp<PlaylistFetcher> &fetcher = mFetcherInfos.valueAt(i).mFetcher;
+ if (fetcher != NULL) {
+ uint32_t type = fetcher->getStreamTypeMask();
+ if (type == STREAMTYPE_SUBTITLES) {
+ mPacketSources.valueFor(
+ STREAMTYPE_SUBTITLES)->signalEOS(err);;
+ break;
+ }
+ }
+ }
+ }
+
if (mInPreparationPhase) {
postPrepared(err);
}
@@ -354,6 +494,10 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
AString uri;
CHECK(msg->findString("uri", &uri));
+ if (mFetcherInfos.indexOfKey(uri) < 0) {
+ ALOGE("couldn't find uri");
+ break;
+ }
FetcherInfo *info = &mFetcherInfos.editValueFor(uri);
info->mIsPrepared = true;
@@ -410,7 +554,7 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
break;
}
- onCheckBandwidth();
+ onCheckBandwidth(msg);
break;
}
@@ -443,6 +587,19 @@ void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
onSwapped(msg);
break;
}
+
+ case kWhatCheckSwitchDown:
+ {
+ onCheckSwitchDown();
+ break;
+ }
+
+ case kWhatSwitchDown:
+ {
+ onSwitchDown();
+ break;
+ }
+
default:
TRESPASS();
break;
@@ -466,6 +623,21 @@ LiveSession::StreamType LiveSession::indexToType(int idx) {
return (StreamType)(1 << idx);
}
+// static
+ssize_t LiveSession::typeToIndex(int32_t type) {
+ switch (type) {
+ case STREAMTYPE_AUDIO:
+ return 0;
+ case STREAMTYPE_VIDEO:
+ return 1;
+ case STREAMTYPE_SUBTITLES:
+ return 2;
+ default:
+ return -1;
+ };
+ return -1;
+}
+
void LiveSession::onConnect(const sp<AMessage> &msg) {
AString url;
CHECK(msg->findString("url", &url));
@@ -480,11 +652,8 @@ void LiveSession::onConnect(const sp<AMessage> &msg) {
headers = NULL;
}
-#if 1
- ALOGI("onConnect <URL suppressed>");
-#else
- ALOGI("onConnect %s", url.c_str());
-#endif
+ // TODO currently we don't know if we are coming here from incognito mode
+ ALOGI("onConnect %s", uriDebugString(url).c_str());
mMasterURL = url;
@@ -492,7 +661,7 @@ void LiveSession::onConnect(const sp<AMessage> &msg) {
mPlaylist = fetchPlaylist(url.c_str(), NULL /* curPlaylistHash */, &dummy);
if (mPlaylist == NULL) {
- ALOGE("unable to fetch master playlist '%s'.", url.c_str());
+ ALOGE("unable to fetch master playlist %s.", uriDebugString(url).c_str());
postPrepared(ERROR_IO);
return;
@@ -544,8 +713,9 @@ void LiveSession::onConnect(const sp<AMessage> &msg) {
mBandwidthItems.push(item);
}
+ mPlaylist->pickRandomMediaItems();
changeConfiguration(
- 0ll /* timeUs */, initialBandwidthIndex, true /* pickTrack */);
+ 0ll /* timeUs */, initialBandwidthIndex, false /* pickTrack */);
}
void LiveSession::finishDisconnect() {
@@ -557,6 +727,9 @@ void LiveSession::finishDisconnect() {
// (finishDisconnect, onFinishDisconnect2)
cancelBandwidthSwitch();
+ // cancel switch down monitor
+ mSwitchDownMonitor.clear();
+
for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
mFetcherInfos.valueAt(i).mFetcher->stopAsync();
}
@@ -599,7 +772,7 @@ sp<PlaylistFetcher> LiveSession::addFetcher(const char *uri) {
notify->setInt32("switchGeneration", mSwitchGeneration);
FetcherInfo info;
- info.mFetcher = new PlaylistFetcher(notify, this, uri);
+ info.mFetcher = new PlaylistFetcher(notify, this, uri, mSubtitleGeneration);
info.mDurationUs = -1ll;
info.mIsPrepared = false;
info.mToBeRemoved = false;
@@ -679,7 +852,7 @@ ssize_t LiveSession::fetchFile(
ssize_t bytesRead = 0;
// adjust range_length if only reading partial block
- if (block_size > 0 && (range_length == -1 || buffer->size() + block_size < range_length)) {
+ if (block_size > 0 && (range_length == -1 || (int64_t)(buffer->size() + block_size) < range_length)) {
range_length = buffer->size() + block_size;
}
for (;;) {
@@ -688,7 +861,7 @@ ssize_t LiveSession::fetchFile(
if (bufferRemaining == 0 && getSizeErr != OK) {
bufferRemaining = 32768;
- ALOGV("increasing download buffer to %d bytes",
+ ALOGV("increasing download buffer to %zu bytes",
buffer->size() + bufferRemaining);
sp<ABuffer> copy = new ABuffer(buffer->size() + bufferRemaining);
@@ -701,7 +874,7 @@ ssize_t LiveSession::fetchFile(
size_t maxBytesToRead = bufferRemaining;
if (range_length >= 0) {
int64_t bytesLeftInRange = range_length - buffer->size();
- if (bytesLeftInRange < maxBytesToRead) {
+ if (bytesLeftInRange < (int64_t)maxBytesToRead) {
maxBytesToRead = bytesLeftInRange;
if (bytesLeftInRange == 0) {
@@ -769,9 +942,6 @@ sp<M3UParser> LiveSession::fetchPlaylist(
// playlist unchanged
*unchanged = true;
- ALOGV("Playlist unchanged, refresh state is now %d",
- (int)mRefreshState);
-
return NULL;
}
@@ -836,14 +1006,22 @@ size_t LiveSession::getBandwidthIndex() {
}
}
- // Consider only 80% of the available bandwidth usable.
- bandwidthBps = (bandwidthBps * 8) / 10;
-
// Pick the highest bandwidth stream below or equal to estimated bandwidth.
index = mBandwidthItems.size() - 1;
- while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth
- > (size_t)bandwidthBps) {
+ while (index > 0) {
+ // consider only 80% of the available bandwidth, but if we are switching up,
+ // be even more conservative (70%) to avoid overestimating and immediately
+ // switching back.
+ size_t adjustedBandwidthBps = bandwidthBps;
+ if (index > mCurBandwidthIndex) {
+ adjustedBandwidthBps = adjustedBandwidthBps * 7 / 10;
+ } else {
+ adjustedBandwidthBps = adjustedBandwidthBps * 8 / 10;
+ }
+ if (mBandwidthItems.itemAt(index).mBandwidth <= adjustedBandwidthBps) {
+ break;
+ }
--index;
}
}
@@ -856,20 +1034,20 @@ size_t LiveSession::getBandwidthIndex() {
// to lowest)
const size_t kMinIndex = 0;
- static ssize_t mPrevBandwidthIndex = -1;
+ static ssize_t mCurBandwidthIndex = -1;
size_t index;
- if (mPrevBandwidthIndex < 0) {
+ if (mCurBandwidthIndex < 0) {
index = kMinIndex;
} else if (uniformRand() < 0.5) {
- index = (size_t)mPrevBandwidthIndex;
+ index = (size_t)mCurBandwidthIndex;
} else {
- index = mPrevBandwidthIndex + 1;
+ index = mCurBandwidthIndex + 1;
if (index == mBandwidthItems.size()) {
index = kMinIndex;
}
}
- mPrevBandwidthIndex = index;
+ mCurBandwidthIndex = index;
#elif 0
// Pick the highest bandwidth stream below or equal to 1.2 Mbit/sec
@@ -900,15 +1078,34 @@ size_t LiveSession::getBandwidthIndex() {
return index;
}
+int64_t LiveSession::latestMediaSegmentStartTimeUs() {
+ sp<AMessage> audioMeta = mPacketSources.valueFor(STREAMTYPE_AUDIO)->getLatestDequeuedMeta();
+ int64_t minSegmentStartTimeUs = -1, videoSegmentStartTimeUs = -1;
+ if (audioMeta != NULL) {
+ audioMeta->findInt64("segmentStartTimeUs", &minSegmentStartTimeUs);
+ }
+
+ sp<AMessage> videoMeta = mPacketSources.valueFor(STREAMTYPE_VIDEO)->getLatestDequeuedMeta();
+ if (videoMeta != NULL
+ && videoMeta->findInt64("segmentStartTimeUs", &videoSegmentStartTimeUs)) {
+ if (minSegmentStartTimeUs < 0 || videoSegmentStartTimeUs < minSegmentStartTimeUs) {
+ minSegmentStartTimeUs = videoSegmentStartTimeUs;
+ }
+
+ }
+ return minSegmentStartTimeUs;
+}
+
status_t LiveSession::onSeek(const sp<AMessage> &msg) {
int64_t timeUs;
CHECK(msg->findInt64("timeUs", &timeUs));
if (!mReconfigurationInProgress) {
- changeConfiguration(timeUs, getBandwidthIndex());
+ changeConfiguration(timeUs, mCurBandwidthIndex);
+ return OK;
+ } else {
+ return -EWOULDBLOCK;
}
-
- return OK;
}
status_t LiveSession::getDuration(int64_t *durationUs) const {
@@ -935,14 +1132,34 @@ bool LiveSession::hasDynamicDuration() const {
return false;
}
-status_t LiveSession::getTrackInfo(Parcel *reply) const {
- return mPlaylist->getTrackInfo(reply);
+size_t LiveSession::getTrackCount() const {
+ if (mPlaylist == NULL) {
+ return 0;
+ } else {
+ return mPlaylist->getTrackCount();
+ }
+}
+
+sp<AMessage> LiveSession::getTrackInfo(size_t trackIndex) const {
+ if (mPlaylist == NULL) {
+ return NULL;
+ } else {
+ return mPlaylist->getTrackInfo(trackIndex);
+ }
}
status_t LiveSession::selectTrack(size_t index, bool select) {
+ if (mPlaylist == NULL) {
+ return INVALID_OPERATION;
+ }
+
+ ++mSubtitleGeneration;
status_t err = mPlaylist->selectTrack(index, select);
if (err == OK) {
- (new AMessage(kWhatChangeConfiguration, id()))->post();
+ sp<AMessage> msg = new AMessage(kWhatChangeConfiguration, id());
+ msg->setInt32("bandwidthIndex", mCurBandwidthIndex);
+ msg->setInt32("pickTrack", select);
+ msg->post();
}
return err;
}
@@ -969,15 +1186,11 @@ void LiveSession::changeConfiguration(
CHECK(!mReconfigurationInProgress);
mReconfigurationInProgress = true;
- mPrevBandwidthIndex = bandwidthIndex;
+ mCurBandwidthIndex = bandwidthIndex;
- ALOGV("changeConfiguration => timeUs:%lld us, bwIndex:%d, pickTrack:%d",
+ ALOGV("changeConfiguration => timeUs:%" PRId64 " us, bwIndex:%zu, pickTrack:%d",
timeUs, bandwidthIndex, pickTrack);
- if (pickTrack) {
- mPlaylist->pickRandomMediaItems();
- }
-
CHECK_LT(bandwidthIndex, mBandwidthItems.size());
const BandwidthItem &item = mBandwidthItems.itemAt(bandwidthIndex);
@@ -1000,14 +1213,15 @@ void LiveSession::changeConfiguration(
// If we're seeking all current fetchers are discarded.
if (timeUs < 0ll) {
- // delay fetcher removal
- discardFetcher = false;
+ // delay fetcher removal if not picking tracks
+ discardFetcher = pickTrack;
for (size_t j = 0; j < kMaxStreams; ++j) {
StreamType type = indexToType(j);
if ((streamMask & type) && uri == URIs[j]) {
resumeMask |= type;
streamMask &= ~type;
+ discardFetcher = false;
}
}
}
@@ -1021,16 +1235,17 @@ void LiveSession::changeConfiguration(
sp<AMessage> msg;
if (timeUs < 0ll) {
- // skip onChangeConfiguration2 (decoder destruction) if switching.
+ // skip onChangeConfiguration2 (decoder destruction) if not seeking.
msg = new AMessage(kWhatChangeConfiguration3, id());
} else {
msg = new AMessage(kWhatChangeConfiguration2, id());
}
msg->setInt32("streamMask", streamMask);
msg->setInt32("resumeMask", resumeMask);
+ msg->setInt32("pickTrack", pickTrack);
msg->setInt64("timeUs", timeUs);
for (size_t i = 0; i < kMaxStreams; ++i) {
- if (streamMask & indexToType(i)) {
+ if ((streamMask | resumeMask) & indexToType(i)) {
msg->setString(mStreams[i].uriKey().c_str(), URIs[i].c_str());
}
}
@@ -1047,14 +1262,20 @@ void LiveSession::changeConfiguration(
if (mSeekReplyID != 0) {
CHECK(mSeekReply != NULL);
+ mSeekReply->setInt32("err", OK);
mSeekReply->postReply(mSeekReplyID);
+ mSeekReplyID = 0;
+ mSeekReply.clear();
}
}
}
void LiveSession::onChangeConfiguration(const sp<AMessage> &msg) {
if (!mReconfigurationInProgress) {
- changeConfiguration(-1ll /* timeUs */, getBandwidthIndex());
+ int32_t pickTrack = 0, bandwidthIndex = mCurBandwidthIndex;
+ msg->findInt32("pickTrack", &pickTrack);
+ msg->findInt32("bandwidthIndex", &bandwidthIndex);
+ changeConfiguration(-1ll /* timeUs */, bandwidthIndex, pickTrack);
} else {
msg->post(1000000ll); // retry in 1 sec
}
@@ -1065,8 +1286,14 @@ void LiveSession::onChangeConfiguration2(const sp<AMessage> &msg) {
// All fetchers are either suspended or have been removed now.
- uint32_t streamMask;
+ uint32_t streamMask, resumeMask;
CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
+ CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask));
+
+ // currently onChangeConfiguration2 is only called for seeking;
+ // remove the following CHECK if using it else where.
+ CHECK_EQ(resumeMask, 0);
+ streamMask |= resumeMask;
AString URIs[kMaxStreams];
for (size_t i = 0; i < kMaxStreams; ++i) {
@@ -1123,23 +1350,35 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask));
- for (size_t i = 0; i < kMaxStreams; ++i) {
- if (streamMask & indexToType(i)) {
- CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mUri));
- }
- }
-
int64_t timeUs;
+ int32_t pickTrack;
bool switching = false;
CHECK(msg->findInt64("timeUs", &timeUs));
+ CHECK(msg->findInt32("pickTrack", &pickTrack));
if (timeUs < 0ll) {
- timeUs = mLastDequeuedTimeUs;
- switching = true;
+ if (!pickTrack) {
+ switching = true;
+ }
+ mRealTimeBaseUs = ALooper::GetNowUs() - mLastDequeuedTimeUs;
+ } else {
+ mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
}
- mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
- mNewStreamMask = streamMask;
+ for (size_t i = 0; i < kMaxStreams; ++i) {
+ if (streamMask & indexToType(i)) {
+ if (switching) {
+ CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mNewUri));
+ } else {
+ CHECK(msg->findString(mStreams[i].uriKey().c_str(), &mStreams[i].mUri));
+ }
+ }
+ }
+
+ mNewStreamMask = streamMask | resumeMask;
+ if (switching) {
+ mSwapMask = mStreamMask & ~resumeMask;
+ }
// Of all existing fetchers:
// * Resume fetchers that are still needed and assign them original packet sources.
@@ -1152,6 +1391,16 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
for (size_t j = 0; j < kMaxStreams; ++j) {
if ((resumeMask & indexToType(j)) && uri == mStreams[j].mUri) {
sources[j] = mPacketSources.valueFor(indexToType(j));
+
+ if (j != kSubtitleIndex) {
+ ALOGV("queueing dummy discontinuity for stream type %d", indexToType(j));
+ sp<AnotherPacketSource> discontinuityQueue;
+ discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+ discontinuityQueue->queueDiscontinuity(
+ ATSParser::DISCONTINUITY_NONE,
+ NULL,
+ true);
+ }
}
}
@@ -1179,43 +1428,86 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
}
AString uri;
- uri = mStreams[i].mUri;
+ uri = switching ? mStreams[i].mNewUri : mStreams[i].mUri;
sp<PlaylistFetcher> fetcher = addFetcher(uri.c_str());
CHECK(fetcher != NULL);
int32_t latestSeq = -1;
- int64_t latestTimeUs = 0ll;
+ int64_t startTimeUs = -1;
+ int64_t segmentStartTimeUs = -1ll;
+ int32_t discontinuitySeq = -1;
sp<AnotherPacketSource> sources[kMaxStreams];
+ if (i == kSubtitleIndex) {
+ segmentStartTimeUs = latestMediaSegmentStartTimeUs();
+ }
+
// TRICKY: looping from i as earlier streams are already removed from streamMask
for (size_t j = i; j < kMaxStreams; ++j) {
- if ((streamMask & indexToType(j)) && uri == mStreams[j].mUri) {
+ const AString &streamUri = switching ? mStreams[j].mNewUri : mStreams[j].mUri;
+ if ((streamMask & indexToType(j)) && uri == streamUri) {
sources[j] = mPacketSources.valueFor(indexToType(j));
- if (!switching) {
+ if (timeUs >= 0) {
sources[j]->clear();
+ startTimeUs = timeUs;
+
+ sp<AnotherPacketSource> discontinuityQueue;
+ sp<AMessage> extra = new AMessage;
+ extra->setInt64("timeUs", timeUs);
+ discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+ discontinuityQueue->queueDiscontinuity(
+ ATSParser::DISCONTINUITY_SEEK, extra, true);
} else {
- int32_t type, seq;
- int64_t srcTimeUs;
- sp<AMessage> meta = sources[j]->getLatestMeta();
+ int32_t type;
+ int64_t srcSegmentStartTimeUs;
+ sp<AMessage> meta;
+ if (pickTrack) {
+ // selecting
+ meta = sources[j]->getLatestDequeuedMeta();
+ } else {
+ // adapting
+ meta = sources[j]->getLatestEnqueuedMeta();
+ }
if (meta != NULL && !meta->findInt32("discontinuity", &type)) {
- CHECK(meta->findInt32("seq", &seq));
- if (seq > latestSeq) {
- latestSeq = seq;
+ int64_t tmpUs;
+ CHECK(meta->findInt64("timeUs", &tmpUs));
+ if (startTimeUs < 0 || tmpUs < startTimeUs) {
+ startTimeUs = tmpUs;
}
- CHECK(meta->findInt64("timeUs", &srcTimeUs));
- if (srcTimeUs > latestTimeUs) {
- latestTimeUs = srcTimeUs;
+
+ CHECK(meta->findInt64("segmentStartTimeUs", &tmpUs));
+ if (segmentStartTimeUs < 0 || tmpUs < segmentStartTimeUs) {
+ segmentStartTimeUs = tmpUs;
+ }
+
+ int32_t seq;
+ CHECK(meta->findInt32("discontinuitySeq", &seq));
+ if (discontinuitySeq < 0 || seq < discontinuitySeq) {
+ discontinuitySeq = seq;
}
}
- sources[j] = mPacketSources2.valueFor(indexToType(j));
- sources[j]->clear();
- uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
- if (extraStreams & indexToType(j)) {
- sources[j]->queueAccessUnit(createFormatChangeBuffer(/* swap = */ false));
+ if (pickTrack) {
+ // selecting track, queue discontinuities before content
+ sources[j]->clear();
+ if (j == kSubtitleIndex) {
+ break;
+ }
+ sp<AnotherPacketSource> discontinuityQueue;
+ discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
+ discontinuityQueue->queueDiscontinuity(
+ ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+ } else {
+ // adapting, queue discontinuities after resume
+ sources[j] = mPacketSources2.valueFor(indexToType(j));
+ sources[j]->clear();
+ uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
+ if (extraStreams & indexToType(j)) {
+ sources[j]->queueAccessUnit(createFormatChangeBuffer(/*swap*/ false));
+ }
}
}
@@ -1227,14 +1519,16 @@ void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
sources[kAudioIndex],
sources[kVideoIndex],
sources[kSubtitleIndex],
- timeUs,
- latestTimeUs /* min start time(us) */,
- latestSeq >= 0 ? latestSeq + 1 : -1 /* starting sequence number hint */ );
+ startTimeUs < 0 ? mLastSeekTimeUs : startTimeUs,
+ segmentStartTimeUs,
+ discontinuitySeq,
+ switching);
}
// All fetchers have now been started, the configuration change
// has completed.
+ cancelCheckBandwidthEvent();
scheduleCheckBandwidthEvent();
ALOGV("XXX configuration change completed.");
@@ -1259,8 +1553,17 @@ void LiveSession::onSwapped(const sp<AMessage> &msg) {
int32_t stream;
CHECK(msg->findInt32("stream", &stream));
- mSwapMask |= stream;
- if (mSwapMask != mStreamMask) {
+
+ ssize_t idx = typeToIndex(stream);
+ CHECK(idx >= 0);
+ if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) {
+ ALOGW("swapping stream type %d %s to empty stream", stream, mStreams[idx].mUri.c_str());
+ }
+ mStreams[idx].mUri = mStreams[idx].mNewUri;
+ mStreams[idx].mNewUri.clear();
+
+ mSwapMask &= ~stream;
+ if (mSwapMask != 0) {
return;
}
@@ -1270,15 +1573,65 @@ void LiveSession::onSwapped(const sp<AMessage> &msg) {
StreamType extraStream = (StreamType) (extraStreams & ~(extraStreams - 1));
swapPacketSource(extraStream);
extraStreams &= ~extraStream;
+
+ idx = typeToIndex(extraStream);
+ CHECK(idx >= 0);
+ if (mStreams[idx].mNewUri.empty()) {
+ ALOGW("swapping extra stream type %d %s to empty stream",
+ extraStream, mStreams[idx].mUri.c_str());
+ }
+ mStreams[idx].mUri = mStreams[idx].mNewUri;
+ mStreams[idx].mNewUri.clear();
}
tryToFinishBandwidthSwitch();
}
+void LiveSession::onCheckSwitchDown() {
+ if (mSwitchDownMonitor == NULL) {
+ return;
+ }
+
+ for (size_t i = 0; i < kMaxStreams; ++i) {
+ int32_t targetDuration;
+ sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(indexToType(i));
+ sp<AMessage> meta = packetSource->getLatestDequeuedMeta();
+
+ if (meta != NULL && meta->findInt32("targetDuration", &targetDuration) ) {
+ int64_t bufferedDurationUs = packetSource->getEstimatedDurationUs();
+ int64_t targetDurationUs = targetDuration * 1000000ll;
+
+ if (bufferedDurationUs < targetDurationUs / 3) {
+ (new AMessage(kWhatSwitchDown, id()))->post();
+ break;
+ }
+ }
+ }
+
+ mSwitchDownMonitor->post(1000000ll);
+}
+
+void LiveSession::onSwitchDown() {
+ if (mReconfigurationInProgress || mSwitchInProgress || mCurBandwidthIndex == 0) {
+ return;
+ }
+
+ ssize_t bandwidthIndex = getBandwidthIndex();
+ if (bandwidthIndex < mCurBandwidthIndex) {
+ changeConfiguration(-1, bandwidthIndex, false);
+ return;
+ }
+
+ changeConfiguration(-1, mCurBandwidthIndex - 1, false);
+}
+
// Mark switch done when:
-// 1. all old buffers are swapped out, AND
-// 2. all old fetchers are removed.
+// 1. all old buffers are swapped out
void LiveSession::tryToFinishBandwidthSwitch() {
+ if (!mSwitchInProgress) {
+ return;
+ }
+
bool needToRemoveFetchers = false;
for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
if (mFetcherInfos.valueAt(i).mToBeRemoved) {
@@ -1286,10 +1639,11 @@ void LiveSession::tryToFinishBandwidthSwitch() {
break;
}
}
- if (!needToRemoveFetchers && mSwapMask == mStreamMask) {
+
+ if (!needToRemoveFetchers && mSwapMask == 0) {
+ ALOGI("mSwitchInProgress = false");
mStreamMask = mNewStreamMask;
mSwitchInProgress = false;
- mSwapMask = 0;
}
}
@@ -1308,6 +1662,28 @@ void LiveSession::cancelBandwidthSwitch() {
mSwitchGeneration++;
mSwitchInProgress = false;
mSwapMask = 0;
+
+ for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+ FetcherInfo& info = mFetcherInfos.editValueAt(i);
+ if (info.mToBeRemoved) {
+ info.mToBeRemoved = false;
+ }
+ }
+
+ for (size_t i = 0; i < kMaxStreams; ++i) {
+ if (!mStreams[i].mNewUri.empty()) {
+ ssize_t j = mFetcherInfos.indexOfKey(mStreams[i].mNewUri);
+ if (j < 0) {
+ mStreams[i].mNewUri.clear();
+ continue;
+ }
+
+ const FetcherInfo &info = mFetcherInfos.valueAt(j);
+ info.mFetcher->stopAsync();
+ mFetcherInfos.removeItemsAt(j);
+ mStreams[i].mNewUri.clear();
+ }
+ }
}
bool LiveSession::canSwitchBandwidthTo(size_t bandwidthIndex) {
@@ -1315,33 +1691,29 @@ bool LiveSession::canSwitchBandwidthTo(size_t bandwidthIndex) {
return false;
}
- if (mPrevBandwidthIndex < 0) {
+ if (mCurBandwidthIndex < 0) {
return true;
}
- if (bandwidthIndex == (size_t)mPrevBandwidthIndex) {
+ if (bandwidthIndex == (size_t)mCurBandwidthIndex) {
return false;
- } else if (bandwidthIndex > (size_t)mPrevBandwidthIndex) {
+ } else if (bandwidthIndex > (size_t)mCurBandwidthIndex) {
return canSwitchUp();
} else {
return true;
}
}
-void LiveSession::onCheckBandwidth() {
+void LiveSession::onCheckBandwidth(const sp<AMessage> &msg) {
size_t bandwidthIndex = getBandwidthIndex();
if (canSwitchBandwidthTo(bandwidthIndex)) {
changeConfiguration(-1ll /* timeUs */, bandwidthIndex);
} else {
- scheduleCheckBandwidthEvent();
+ // Come back and check again 10 seconds later in case there is nothing to do now.
+ // If we DO change configuration, once that completes it'll schedule a new
+ // check bandwidth event with an incremented mCheckBandwidthGeneration.
+ msg->post(10000000ll);
}
-
- // Handling the kWhatCheckBandwidth even here does _not_ automatically
- // schedule another one on return, only an explicit call to
- // scheduleCheckBandwidthEvent will do that.
- // This ensures that only one configuration change is ongoing at any
- // one time, once that completes it'll schedule another check bandwidth
- // event.
}
void LiveSession::postPrepared(status_t err) {
@@ -1358,6 +1730,9 @@ void LiveSession::postPrepared(status_t err) {
notify->post();
mInPreparationPhase = false;
+
+ mSwitchDownMonitor = new AMessage(kWhatCheckSwitchDown, id());
+ mSwitchDownMonitor->post();
}
} // namespace android
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 3f8fee5..7aacca6 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -28,6 +28,7 @@ struct ABuffer;
struct AnotherPacketSource;
struct DataSource;
struct HTTPBase;
+struct IMediaHTTPService;
struct LiveDataSource;
struct M3UParser;
struct PlaylistFetcher;
@@ -40,7 +41,8 @@ struct LiveSession : public AHandler {
};
LiveSession(
const sp<AMessage> &notify,
- uint32_t flags = 0, bool uidValid = false, uid_t uid = 0);
+ uint32_t flags,
+ const sp<IMediaHTTPService> &httpService);
enum StreamIndex {
kAudioIndex = 0,
@@ -68,7 +70,8 @@ struct LiveSession : public AHandler {
status_t seekTo(int64_t timeUs);
status_t getDuration(int64_t *durationUs) const;
- status_t getTrackInfo(Parcel *reply) const;
+ size_t getTrackCount() const;
+ sp<AMessage> getTrackInfo(size_t trackIndex) const;
status_t selectTrack(size_t index, bool select);
bool isSeekable() const;
@@ -105,6 +108,8 @@ private:
kWhatChangeConfiguration3 = 'chC3',
kWhatFinishDisconnect2 = 'fin2',
kWhatSwapped = 'swap',
+ kWhatCheckSwitchDown = 'ckSD',
+ kWhatSwitchDown = 'sDwn',
};
struct BandwidthItem {
@@ -121,9 +126,20 @@ private:
struct StreamItem {
const char *mType;
- AString mUri;
- StreamItem() : mType("") {}
- StreamItem(const char *type) : mType(type) {}
+ AString mUri, mNewUri;
+ size_t mCurDiscontinuitySeq;
+ int64_t mLastDequeuedTimeUs;
+ int64_t mLastSampleDurationUs;
+ StreamItem()
+ : mType(""),
+ mCurDiscontinuitySeq(0),
+ mLastDequeuedTimeUs(0),
+ mLastSampleDurationUs(0) {}
+ StreamItem(const char *type)
+ : mType(type),
+ mCurDiscontinuitySeq(0),
+ mLastDequeuedTimeUs(0),
+ mLastSampleDurationUs(0) {}
AString uriKey() {
AString key(mType);
key.append("URI");
@@ -134,10 +150,10 @@ private:
sp<AMessage> mNotify;
uint32_t mFlags;
- bool mUIDValid;
- uid_t mUID;
+ sp<IMediaHTTPService> mHTTPService;
bool mInPreparationPhase;
+ bool mBuffering[kMaxStreams];
sp<HTTPBase> mHTTPDataSource;
KeyedVector<String8, String8> mExtraHeaders;
@@ -145,7 +161,7 @@ private:
AString mMasterURL;
Vector<BandwidthItem> mBandwidthItems;
- ssize_t mPrevBandwidthIndex;
+ ssize_t mCurBandwidthIndex;
sp<M3UParser> mPlaylist;
@@ -161,6 +177,7 @@ private:
// we use this to track reconfiguration progress.
uint32_t mSwapMask;
+ KeyedVector<StreamType, sp<AnotherPacketSource> > mDiscontinuities;
KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources;
// A second set of packet sources that buffer content for the variant we're switching to.
KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources2;
@@ -172,6 +189,7 @@ private:
int32_t mCheckBandwidthGeneration;
int32_t mSwitchGeneration;
+ int32_t mSubtitleGeneration;
size_t mContinuationCounter;
sp<AMessage> mContinuation;
@@ -185,6 +203,13 @@ private:
uint32_t mDisconnectReplyID;
uint32_t mSeekReplyID;
+ bool mFirstTimeUsValid;
+ int64_t mFirstTimeUs;
+ int64_t mLastSeekTimeUs;
+ sp<AMessage> mSwitchDownMonitor;
+ KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs;
+ KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs;
+
sp<PlaylistFetcher> addFetcher(const char *uri);
void onConnect(const sp<AMessage> &msg);
@@ -216,9 +241,11 @@ private:
const char *url, uint8_t *curPlaylistHash, bool *unchanged);
size_t getBandwidthIndex();
+ int64_t latestMediaSegmentStartTimeUs();
static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *);
static StreamType indexToType(int idx);
+ static ssize_t typeToIndex(int32_t type);
void changeConfiguration(
int64_t timeUs, size_t bandwidthIndex, bool pickTrack = false);
@@ -226,6 +253,8 @@ private:
void onChangeConfiguration2(const sp<AMessage> &msg);
void onChangeConfiguration3(const sp<AMessage> &msg);
void onSwapped(const sp<AMessage> &msg);
+ void onCheckSwitchDown();
+ void onSwitchDown();
void tryToFinishBandwidthSwitch();
void scheduleCheckBandwidthEvent();
@@ -237,7 +266,7 @@ private:
void cancelBandwidthSwitch();
bool canSwitchBandwidthTo(size_t bandwidthIndex);
- void onCheckBandwidth();
+ void onCheckBandwidth(const sp<AMessage> &msg);
void finishDisconnect();
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index e31ad40..1651dee 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -23,6 +23,7 @@
#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
#include <media/mediaplayer.h>
@@ -58,8 +59,8 @@ struct M3UParser::MediaGroup : public RefBase {
void pickRandomMediaItems();
status_t selectTrack(size_t index, bool select);
- void getTrackInfo(Parcel* reply) const;
size_t countTracks() const;
+ sp<AMessage> getTrackInfo(size_t index) const;
protected:
virtual ~MediaGroup();
@@ -126,7 +127,7 @@ void M3UParser::MediaGroup::pickRandomMediaItems() {
mSelectedIndex = strtoul(value, &end, 10);
CHECK(end > value && *end == '\0');
- if (mSelectedIndex >= mMediaItems.size()) {
+ if (mSelectedIndex >= (ssize_t)mMediaItems.size()) {
mSelectedIndex = mMediaItems.size() - 1;
}
} else {
@@ -156,63 +157,70 @@ void M3UParser::MediaGroup::pickRandomMediaItems() {
}
status_t M3UParser::MediaGroup::selectTrack(size_t index, bool select) {
- if (mType != TYPE_SUBS) {
- ALOGE("only select subtitile tracks for now!");
+ if (mType != TYPE_SUBS && mType != TYPE_AUDIO) {
+ ALOGE("only select subtitile/audio tracks for now!");
return INVALID_OPERATION;
}
if (select) {
if (index >= mMediaItems.size()) {
- ALOGE("track %d does not exist", index);
+ ALOGE("track %zu does not exist", index);
return INVALID_OPERATION;
}
- if (mSelectedIndex == index) {
- ALOGE("track %d already selected", index);
+ if (mSelectedIndex == (ssize_t)index) {
+ ALOGE("track %zu already selected", index);
return BAD_VALUE;
}
- ALOGV("selected track %d", index);
+ ALOGV("selected track %zu", index);
mSelectedIndex = index;
} else {
- if (mSelectedIndex != index) {
- ALOGE("track %d is not selected", index);
+ if (mSelectedIndex != (ssize_t)index) {
+ ALOGE("track %zu is not selected", index);
return BAD_VALUE;
}
- ALOGV("unselected track %d", index);
+ ALOGV("unselected track %zu", index);
mSelectedIndex = -1;
}
return OK;
}
-void M3UParser::MediaGroup::getTrackInfo(Parcel* reply) const {
- for (size_t i = 0; i < mMediaItems.size(); ++i) {
- reply->writeInt32(2); // 2 fields
-
- if (mType == TYPE_AUDIO) {
- reply->writeInt32(MEDIA_TRACK_TYPE_AUDIO);
- } else if (mType == TYPE_VIDEO) {
- reply->writeInt32(MEDIA_TRACK_TYPE_VIDEO);
- } else if (mType == TYPE_SUBS) {
- reply->writeInt32(MEDIA_TRACK_TYPE_SUBTITLE);
- } else {
- reply->writeInt32(MEDIA_TRACK_TYPE_UNKNOWN);
- }
+size_t M3UParser::MediaGroup::countTracks() const {
+ return mMediaItems.size();
+}
+
+sp<AMessage> M3UParser::MediaGroup::getTrackInfo(size_t index) const {
+ if (index >= mMediaItems.size()) {
+ return NULL;
+ }
- const Media &item = mMediaItems.itemAt(i);
- const char *lang = item.mLanguage.empty() ? "und" : item.mLanguage.c_str();
- reply->writeString16(String16(lang));
+ sp<AMessage> format = new AMessage();
- if (mType == TYPE_SUBS) {
- // TO-DO: pass in a MediaFormat instead
- reply->writeInt32(!!(item.mFlags & MediaGroup::FLAG_AUTOSELECT));
- reply->writeInt32(!!(item.mFlags & MediaGroup::FLAG_DEFAULT));
- reply->writeInt32(!!(item.mFlags & MediaGroup::FLAG_FORCED));
- }
+ int32_t trackType;
+ if (mType == TYPE_AUDIO) {
+ trackType = MEDIA_TRACK_TYPE_AUDIO;
+ } else if (mType == TYPE_VIDEO) {
+ trackType = MEDIA_TRACK_TYPE_VIDEO;
+ } else if (mType == TYPE_SUBS) {
+ trackType = MEDIA_TRACK_TYPE_SUBTITLE;
+ } else {
+ trackType = MEDIA_TRACK_TYPE_UNKNOWN;
+ }
+ format->setInt32("type", trackType);
+
+ const Media &item = mMediaItems.itemAt(index);
+ const char *lang = item.mLanguage.empty() ? "und" : item.mLanguage.c_str();
+ format->setString("language", lang);
+
+ if (mType == TYPE_SUBS) {
+ // TO-DO: pass in a MediaFormat instead
+ format->setString("mime", MEDIA_MIMETYPE_TEXT_VTT);
+ format->setInt32("auto", !!(item.mFlags & MediaGroup::FLAG_AUTOSELECT));
+ format->setInt32("default", !!(item.mFlags & MediaGroup::FLAG_DEFAULT));
+ format->setInt32("forced", !!(item.mFlags & MediaGroup::FLAG_FORCED));
}
-}
-size_t M3UParser::MediaGroup::countTracks() const {
- return mMediaItems.size();
+ return format;
}
bool M3UParser::MediaGroup::getActiveURI(AString *uri) const {
@@ -238,6 +246,7 @@ M3UParser::M3UParser(
mIsVariantPlaylist(false),
mIsComplete(false),
mIsEvent(false),
+ mDiscontinuitySeq(0),
mSelectedIndex(-1) {
mInitCheck = parse(data, size);
}
@@ -265,6 +274,10 @@ bool M3UParser::isEvent() const {
return mIsEvent;
}
+size_t M3UParser::getDiscontinuitySeq() const {
+ return mDiscontinuitySeq;
+}
+
sp<AMessage> M3UParser::meta() {
return mMeta;
}
@@ -319,17 +332,24 @@ status_t M3UParser::selectTrack(size_t index, bool select) {
return INVALID_OPERATION;
}
-status_t M3UParser::getTrackInfo(Parcel* reply) const {
+size_t M3UParser::getTrackCount() const {
size_t trackCount = 0;
for (size_t i = 0; i < mMediaGroups.size(); ++i) {
trackCount += mMediaGroups.valueAt(i)->countTracks();
}
- reply->writeInt32(trackCount);
+ return trackCount;
+}
- for (size_t i = 0; i < mMediaGroups.size(); ++i) {
- mMediaGroups.valueAt(i)->getTrackInfo(reply);
+sp<AMessage> M3UParser::getTrackInfo(size_t index) const {
+ for (size_t i = 0, ii = index; i < mMediaGroups.size(); ++i) {
+ sp<MediaGroup> group = mMediaGroups.valueAt(i);
+ size_t tracks = group->countTracks();
+ if (ii < tracks) {
+ return group->getTrackInfo(ii);
+ }
+ ii -= tracks;
}
- return OK;
+ return NULL;
}
ssize_t M3UParser::getSelectedIndex() const {
@@ -398,6 +418,8 @@ static bool MakeURL(const char *baseURL, const char *url, AString *out) {
// Base URL must be absolute
return false;
}
+ const size_t schemeEnd = (strstr(baseURL, "//") - baseURL) + 2;
+ CHECK(schemeEnd == 7 || schemeEnd == 8);
if (!strncasecmp("http://", url, 7) || !strncasecmp("https://", url, 8)) {
// "url" is already an absolute URL, ignore base URL.
@@ -442,7 +464,7 @@ static bool MakeURL(const char *baseURL, const char *url, AString *out) {
// Check whether the found slash actually is part of the path
// and not part of the "http://".
- if (end > 6) {
+ if (end >= schemeEnd) {
out->setTo(baseURL, end);
} else {
out->setTo(baseURL);
@@ -550,6 +572,12 @@ status_t M3UParser::parse(const void *_data, size_t size) {
}
} else if (line.startsWith("#EXT-X-MEDIA")) {
err = parseMedia(line);
+ } else if (line.startsWith("#EXT-X-DISCONTINUITY-SEQUENCE")) {
+ size_t seq;
+ err = parseDiscontinuitySequence(line, &seq);
+ if (err == OK) {
+ mDiscontinuitySeq = seq;
+ }
}
if (err != OK) {
@@ -713,6 +741,9 @@ status_t M3UParser::parseStreamInf(
key.tolower();
const AString &codecs = unquoteString(val);
+ if (meta->get() == NULL) {
+ *meta = new AMessage;
+ }
(*meta)->setString(key.c_str(), codecs.c_str());
} else if (!strcasecmp("audio", key.c_str())
|| !strcasecmp("video", key.c_str())
@@ -736,6 +767,9 @@ status_t M3UParser::parseStreamInf(
}
key.tolower();
+ if (meta->get() == NULL) {
+ *meta = new AMessage;
+ }
(*meta)->setString(key.c_str(), groupID.c_str());
}
}
@@ -798,8 +832,8 @@ status_t M3UParser::parseCipherInfo(
if (MakeURL(baseURI.c_str(), val.c_str(), &absURI)) {
val = absURI;
} else {
- ALOGE("failed to make absolute url for '%s'.",
- val.c_str());
+ ALOGE("failed to make absolute url for %s.",
+ uriDebugString(baseURI).c_str());
}
}
@@ -1087,6 +1121,30 @@ status_t M3UParser::parseMedia(const AString &line) {
}
// static
+status_t M3UParser::parseDiscontinuitySequence(const AString &line, size_t *seq) {
+ ssize_t colonPos = line.find(":");
+
+ if (colonPos < 0) {
+ return ERROR_MALFORMED;
+ }
+
+ int32_t x;
+ status_t err = ParseInt32(line.c_str() + colonPos + 1, &x);
+ if (err != OK) {
+ return err;
+ }
+
+ if (x < 0) {
+ return ERROR_MALFORMED;
+ }
+
+ if (seq) {
+ *seq = x;
+ }
+ return OK;
+}
+
+// static
status_t M3UParser::ParseInt32(const char *s, int32_t *x) {
char *end;
long lval = strtol(s, &end, 10);
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index ccd6556..d588afe 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -34,6 +34,7 @@ struct M3UParser : public RefBase {
bool isVariantPlaylist() const;
bool isComplete() const;
bool isEvent() const;
+ size_t getDiscontinuitySeq() const;
sp<AMessage> meta();
@@ -42,7 +43,8 @@ struct M3UParser : public RefBase {
void pickRandomMediaItems();
status_t selectTrack(size_t index, bool select);
- status_t getTrackInfo(Parcel* reply) const;
+ size_t getTrackCount() const;
+ sp<AMessage> getTrackInfo(size_t index) const;
ssize_t getSelectedIndex() const;
bool getTypeURI(size_t index, const char *key, AString *uri) const;
@@ -65,6 +67,7 @@ private:
bool mIsVariantPlaylist;
bool mIsComplete;
bool mIsEvent;
+ size_t mDiscontinuitySeq;
sp<AMessage> mMeta;
Vector<Item> mItems;
@@ -93,6 +96,8 @@ private:
status_t parseMedia(const AString &line);
+ static status_t parseDiscontinuitySequence(const AString &line, size_t *seq);
+
static status_t ParseInt32(const char *s, int32_t *x);
static status_t ParseDouble(const char *s, double *x);
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 668cbd4..30fa868 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -40,6 +40,7 @@
#include <media/stagefright/Utils.h>
#include <ctype.h>
+#include <inttypes.h>
#include <openssl/aes.h>
#include <openssl/md5.h>
@@ -48,31 +49,36 @@ namespace android {
// static
const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll;
const int64_t PlaylistFetcher::kMaxMonitorDelayUs = 3000000ll;
-const int32_t PlaylistFetcher::kDownloadBlockSize = 192;
+const int32_t PlaylistFetcher::kDownloadBlockSize = 2048;
const int32_t PlaylistFetcher::kNumSkipFrames = 10;
PlaylistFetcher::PlaylistFetcher(
const sp<AMessage> &notify,
const sp<LiveSession> &session,
- const char *uri)
+ const char *uri,
+ int32_t subtitleGeneration)
: mNotify(notify),
mStartTimeUsNotify(notify->dup()),
mSession(session),
mURI(uri),
mStreamTypeMask(0),
mStartTimeUs(-1ll),
- mMinStartTimeUs(0ll),
- mStopParams(NULL),
+ mSegmentStartTimeUs(-1ll),
+ mDiscontinuitySeq(-1ll),
+ mStartTimeUsRelative(false),
mLastPlaylistFetchTimeUs(-1ll),
mSeqNumber(-1),
mNumRetries(0),
mStartup(true),
+ mAdaptive(false),
mPrepared(false),
mNextPTSTimeUs(-1ll),
mMonitorQueueGeneration(0),
+ mSubtitleGeneration(subtitleGeneration),
mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY),
mFirstPTSValid(false),
- mAbsoluteTimeAnchorUs(0ll) {
+ mAbsoluteTimeAnchorUs(0ll),
+ mVideoBuffer(new AnotherPacketSource(NULL)) {
memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
mStartTimeUsNotify->setInt32("streamMask", 0);
@@ -316,7 +322,7 @@ void PlaylistFetcher::postMonitorQueue(int64_t delayUs, int64_t minDelayUs) {
maxDelayUs = minDelayUs;
}
if (delayUs > maxDelayUs) {
- ALOGV("Need to refresh playlist in %lld", maxDelayUs);
+ ALOGV("Need to refresh playlist in %" PRId64 , maxDelayUs);
delayUs = maxDelayUs;
}
sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id());
@@ -333,8 +339,9 @@ void PlaylistFetcher::startAsync(
const sp<AnotherPacketSource> &videoSource,
const sp<AnotherPacketSource> &subtitleSource,
int64_t startTimeUs,
- int64_t minStartTimeUs,
- int32_t startSeqNumberHint) {
+ int64_t segmentStartTimeUs,
+ int32_t startDiscontinuitySeq,
+ bool adaptive) {
sp<AMessage> msg = new AMessage(kWhatStart, id());
uint32_t streamTypeMask = 0ul;
@@ -356,8 +363,9 @@ void PlaylistFetcher::startAsync(
msg->setInt32("streamTypeMask", streamTypeMask);
msg->setInt64("startTimeUs", startTimeUs);
- msg->setInt64("minStartTimeUs", minStartTimeUs);
- msg->setInt32("startSeqNumberHint", startSeqNumberHint);
+ msg->setInt64("segmentStartTimeUs", segmentStartTimeUs);
+ msg->setInt32("startDiscontinuitySeq", startDiscontinuitySeq);
+ msg->setInt32("adaptive", adaptive);
msg->post();
}
@@ -365,9 +373,9 @@ void PlaylistFetcher::pauseAsync() {
(new AMessage(kWhatPause, id()))->post();
}
-void PlaylistFetcher::stopAsync(bool selfTriggered) {
+void PlaylistFetcher::stopAsync(bool clear) {
sp<AMessage> msg = new AMessage(kWhatStop, id());
- msg->setInt32("selfTriggered", selfTriggered);
+ msg->setInt32("clear", clear);
msg->post();
}
@@ -447,10 +455,13 @@ status_t PlaylistFetcher::onStart(const sp<AMessage> &msg) {
CHECK(msg->findInt32("streamTypeMask", (int32_t *)&streamTypeMask));
int64_t startTimeUs;
- int32_t startSeqNumberHint;
+ int64_t segmentStartTimeUs;
+ int32_t startDiscontinuitySeq;
+ int32_t adaptive;
CHECK(msg->findInt64("startTimeUs", &startTimeUs));
- CHECK(msg->findInt64("minStartTimeUs", (int64_t *) &mMinStartTimeUs));
- CHECK(msg->findInt32("startSeqNumberHint", &startSeqNumberHint));
+ CHECK(msg->findInt64("segmentStartTimeUs", &segmentStartTimeUs));
+ CHECK(msg->findInt32("startDiscontinuitySeq", &startDiscontinuitySeq));
+ CHECK(msg->findInt32("adaptive", &adaptive));
if (streamTypeMask & LiveSession::STREAMTYPE_AUDIO) {
void *ptr;
@@ -480,16 +491,16 @@ status_t PlaylistFetcher::onStart(const sp<AMessage> &msg) {
}
mStreamTypeMask = streamTypeMask;
- mStartTimeUs = startTimeUs;
- if (mStartTimeUs >= 0ll) {
+ mSegmentStartTimeUs = segmentStartTimeUs;
+ mDiscontinuitySeq = startDiscontinuitySeq;
+
+ if (startTimeUs >= 0) {
+ mStartTimeUs = startTimeUs;
mSeqNumber = -1;
mStartup = true;
mPrepared = false;
- }
-
- if (startSeqNumberHint >= 0) {
- mSeqNumber = startSeqNumberHint;
+ mAdaptive = adaptive;
}
postMonitorQueue();
@@ -504,11 +515,9 @@ void PlaylistFetcher::onPause() {
void PlaylistFetcher::onStop(const sp<AMessage> &msg) {
cancelMonitorQueue();
- int32_t selfTriggered;
- CHECK(msg->findInt32("selfTriggered", &selfTriggered));
- if (!selfTriggered) {
- // Self triggered stops only happen during switching, in which case we do not want
- // to clear the discontinuities queued at the end of packet sources.
+ int32_t clear;
+ CHECK(msg->findInt32("clear", &clear));
+ if (clear) {
for (size_t i = 0; i < mPacketSources.size(); i++) {
sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
packetSource->clear();
@@ -550,15 +559,16 @@ status_t PlaylistFetcher::onResumeUntil(const sp<AMessage> &msg) {
}
// Don't resume if we would stop within a resume threshold.
+ int32_t discontinuitySeq;
int64_t latestTimeUs = 0, stopTimeUs = 0;
- sp<AMessage> latestMeta = packetSource->getLatestMeta();
+ sp<AMessage> latestMeta = packetSource->getLatestDequeuedMeta();
if (latestMeta != NULL
- && (latestMeta->findInt64("timeUs", &latestTimeUs)
- && params->findInt64(stopKey, &stopTimeUs))) {
- int64_t diffUs = stopTimeUs - latestTimeUs;
- if (diffUs < resumeThreshold(latestMeta)) {
- stop = true;
- }
+ && latestMeta->findInt32("discontinuitySeq", &discontinuitySeq)
+ && discontinuitySeq == mDiscontinuitySeq
+ && latestMeta->findInt64("timeUs", &latestTimeUs)
+ && params->findInt64(stopKey, &stopTimeUs)
+ && stopTimeUs - latestTimeUs < resumeThreshold(latestMeta)) {
+ stop = true;
}
}
@@ -566,7 +576,7 @@ status_t PlaylistFetcher::onResumeUntil(const sp<AMessage> &msg) {
for (size_t i = 0; i < mPacketSources.size(); i++) {
mPacketSources.valueAt(i)->queueAccessUnit(mSession->createFormatChangeBuffer());
}
- stopAsync(/* selfTriggered = */ true);
+ stopAsync(/* clear = */ false);
return OK;
}
@@ -586,7 +596,10 @@ void PlaylistFetcher::notifyError(status_t err) {
void PlaylistFetcher::queueDiscontinuity(
ATSParser::DiscontinuityType type, const sp<AMessage> &extra) {
for (size_t i = 0; i < mPacketSources.size(); ++i) {
- mPacketSources.valueAt(i)->queueDiscontinuity(type, extra);
+ // do not discard buffer upon #EXT-X-DISCONTINUITY tag
+ // (seek will discard buffer by abandoning old fetchers)
+ mPacketSources.valueAt(i)->queueDiscontinuity(
+ type, extra, false /* discard */);
}
}
@@ -627,7 +640,7 @@ void PlaylistFetcher::onMonitorQueue() {
int64_t bufferedStreamDurationUs =
mPacketSources.valueAt(i)->getBufferedDurationUs(&finalResult);
- ALOGV("buffered %lld for stream %d",
+ ALOGV("buffered %" PRId64 " for stream %d",
bufferedStreamDurationUs, mPacketSources.keyAt(i));
if (bufferedStreamDurationUs > bufferedDurationUs) {
bufferedDurationUs = bufferedStreamDurationUs;
@@ -640,7 +653,7 @@ void PlaylistFetcher::onMonitorQueue() {
if (!mPrepared && bufferedDurationUs > targetDurationUs && downloadMore) {
mPrepared = true;
- ALOGV("prepared, buffered=%lld > %lld",
+ ALOGV("prepared, buffered=%" PRId64 " > %" PRId64 "",
bufferedDurationUs, targetDurationUs);
sp<AMessage> msg = mNotify->dup();
msg->setInt32("what", kWhatTemporarilyDoneFetching);
@@ -648,7 +661,7 @@ void PlaylistFetcher::onMonitorQueue() {
}
if (finalResult == OK && downloadMore) {
- ALOGV("monitoring, buffered=%lld < %lld",
+ ALOGV("monitoring, buffered=%" PRId64 " < %" PRId64 "",
bufferedDurationUs, durationToBufferUs);
// delay the next download slightly; hopefully this gives other concurrent fetchers
// a better chance to run.
@@ -664,7 +677,7 @@ void PlaylistFetcher::onMonitorQueue() {
msg->post();
int64_t delayUs = mPrepared ? kMaxMonitorDelayUs : targetDurationUs / 2;
- ALOGV("pausing for %lld, buffered=%lld > %lld",
+ ALOGV("pausing for %" PRId64 ", buffered=%" PRId64 " > %" PRId64 "",
delayUs, bufferedDurationUs, durationToBufferUs);
// :TRICKY: need to enforce minimum delay because the delay to
// refresh the playlist will become 0
@@ -721,38 +734,55 @@ void PlaylistFetcher::onDownloadNext() {
firstSeqNumberInPlaylist = 0;
}
- bool seekDiscontinuity = false;
- bool explicitDiscontinuity = false;
+ bool discontinuity = false;
const int32_t lastSeqNumberInPlaylist =
firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
- if (mStartup && mSeqNumber >= 0
- && (mSeqNumber < firstSeqNumberInPlaylist || mSeqNumber > lastSeqNumberInPlaylist)) {
- // in case we guessed wrong during reconfiguration, try fetching the latest content.
- mSeqNumber = lastSeqNumberInPlaylist;
+ if (mDiscontinuitySeq < 0) {
+ mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
}
if (mSeqNumber < 0) {
CHECK_GE(mStartTimeUs, 0ll);
- if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
- mSeqNumber = getSeqNumberForTime(mStartTimeUs);
- ALOGV("Initial sequence number for time %lld is %ld from (%ld .. %ld)",
+ if (mSegmentStartTimeUs < 0) {
+ if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
+ // If this is a live session, start 3 segments from the end on connect
+ mSeqNumber = lastSeqNumberInPlaylist - 3;
+ if (mSeqNumber < firstSeqNumberInPlaylist) {
+ mSeqNumber = firstSeqNumberInPlaylist;
+ }
+ } else {
+ mSeqNumber = getSeqNumberForTime(mStartTimeUs);
+ mStartTimeUs -= getSegmentStartTimeUs(mSeqNumber);
+ }
+ mStartTimeUsRelative = true;
+ ALOGV("Initial sequence number for time %" PRId64 " is %d from (%d .. %d)",
mStartTimeUs, mSeqNumber, firstSeqNumberInPlaylist,
lastSeqNumberInPlaylist);
} else {
- // If this is a live session, start 3 segments from the end.
- mSeqNumber = lastSeqNumberInPlaylist - 3;
+ mSeqNumber = getSeqNumberForTime(mSegmentStartTimeUs);
+ if (mAdaptive) {
+ // avoid double fetch/decode
+ mSeqNumber += 1;
+ }
+ ssize_t minSeq = getSeqNumberForDiscontinuity(mDiscontinuitySeq);
+ if (mSeqNumber < minSeq) {
+ mSeqNumber = minSeq;
+ }
+
if (mSeqNumber < firstSeqNumberInPlaylist) {
mSeqNumber = firstSeqNumberInPlaylist;
}
- ALOGV("Initial sequence number for live event %ld from (%ld .. %ld)",
+
+ if (mSeqNumber > lastSeqNumberInPlaylist) {
+ mSeqNumber = lastSeqNumberInPlaylist;
+ }
+ ALOGV("Initial sequence number for live event %d from (%d .. %d)",
mSeqNumber, firstSeqNumberInPlaylist,
lastSeqNumberInPlaylist);
}
-
- mStartTimeUs = -1ll;
}
if (mSeqNumber < firstSeqNumberInPlaylist
@@ -771,7 +801,8 @@ void PlaylistFetcher::onDownloadNext() {
if (delayUs > kMaxMonitorDelayUs) {
delayUs = kMaxMonitorDelayUs;
}
- ALOGV("sequence number high: %ld from (%ld .. %ld), monitor in %lld (retry=%d)",
+ ALOGV("sequence number high: %d from (%d .. %d), "
+ "monitor in %" PRId64 " (retry=%d)",
mSeqNumber, firstSeqNumberInPlaylist,
lastSeqNumberInPlaylist, delayUs, mNumRetries);
postMonitorQueue(delayUs);
@@ -789,14 +820,14 @@ void PlaylistFetcher::onDownloadNext() {
if (mSeqNumber < firstSeqNumberInPlaylist) {
mSeqNumber = firstSeqNumberInPlaylist;
}
- explicitDiscontinuity = true;
+ discontinuity = true;
// fall through
} else {
ALOGE("Cannot find sequence number %d in playlist "
"(contains %d - %d)",
mSeqNumber, firstSeqNumberInPlaylist,
- firstSeqNumberInPlaylist + mPlaylist->size() - 1);
+ firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1);
notifyError(ERROR_END_OF_STREAM);
return;
@@ -814,7 +845,8 @@ void PlaylistFetcher::onDownloadNext() {
int32_t val;
if (itemMeta->findInt32("discontinuity", &val) && val != 0) {
- explicitDiscontinuity = true;
+ mDiscontinuitySeq++;
+ discontinuity = true;
}
int64_t range_offset, range_length;
@@ -845,6 +877,7 @@ void PlaylistFetcher::onDownloadNext() {
}
// block-wise download
+ bool startup = mStartup;
ssize_t bytesRead;
do {
bytesRead = mSession->fetchFile(
@@ -874,7 +907,7 @@ void PlaylistFetcher::onDownloadNext() {
return;
}
- if (mStartup || seekDiscontinuity || explicitDiscontinuity) {
+ if (startup || discontinuity) {
// Signal discontinuity.
if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
@@ -884,16 +917,17 @@ void PlaylistFetcher::onDownloadNext() {
mNextPTSTimeUs = getSegmentStartTimeUs(mSeqNumber);
}
- if (seekDiscontinuity || explicitDiscontinuity) {
- ALOGI("queueing discontinuity (seek=%d, explicit=%d)",
- seekDiscontinuity, explicitDiscontinuity);
+ if (discontinuity) {
+ ALOGI("queueing discontinuity (explicit=%d)", discontinuity);
queueDiscontinuity(
- explicitDiscontinuity
- ? ATSParser::DISCONTINUITY_FORMATCHANGE
- : ATSParser::DISCONTINUITY_SEEK,
+ ATSParser::DISCONTINUITY_FORMATCHANGE,
NULL /* extra */);
+
+ discontinuity = false;
}
+
+ startup = false;
}
err = OK;
@@ -913,23 +947,19 @@ void PlaylistFetcher::onDownloadNext() {
}
if (err == -EAGAIN) {
- // bad starting sequence number hint
+ // starting sequence number too low/high
+ mTSParser.clear();
postMonitorQueue();
return;
- }
-
- if (err == ERROR_OUT_OF_RANGE) {
+ } else if (err == ERROR_OUT_OF_RANGE) {
// reached stopping point
- stopAsync(/* selfTriggered = */ true);
+ stopAsync(/* clear = */ false);
return;
- }
-
- if (err != OK) {
+ } else if (err != OK) {
notifyError(err);
return;
}
- mStartup = false;
} while (bytesRead != 0);
if (bufferStartsWithTsSyncByte(buffer)) {
@@ -981,7 +1011,16 @@ void PlaylistFetcher::onDownloadNext() {
// bulk extract non-ts files
if (tsBuffer == NULL) {
- err = extractAndQueueAccessUnits(buffer, itemMeta);
+ err = extractAndQueueAccessUnits(buffer, itemMeta);
+ if (err == -EAGAIN) {
+ // starting sequence number too low/high
+ postMonitorQueue();
+ return;
+ } else if (err == ERROR_OUT_OF_RANGE) {
+ // reached stopping point
+ stopAsync(/* clear = */false);
+ return;
+ }
}
if (err != OK) {
@@ -994,6 +1033,66 @@ void PlaylistFetcher::onDownloadNext() {
postMonitorQueue();
}
+int32_t PlaylistFetcher::getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const {
+ int32_t firstSeqNumberInPlaylist, lastSeqNumberInPlaylist;
+ if (mPlaylist->meta() == NULL
+ || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) {
+ firstSeqNumberInPlaylist = 0;
+ }
+ lastSeqNumberInPlaylist = firstSeqNumberInPlaylist + mPlaylist->size() - 1;
+
+ int32_t index = mSeqNumber - firstSeqNumberInPlaylist - 1;
+ while (index >= 0 && anchorTimeUs > mStartTimeUs) {
+ sp<AMessage> itemMeta;
+ CHECK(mPlaylist->itemAt(index, NULL /* uri */, &itemMeta));
+
+ int64_t itemDurationUs;
+ CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+ anchorTimeUs -= itemDurationUs;
+ --index;
+ }
+
+ int32_t newSeqNumber = firstSeqNumberInPlaylist + index + 1;
+ if (newSeqNumber <= lastSeqNumberInPlaylist) {
+ return newSeqNumber;
+ } else {
+ return lastSeqNumberInPlaylist;
+ }
+}
+
+int32_t PlaylistFetcher::getSeqNumberForDiscontinuity(size_t discontinuitySeq) const {
+ int32_t firstSeqNumberInPlaylist;
+ if (mPlaylist->meta() == NULL
+ || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) {
+ firstSeqNumberInPlaylist = 0;
+ }
+
+ size_t curDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
+ if (discontinuitySeq < curDiscontinuitySeq) {
+ return firstSeqNumberInPlaylist <= 0 ? 0 : (firstSeqNumberInPlaylist - 1);
+ }
+
+ size_t index = 0;
+ while (index < mPlaylist->size()) {
+ sp<AMessage> itemMeta;
+ CHECK(mPlaylist->itemAt( index, NULL /* uri */, &itemMeta));
+
+ int64_t discontinuity;
+ if (itemMeta->findInt64("discontinuity", &discontinuity)) {
+ curDiscontinuitySeq++;
+ }
+
+ if (curDiscontinuitySeq == discontinuitySeq) {
+ return firstSeqNumberInPlaylist + index;
+ }
+
+ ++index;
+ }
+
+ return firstSeqNumberInPlaylist + mPlaylist->size();
+}
+
int32_t PlaylistFetcher::getSeqNumberForTime(int64_t timeUs) const {
int32_t firstSeqNumberInPlaylist;
if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
@@ -1026,6 +1125,23 @@ int32_t PlaylistFetcher::getSeqNumberForTime(int64_t timeUs) const {
return firstSeqNumberInPlaylist + index;
}
+const sp<ABuffer> &PlaylistFetcher::setAccessUnitProperties(
+ const sp<ABuffer> &accessUnit, const sp<AnotherPacketSource> &source, bool discard) {
+ sp<MetaData> format = source->getFormat();
+ if (format != NULL) {
+ // for simplicity, store a reference to the format in each unit
+ accessUnit->meta()->setObject("format", format);
+ }
+
+ if (discard) {
+ accessUnit->meta()->setInt32("discard", discard);
+ }
+
+ accessUnit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
+ accessUnit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+ return accessUnit;
+}
+
status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer) {
if (mTSParser == NULL) {
// Use TS_TIMESTAMPS_ARE_ABSOLUTE so pts carry over between fetchers.
@@ -1041,7 +1157,9 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
mTSParser->signalDiscontinuity(
ATSParser::DISCONTINUITY_SEEK, extra);
+ mAbsoluteTimeAnchorUs = mNextPTSTimeUs;
mNextPTSTimeUs = -1ll;
+ mFirstPTSValid = false;
}
size_t offset = 0;
@@ -1101,43 +1219,108 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
&& source->dequeueAccessUnit(&accessUnit) == OK) {
CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
- if (mMinStartTimeUs > 0) {
- if (timeUs < mMinStartTimeUs) {
- // TODO untested path
- // try a later ts
- int32_t targetDuration;
- mPlaylist->meta()->findInt32("target-duration", &targetDuration);
- int32_t incr = (mMinStartTimeUs - timeUs) / 1000000 / targetDuration;
- if (incr == 0) {
- // increment mSeqNumber by at least one
- incr = 1;
+
+ if (mStartup) {
+ if (!mFirstPTSValid) {
+ mFirstTimeUs = timeUs;
+ mFirstPTSValid = true;
+ }
+ if (mStartTimeUsRelative) {
+ timeUs -= mFirstTimeUs;
+ if (timeUs < 0) {
+ timeUs = 0;
}
- mSeqNumber += incr;
- err = -EAGAIN;
- break;
- } else {
- int64_t startTimeUs;
- if (mStartTimeUsNotify != NULL
- && !mStartTimeUsNotify->findInt64(key, &startTimeUs)) {
- mStartTimeUsNotify->setInt64(key, timeUs);
-
- uint32_t streamMask = 0;
- mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
- streamMask |= mPacketSources.keyAt(i);
- mStartTimeUsNotify->setInt32("streamMask", streamMask);
-
- if (streamMask == mStreamTypeMask) {
- mStartTimeUsNotify->post();
- mStartTimeUsNotify.clear();
- }
+ }
+
+ if (timeUs < mStartTimeUs) {
+ // buffer up to the closest preceding IDR frame
+ ALOGV("timeUs %" PRId64 " us < mStartTimeUs %" PRId64 " us",
+ timeUs, mStartTimeUs);
+ const char *mime;
+ sp<MetaData> format = source->getFormat();
+ bool isAvc = false;
+ if (format != NULL && format->findCString(kKeyMIMEType, &mime)
+ && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ isAvc = true;
+ }
+ if (isAvc && IsIDR(accessUnit)) {
+ mVideoBuffer->clear();
+ }
+ if (isAvc) {
+ mVideoBuffer->queueAccessUnit(accessUnit);
+ }
+
+ continue;
+ }
+ }
+
+ CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+ if (mStartTimeUsNotify != NULL && timeUs > mStartTimeUs) {
+
+ int32_t targetDurationSecs;
+ CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+ int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+ // mStartup
+ // mStartup is true until we have queued a packet for all the streams
+ // we are fetching. We queue packets whose timestamps are greater than
+ // mStartTimeUs.
+ // mSegmentStartTimeUs >= 0
+ // mSegmentStartTimeUs is non-negative when adapting or switching tracks
+ // timeUs - mStartTimeUs > targetDurationUs:
+ // This and the 2 above conditions should only happen when adapting in a live
+ // stream; the old fetcher has already fetched to mStartTimeUs; the new fetcher
+ // would start fetching after timeUs, which should be greater than mStartTimeUs;
+ // the old fetcher would then continue fetching data until timeUs. We don't want
+ // timeUs to be too far ahead of mStartTimeUs because we want the old fetcher to
+ // stop as early as possible. The definition of being "too far ahead" is
+ // arbitrary; here we use targetDurationUs as threshold.
+ if (mStartup && mSegmentStartTimeUs >= 0
+ && timeUs - mStartTimeUs > targetDurationUs) {
+ // we just guessed a starting timestamp that is too high when adapting in a
+ // live stream; re-adjust based on the actual timestamp extracted from the
+ // media segment; if we didn't move backward after the re-adjustment
+ // (newSeqNumber), start at least 1 segment prior.
+ int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs);
+ if (newSeqNumber >= mSeqNumber) {
+ --mSeqNumber;
+ } else {
+ mSeqNumber = newSeqNumber;
+ }
+ mStartTimeUsNotify = mNotify->dup();
+ mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
+ return -EAGAIN;
+ }
+
+ int32_t seq;
+ if (!mStartTimeUsNotify->findInt32("discontinuitySeq", &seq)) {
+ mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
+ }
+ int64_t startTimeUs;
+ if (!mStartTimeUsNotify->findInt64(key, &startTimeUs)) {
+ mStartTimeUsNotify->setInt64(key, timeUs);
+
+ uint32_t streamMask = 0;
+ mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
+ streamMask |= mPacketSources.keyAt(i);
+ mStartTimeUsNotify->setInt32("streamMask", streamMask);
+
+ if (streamMask == mStreamTypeMask) {
+ mStartup = false;
+ mStartTimeUsNotify->post();
+ mStartTimeUsNotify.clear();
}
}
}
if (mStopParams != NULL) {
// Queue discontinuity in original stream.
+ int32_t discontinuitySeq;
int64_t stopTimeUs;
- if (!mStopParams->findInt64(key, &stopTimeUs) || timeUs >= stopTimeUs) {
+ if (!mStopParams->findInt32("discontinuitySeq", &discontinuitySeq)
+ || discontinuitySeq > mDiscontinuitySeq
+ || !mStopParams->findInt64(key, &stopTimeUs)
+ || (discontinuitySeq == mDiscontinuitySeq
+ && timeUs >= stopTimeUs)) {
packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
mStreamTypeMask &= ~stream;
mPacketSources.removeItemsAt(i);
@@ -1146,15 +1329,18 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
}
// Note that we do NOT dequeue any discontinuities except for format change.
-
- // for simplicity, store a reference to the format in each unit
- sp<MetaData> format = source->getFormat();
- if (format != NULL) {
- accessUnit->meta()->setObject("format", format);
+ if (stream == LiveSession::STREAMTYPE_VIDEO) {
+ const bool discard = true;
+ status_t status;
+ while (mVideoBuffer->hasBufferAvailable(&status)) {
+ sp<ABuffer> videoBuffer;
+ mVideoBuffer->dequeueAccessUnit(&videoBuffer);
+ setAccessUnitProperties(videoBuffer, source, discard);
+ packetSource->queueAccessUnit(videoBuffer);
+ }
}
- // Stash the sequence number so we can hint future playlist where to start at.
- accessUnit->meta()->setInt32("seq", mSeqNumber);
+ setAccessUnitProperties(accessUnit, source);
packetSource->queueAccessUnit(accessUnit);
}
@@ -1180,9 +1366,35 @@ status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &bu
return OK;
}
+/* static */
+bool PlaylistFetcher::bufferStartsWithWebVTTMagicSequence(
+ const sp<ABuffer> &buffer) {
+ size_t pos = 0;
+
+ // skip possible BOM
+ if (buffer->size() >= pos + 3 &&
+ !memcmp("\xef\xbb\xbf", buffer->data() + pos, 3)) {
+ pos += 3;
+ }
+
+ // accept WEBVTT followed by SPACE, TAB or (CR) LF
+ if (buffer->size() < pos + 6 ||
+ memcmp("WEBVTT", buffer->data() + pos, 6)) {
+ return false;
+ }
+ pos += 6;
+
+ if (buffer->size() == pos) {
+ return true;
+ }
+
+ uint8_t sep = buffer->data()[pos];
+ return sep == ' ' || sep == '\t' || sep == '\n' || sep == '\r';
+}
+
status_t PlaylistFetcher::extractAndQueueAccessUnits(
const sp<ABuffer> &buffer, const sp<AMessage> &itemMeta) {
- if (buffer->size() >= 7 && !memcmp("WEBVTT\n", buffer->data(), 7)) {
+ if (bufferStartsWithWebVTTMagicSequence(buffer)) {
if (mStreamTypeMask != LiveSession::STREAMTYPE_SUBTITLES) {
ALOGE("This stream only contains subtitles.");
return ERROR_MALFORMED;
@@ -1195,7 +1407,9 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits(
CHECK(itemMeta->findInt64("durationUs", &durationUs));
buffer->meta()->setInt64("timeUs", getSegmentStartTimeUs(mSeqNumber));
buffer->meta()->setInt64("durationUs", durationUs);
- buffer->meta()->setInt32("seq", mSeqNumber);
+ buffer->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+ buffer->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
+ buffer->meta()->setInt32("subtitleGeneration", mSubtitleGeneration);
packetSource->queueAccessUnit(buffer);
return OK;
@@ -1261,14 +1475,6 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits(
firstID3Tag = false;
}
- if (!mFirstPTSValid) {
- mFirstPTSValid = true;
- mFirstPTS = PTS;
- }
- PTS -= mFirstPTS;
-
- int64_t timeUs = (PTS * 100ll) / 9ll + mAbsoluteTimeAnchorUs;
-
if (mStreamTypeMask != LiveSession::STREAMTYPE_AUDIO) {
ALOGW("This stream only contains audio data!");
@@ -1311,6 +1517,12 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits(
int32_t sampleRate;
CHECK(packetSource->getFormat()->findInt32(kKeySampleRate, &sampleRate));
+ int64_t timeUs = (PTS * 100ll) / 9ll;
+ if (!mFirstPTSValid) {
+ mFirstPTSValid = true;
+ mFirstTimeUs = timeUs;
+ }
+
size_t offset = 0;
while (offset < buffer->size()) {
const uint8_t *adtsHeader = buffer->data() + offset;
@@ -1335,19 +1547,70 @@ status_t PlaylistFetcher::extractAndQueueAccessUnits(
CHECK_LE(offset + aac_frame_length, buffer->size());
- sp<ABuffer> unit = new ABuffer(aac_frame_length);
- memcpy(unit->data(), adtsHeader, aac_frame_length);
-
int64_t unitTimeUs = timeUs + numSamples * 1000000ll / sampleRate;
- unit->meta()->setInt64("timeUs", unitTimeUs);
+ offset += aac_frame_length;
// Each AAC frame encodes 1024 samples.
numSamples += 1024;
- unit->meta()->setInt32("seq", mSeqNumber);
- packetSource->queueAccessUnit(unit);
+ if (mStartup) {
+ int64_t startTimeUs = unitTimeUs;
+ if (mStartTimeUsRelative) {
+ startTimeUs -= mFirstTimeUs;
+ if (startTimeUs < 0) {
+ startTimeUs = 0;
+ }
+ }
+ if (startTimeUs < mStartTimeUs) {
+ continue;
+ }
- offset += aac_frame_length;
+ if (mStartTimeUsNotify != NULL) {
+ int32_t targetDurationSecs;
+ CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+ int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+
+ // Duplicated logic from how we handle .ts playlists.
+ if (mStartup && mSegmentStartTimeUs >= 0
+ && timeUs - mStartTimeUs > targetDurationUs) {
+ int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs);
+ if (newSeqNumber >= mSeqNumber) {
+ --mSeqNumber;
+ } else {
+ mSeqNumber = newSeqNumber;
+ }
+ return -EAGAIN;
+ }
+
+ mStartTimeUsNotify->setInt64("timeUsAudio", timeUs);
+ mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
+ mStartTimeUsNotify->setInt32("streamMask", LiveSession::STREAMTYPE_AUDIO);
+ mStartTimeUsNotify->post();
+ mStartTimeUsNotify.clear();
+ }
+ }
+
+ if (mStopParams != NULL) {
+ // Queue discontinuity in original stream.
+ int32_t discontinuitySeq;
+ int64_t stopTimeUs;
+ if (!mStopParams->findInt32("discontinuitySeq", &discontinuitySeq)
+ || discontinuitySeq > mDiscontinuitySeq
+ || !mStopParams->findInt64("timeUsAudio", &stopTimeUs)
+ || (discontinuitySeq == mDiscontinuitySeq && unitTimeUs >= stopTimeUs)) {
+ packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
+ mStreamTypeMask = 0;
+ mPacketSources.clear();
+ return ERROR_OUT_OF_RANGE;
+ }
+ }
+
+ sp<ABuffer> unit = new ABuffer(aac_frame_length);
+ memcpy(unit->data(), adtsHeader, aac_frame_length);
+
+ unit->meta()->setInt64("timeUs", unitTimeUs);
+ setAccessUnitProperties(unit, packetSource);
+ packetSource->queueAccessUnit(unit);
}
return OK;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 7e21523..78c358f 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -49,7 +49,8 @@ struct PlaylistFetcher : public AHandler {
PlaylistFetcher(
const sp<AMessage> &notify,
const sp<LiveSession> &session,
- const char *uri);
+ const char *uri,
+ int32_t subtitleGeneration);
sp<DataSource> getDataSource();
@@ -57,16 +58,22 @@ struct PlaylistFetcher : public AHandler {
const sp<AnotherPacketSource> &audioSource,
const sp<AnotherPacketSource> &videoSource,
const sp<AnotherPacketSource> &subtitleSource,
- int64_t startTimeUs = -1ll,
- int64_t minStartTimeUs = 0ll /* start after this timestamp */,
- int32_t startSeqNumberHint = -1 /* try starting at this sequence number */);
+ int64_t startTimeUs = -1ll, // starting timestamps
+ int64_t segmentStartTimeUs = -1ll, // starting position within playlist
+ // startTimeUs!=segmentStartTimeUs only when playlist is live
+ int32_t startDiscontinuitySeq = 0,
+ bool adaptive = false);
void pauseAsync();
- void stopAsync(bool selfTriggered = false);
+ void stopAsync(bool clear = true);
void resumeUntilAsync(const sp<AMessage> &params);
+ uint32_t getStreamTypeMask() const {
+ return mStreamTypeMask;
+ }
+
protected:
virtual ~PlaylistFetcher();
virtual void onMessageReceived(const sp<AMessage> &msg);
@@ -91,6 +98,7 @@ private:
static const int32_t kNumSkipFrames;
static bool bufferStartsWithTsSyncByte(const sp<ABuffer>& buffer);
+ static bool bufferStartsWithWebVTTMagicSequence(const sp<ABuffer>& buffer);
// notifications to mSession
sp<AMessage> mNotify;
@@ -101,7 +109,14 @@ private:
uint32_t mStreamTypeMask;
int64_t mStartTimeUs;
- int64_t mMinStartTimeUs; // start fetching no earlier than this value
+
+ // Start time relative to the beginning of the first segment in the initial
+ // playlist. It's value is initialized to a non-negative value only when we are
+ // adapting or switching tracks.
+ int64_t mSegmentStartTimeUs;
+
+ ssize_t mDiscontinuitySeq;
+ bool mStartTimeUsRelative;
sp<AMessage> mStopParams; // message containing the latest timestamps we should fetch.
KeyedVector<LiveSession::StreamType, sp<AnotherPacketSource> >
@@ -114,10 +129,12 @@ private:
int32_t mSeqNumber;
int32_t mNumRetries;
bool mStartup;
+ bool mAdaptive;
bool mPrepared;
int64_t mNextPTSTimeUs;
int32_t mMonitorQueueGeneration;
+ const int32_t mSubtitleGeneration;
enum RefreshState {
INITIAL_MINIMUM_RELOAD_DELAY,
@@ -133,7 +150,9 @@ private:
bool mFirstPTSValid;
uint64_t mFirstPTS;
+ int64_t mFirstTimeUs;
int64_t mAbsoluteTimeAnchorUs;
+ sp<AnotherPacketSource> mVideoBuffer;
// Stores the initialization vector to decrypt the next block of cipher text, which can
// either be derived from the sequence number, read from the manifest, or copied from
@@ -172,6 +191,10 @@ private:
// Resume a fetcher to continue until the stopping point stored in msg.
status_t onResumeUntil(const sp<AMessage> &msg);
+ const sp<ABuffer> &setAccessUnitProperties(
+ const sp<ABuffer> &accessUnit,
+ const sp<AnotherPacketSource> &source,
+ bool discard = false);
status_t extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer);
status_t extractAndQueueAccessUnits(
@@ -182,6 +205,8 @@ private:
void queueDiscontinuity(
ATSParser::DiscontinuityType type, const sp<AMessage> &extra);
+ int32_t getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const;
+ int32_t getSeqNumberForDiscontinuity(size_t discontinuitySeq) const;
int32_t getSeqNumberForTime(int64_t timeUs) const;
void updateDuration();
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index bf6f7bb..2194c38 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -4,6 +4,8 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
ID3.cpp
+LOCAL_CFLAGS += -Werror
+
LOCAL_MODULE := libstagefright_id3
include $(BUILD_STATIC_LIBRARY)
@@ -15,6 +17,8 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
testid3.cpp
+LOCAL_CFLAGS += -Werror
+
LOCAL_SHARED_LIBRARIES := \
libstagefright libutils liblog libbinder libstagefright_foundation
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 1ec4a40..7f221a0 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -41,9 +41,9 @@ struct MemorySource : public DataSource {
}
virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
- off64_t available = (offset >= mSize) ? 0ll : mSize - offset;
+ off64_t available = (offset >= (off64_t)mSize) ? 0ll : mSize - offset;
- size_t copy = (available > size) ? size : available;
+ size_t copy = (available > (off64_t)size) ? size : available;
memcpy(data, mData + offset, copy);
return copy;
@@ -172,7 +172,7 @@ struct id3_header {
}
if (size > kMaxMetadataSize) {
- ALOGE("skipping huge ID3 metadata of size %d", size);
+ ALOGE("skipping huge ID3 metadata of size %zu", size);
return false;
}
@@ -468,49 +468,6 @@ void ID3::Iterator::getID(String8 *id) const {
}
}
-static void convertISO8859ToString8(
- const uint8_t *data, size_t size,
- String8 *s) {
- size_t utf8len = 0;
- for (size_t i = 0; i < size; ++i) {
- if (data[i] == '\0') {
- size = i;
- break;
- } else if (data[i] < 0x80) {
- ++utf8len;
- } else {
- utf8len += 2;
- }
- }
-
- if (utf8len == size) {
- // Only ASCII characters present.
-
- s->setTo((const char *)data, size);
- return;
- }
-
- char *tmp = new char[utf8len];
- char *ptr = tmp;
- for (size_t i = 0; i < size; ++i) {
- if (data[i] == '\0') {
- break;
- } else if (data[i] < 0x80) {
- *ptr++ = data[i];
- } else if (data[i] < 0xc0) {
- *ptr++ = 0xc2;
- *ptr++ = data[i];
- } else {
- *ptr++ = 0xc3;
- *ptr++ = data[i] - 64;
- }
- }
-
- s->setTo(tmp, utf8len);
-
- delete[] tmp;
- tmp = NULL;
-}
// the 2nd argument is used to get the data following the \0 in a comment field
void ID3::Iterator::getString(String8 *id, String8 *comment) const {
@@ -543,7 +500,9 @@ void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
return;
}
- convertISO8859ToString8(frameData, mFrameSize, id);
+ // this is supposed to be ISO-8859-1, but pass it up as-is to the caller, who will figure
+ // out the real encoding
+ id->setTo((const char*)frameData, mFrameSize);
return;
}
@@ -561,13 +520,13 @@ void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
}
if (encoding == 0x00) {
- // ISO 8859-1
- convertISO8859ToString8(frameData + 1, n, id);
+ // supposedly ISO 8859-1
+ id->setTo((const char*)frameData + 1, n);
} else if (encoding == 0x03) {
- // UTF-8
+ // supposedly UTF-8
id->setTo((const char *)(frameData + 1), n);
} else if (encoding == 0x02) {
- // UTF-16 BE, no byte order mark.
+ // supposedly UTF-16 BE, no byte order mark.
// API wants number of characters, not number of bytes...
int len = n / 2;
const char16_t *framedata = (const char16_t *) (frameData + 1);
@@ -583,7 +542,7 @@ void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
if (framedatacopy != NULL) {
delete[] framedatacopy;
}
- } else {
+ } else if (encoding == 0x01) {
// UCS-2
// API wants number of characters, not number of bytes...
int len = n / 2;
@@ -602,7 +561,27 @@ void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
framedata++;
len--;
}
- id->setTo(framedata, len);
+
+ // check if the resulting data consists entirely of 8-bit values
+ bool eightBit = true;
+ for (int i = 0; i < len; i++) {
+ if (framedata[i] > 0xff) {
+ eightBit = false;
+ break;
+ }
+ }
+ if (eightBit) {
+ // collapse to 8 bit, then let the media scanner client figure out the real encoding
+ char *frame8 = new char[len];
+ for (int i = 0; i < len; i++) {
+ frame8[i] = framedata[i];
+ }
+ id->setTo(frame8, len);
+ delete [] frame8;
+ } else {
+ id->setTo(framedata, len);
+ }
+
if (framedatacopy != NULL) {
delete[] framedatacopy;
}
@@ -654,8 +633,8 @@ void ID3::Iterator::findFrame() {
mFrameSize += 6;
if (mOffset + mFrameSize > mParent.mSize) {
- ALOGV("partial frame at offset %d (size = %d, bytes-remaining = %d)",
- mOffset, mFrameSize, mParent.mSize - mOffset - 6);
+ ALOGV("partial frame at offset %zu (size = %zu, bytes-remaining = %zu)",
+ mOffset, mFrameSize, mParent.mSize - mOffset - (size_t)6);
return;
}
@@ -695,8 +674,8 @@ void ID3::Iterator::findFrame() {
mFrameSize = 10 + baseSize;
if (mOffset + mFrameSize > mParent.mSize) {
- ALOGV("partial frame at offset %d (size = %d, bytes-remaining = %d)",
- mOffset, mFrameSize, mParent.mSize - mOffset - 10);
+ ALOGV("partial frame at offset %zu (size = %zu, bytes-remaining = %zu)",
+ mOffset, mFrameSize, mParent.mSize - mOffset - (size_t)10);
return;
}
diff --git a/media/libstagefright/id3/testid3.cpp b/media/libstagefright/id3/testid3.cpp
index bc4572c..b2f4188 100644
--- a/media/libstagefright/id3/testid3.cpp
+++ b/media/libstagefright/id3/testid3.cpp
@@ -33,7 +33,7 @@ static void hexdump(const void *_data, size_t size) {
const uint8_t *data = (const uint8_t *)_data;
size_t offset = 0;
while (offset < size) {
- printf("0x%04x ", offset);
+ printf("0x%04zx ", offset);
size_t n = size - offset;
if (n > 16) {
@@ -101,7 +101,7 @@ void scanFile(const char *path) {
const void *data = tag.getAlbumArt(&dataSize, &mime);
if (data) {
- printf("found album art: size=%d mime='%s'\n", dataSize,
+ printf("found album art: size=%zu mime='%s'\n", dataSize,
mime.string());
hexdump(data, dataSize > 128 ? 128 : dataSize);
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 271df8e..77d65e0 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -32,6 +32,7 @@
namespace android {
struct AudioPlayer;
+struct ClockEstimator;
struct DataSource;
struct MediaBuffer;
struct MediaExtractor;
@@ -63,6 +64,7 @@ struct AwesomePlayer {
void setUID(uid_t uid);
status_t setDataSource(
+ const sp<IMediaHTTPService> &httpService,
const char *uri,
const KeyedVector<String8, String8> *headers = NULL);
@@ -159,6 +161,7 @@ private:
SystemTimeSource mSystemTimeSource;
TimeSource *mTimeSource;
+ sp<IMediaHTTPService> mHTTPService;
String8 mUri;
KeyedVector<String8, String8> mUriHeaders;
@@ -234,6 +237,7 @@ private:
MediaBuffer *mVideoBuffer;
+ sp<ClockEstimator> mClockEstimator;
sp<HTTPBase> mConnectingDataSource;
sp<NuCachedSource2> mCachedSource;
@@ -247,6 +251,7 @@ private:
sp<MediaExtractor> mExtractor;
status_t setDataSource_l(
+ const sp<IMediaHTTPService> &httpService,
const char *uri,
const KeyedVector<String8, String8> *headers = NULL);
@@ -293,6 +298,7 @@ private:
bool getBitrate(int64_t *bitrate);
+ int64_t estimateRealTimeUs(TimeSource *ts, int64_t systemTimeUs);
void finishSeekIfNecessary(int64_t videoTimeUs);
void ensureCacheIsFetching_l();
diff --git a/media/libstagefright/include/ChromiumHTTPDataSource.h b/media/libstagefright/include/ChromiumHTTPDataSource.h
deleted file mode 100644
index da188dd..0000000
--- a/media/libstagefright/include/ChromiumHTTPDataSource.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CHROME_HTTP_DATA_SOURCE_H_
-
-#define CHROME_HTTP_DATA_SOURCE_H_
-
-#include <media/stagefright/foundation/AString.h>
-#include <utils/threads.h>
-
-#include "HTTPBase.h"
-
-namespace android {
-
-struct SfDelegate;
-
-struct ChromiumHTTPDataSource : public HTTPBase {
- ChromiumHTTPDataSource(uint32_t flags = 0);
-
- virtual status_t connect(
- const char *uri,
- const KeyedVector<String8, String8> *headers = NULL,
- off64_t offset = 0);
-
- virtual void disconnect();
-
- virtual status_t initCheck() const;
-
- virtual ssize_t readAt(off64_t offset, void *data, size_t size);
- virtual status_t getSize(off64_t *size);
- virtual uint32_t flags();
-
- virtual sp<DecryptHandle> DrmInitialization(const char *mime);
-
- virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
-
- virtual String8 getUri();
-
- virtual String8 getMIMEType() const;
-
- virtual status_t reconnectAtOffset(off64_t offset);
-
- static status_t UpdateProxyConfig(
- const char *host, int32_t port, const char *exclusionList);
-
-protected:
- virtual ~ChromiumHTTPDataSource();
-
-private:
- friend struct SfDelegate;
-
- enum State {
- DISCONNECTED,
- CONNECTING,
- CONNECTED,
- READING,
- DISCONNECTING
- };
-
- const uint32_t mFlags;
-
- mutable Mutex mLock;
- Condition mCondition;
-
- State mState;
-
- SfDelegate *mDelegate;
-
- AString mURI;
- KeyedVector<String8, String8> mHeaders;
-
- off64_t mCurrentOffset;
-
- // Any connection error or the result of a read operation
- // (for the lattter this is the number of bytes read, if successful).
- ssize_t mIOResult;
-
- int64_t mContentSize;
-
- String8 mContentType;
-
- sp<DecryptHandle> mDecryptHandle;
- DrmManagerClient *mDrmManagerClient;
-
- void disconnect_l();
-
- status_t connect_l(
- const char *uri,
- const KeyedVector<String8, String8> *headers,
- off64_t offset);
-
- static void InitiateRead(
- ChromiumHTTPDataSource *me, void *data, size_t size);
-
- void initiateRead(void *data, size_t size);
-
- void onConnectionEstablished(
- int64_t contentSize, const char *contentType);
-
- void onConnectionFailed(status_t err);
- void onReadCompleted(ssize_t size);
- void onDisconnectComplete();
- void onRedirect(const char *url);
-
- void clearDRMState_l();
-
- DISALLOW_EVIL_CONSTRUCTORS(ChromiumHTTPDataSource);
-};
-
-} // namespace android
-
-#endif // CHROME_HTTP_DATA_SOURCE_H_
diff --git a/media/libstagefright/include/FragmentedMP4Parser.h b/media/libstagefright/include/FragmentedMP4Parser.h
deleted file mode 100644
index dbe02b8..0000000
--- a/media/libstagefright/include/FragmentedMP4Parser.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef PARSER_H_
-
-#define PARSER_H_
-
-#include <media/stagefright/foundation/AHandler.h>
-#include <media/stagefright/DataSource.h>
-#include <utils/Vector.h>
-
-namespace android {
-
-struct ABuffer;
-
-struct FragmentedMP4Parser : public AHandler {
- struct Source : public RefBase {
- Source() {}
-
- virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
- virtual bool isSeekable() = 0;
-
- protected:
- virtual ~Source() {}
-
- private:
- DISALLOW_EVIL_CONSTRUCTORS(Source);
- };
-
- FragmentedMP4Parser();
-
- void start(const char *filename);
- void start(const sp<Source> &source);
- void start(sp<DataSource> &source);
-
- sp<AMessage> getFormat(bool audio, bool synchronous = false);
- status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit, bool synchronous = false);
- status_t seekTo(bool audio, int64_t timeUs);
- bool isSeekable() const;
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-protected:
- virtual ~FragmentedMP4Parser();
-
-private:
- enum {
- kWhatStart,
- kWhatProceed,
- kWhatReadMore,
- kWhatGetFormat,
- kWhatDequeueAccessUnit,
- kWhatSeekTo,
- };
-
- struct TrackFragment;
- struct DynamicTrackFragment;
- struct StaticTrackFragment;
-
- struct DispatchEntry {
- uint32_t mType;
- uint32_t mParentType;
- status_t (FragmentedMP4Parser::*mHandler)(uint32_t, size_t, uint64_t);
- };
-
- struct Container {
- uint64_t mOffset;
- uint64_t mBytesRemaining;
- uint32_t mType;
- bool mExtendsToEOF;
- };
-
- struct SampleDescription {
- uint32_t mType;
- uint16_t mDataRefIndex;
-
- sp<AMessage> mFormat;
- };
-
- struct SampleInfo {
- off64_t mOffset;
- size_t mSize;
- uint32_t mPresentationTime;
- size_t mSampleDescIndex;
- uint32_t mFlags;
- };
-
- struct MediaDataInfo {
- sp<ABuffer> mBuffer;
- off64_t mOffset;
- };
-
- struct SidxEntry {
- size_t mSize;
- uint32_t mDurationUs;
- };
-
- struct TrackInfo {
- enum Flags {
- kTrackEnabled = 0x01,
- kTrackInMovie = 0x02,
- kTrackInPreview = 0x04,
- };
-
- uint32_t mTrackID;
- uint32_t mFlags;
- uint32_t mDuration; // This is the duration in terms of movie timescale!
- uint64_t mSidxDuration; // usec, from sidx box, which can use a different timescale
-
- uint32_t mMediaTimeScale;
-
- uint32_t mMediaHandlerType;
- Vector<SampleDescription> mSampleDescs;
-
- // from track extends:
- uint32_t mDefaultSampleDescriptionIndex;
- uint32_t mDefaultSampleDuration;
- uint32_t mDefaultSampleSize;
- uint32_t mDefaultSampleFlags;
-
- uint32_t mDecodingTime;
-
- Vector<SidxEntry> mSidx;
- sp<StaticTrackFragment> mStaticFragment;
- List<sp<TrackFragment> > mFragments;
- };
-
- struct TrackFragmentHeaderInfo {
- enum Flags {
- kBaseDataOffsetPresent = 0x01,
- kSampleDescriptionIndexPresent = 0x02,
- kDefaultSampleDurationPresent = 0x08,
- kDefaultSampleSizePresent = 0x10,
- kDefaultSampleFlagsPresent = 0x20,
- kDurationIsEmpty = 0x10000,
- };
-
- uint32_t mTrackID;
- uint32_t mFlags;
- uint64_t mBaseDataOffset;
- uint32_t mSampleDescriptionIndex;
- uint32_t mDefaultSampleDuration;
- uint32_t mDefaultSampleSize;
- uint32_t mDefaultSampleFlags;
-
- uint64_t mDataOffset;
- };
-
- static const DispatchEntry kDispatchTable[];
-
- sp<Source> mSource;
- off_t mBufferPos;
- bool mSuspended;
- bool mDoneWithMoov;
- off_t mFirstMoofOffset; // used as the starting point for offsets calculated from the sidx box
- sp<ABuffer> mBuffer;
- Vector<Container> mStack;
- KeyedVector<uint32_t, TrackInfo> mTracks; // TrackInfo by trackID
- Vector<MediaDataInfo> mMediaData;
-
- uint32_t mCurrentTrackID;
-
- status_t mFinalResult;
-
- TrackFragmentHeaderInfo mTrackFragmentHeaderInfo;
-
- status_t onProceed();
- status_t onDequeueAccessUnit(size_t trackIndex, sp<ABuffer> *accessUnit);
- status_t onSeekTo(bool wantAudio, int64_t position);
-
- void enter(off64_t offset, uint32_t type, uint64_t size);
-
- uint16_t readU16(size_t offset);
- uint32_t readU32(size_t offset);
- uint64_t readU64(size_t offset);
- void skip(off_t distance);
- status_t need(size_t size);
- bool fitsContainer(uint64_t size) const;
-
- status_t parseTrackHeader(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseMediaHeader(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseMediaHandler(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseTrackExtends(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseTrackFragmentHeader(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseTrackFragmentRun(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseVisualSampleEntry(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseAudioSampleEntry(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseSampleSizes(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseCompactSampleSizes(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseSampleToChunk(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseChunkOffsets(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseChunkOffsets64(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseAVCCodecSpecificData(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseESDSCodecSpecificData(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseMediaData(
- uint32_t type, size_t offset, uint64_t size);
-
- status_t parseSegmentIndex(
- uint32_t type, size_t offset, uint64_t size);
-
- TrackInfo *editTrack(uint32_t trackID, bool createIfNecessary = false);
-
- ssize_t findTrack(bool wantAudio) const;
-
- status_t makeAccessUnit(
- TrackInfo *info,
- const SampleInfo &sample,
- const MediaDataInfo &mdatInfo,
- sp<ABuffer> *accessUnit);
-
- status_t getSample(
- TrackInfo *info,
- sp<TrackFragment> *fragment,
- SampleInfo *sampleInfo);
-
- static int CompareSampleLocation(
- const SampleInfo &sample, const MediaDataInfo &mdatInfo);
-
- void resumeIfNecessary();
-
- void copyBuffer(
- sp<ABuffer> *dst,
- size_t offset, uint64_t size) const;
-
- DISALLOW_EVIL_CONSTRUCTORS(FragmentedMP4Parser);
-};
-
-} // namespace android
-
-#endif // PARSER_H_
-
diff --git a/media/libstagefright/include/HTTPBase.h b/media/libstagefright/include/HTTPBase.h
index d4b7f9f..1c3cd5e 100644
--- a/media/libstagefright/include/HTTPBase.h
+++ b/media/libstagefright/include/HTTPBase.h
@@ -48,14 +48,6 @@ struct HTTPBase : public DataSource {
virtual status_t setBandwidthStatCollectFreq(int32_t freqMs);
- static status_t UpdateProxyConfig(
- const char *host, int32_t port, const char *exclusionList);
-
- void setUID(uid_t uid);
- bool getUID(uid_t *uid) const;
-
- static sp<HTTPBase> Create(uint32_t flags = 0);
-
static void RegisterSocketUserTag(int sockfd, uid_t uid, uint32_t kTag);
static void UnRegisterSocketUserTag(int sockfd);
@@ -87,9 +79,6 @@ private:
int32_t mPrevEstimatedBandWidthKbps;
int32_t mBandWidthCollectFreqMs;
- bool mUIDValid;
- uid_t mUID;
-
DISALLOW_EVIL_CONSTRUCTORS(HTTPBase);
};
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 7b4bc6d..1fe6fcf 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -39,6 +39,14 @@ struct SidxEntry {
uint32_t mDurationUs;
};
+struct Trex {
+ uint32_t track_ID;
+ uint32_t default_sample_description_index;
+ uint32_t default_sample_duration;
+ uint32_t default_sample_size;
+ uint32_t default_sample_flags;
+};
+
class MPEG4Extractor : public MediaExtractor {
public:
// Extractor assumes ownership of "source".
@@ -74,11 +82,12 @@ private:
};
Vector<SidxEntry> mSidxEntries;
- uint64_t mSidxDuration;
off64_t mMoofOffset;
Vector<PsshInfo> mPssh;
+ Vector<Trex> mTrex;
+
sp<DataSource> mDataSource;
status_t mInitCheck;
bool mHasVideo;
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index 5db4b4b..4252706 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -37,6 +37,8 @@ struct NuCachedSource2 : public DataSource {
virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+ virtual void disconnect();
+
virtual status_t getSize(off64_t *size);
virtual uint32_t flags();
@@ -103,6 +105,7 @@ private:
off64_t mLastAccessPos;
sp<AMessage> mAsyncResult;
bool mFetching;
+ bool mDisconnecting;
int64_t mLastFetchTimeUs;
int32_t mNumRetriesLeft;
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 31a5077..e8c4970 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -75,6 +75,10 @@ public:
node_id node, OMX_U32 portIndex, OMX_BOOL enable,
OMX_U32 max_frame_width, OMX_U32 max_frame_height);
+ virtual status_t configureVideoTunnelMode(
+ node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
+
virtual status_t useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
buffer_id *buffer);
@@ -134,10 +138,10 @@ public:
OMX_IN OMX_PTR pEventData);
OMX_ERRORTYPE OnEmptyBufferDone(
- node_id node, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
+ node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
OMX_ERRORTYPE OnFillBufferDone(
- node_id node, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
+ node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
void invalidateNodeID(node_id node);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 339179e..24d431c 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -62,6 +62,10 @@ struct OMXNodeInstance {
OMX_U32 portIndex, OMX_BOOL enable,
OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight);
+ status_t configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
+
status_t useBuffer(
OMX_U32 portIndex, const sp<IMemory> &params,
OMX::buffer_id *buffer);
@@ -138,12 +142,25 @@ private:
OMX::buffer_id mID;
};
Vector<ActiveBuffer> mActiveBuffers;
+#ifdef __LP64__
+ Mutex mBufferIDLock;
+ uint32_t mBufferIDCount;
+ KeyedVector<OMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
+ KeyedVector<OMX_BUFFERHEADERTYPE *, OMX::buffer_id> mBufferHeaderToBufferID;
+#endif
~OMXNodeInstance();
void addActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id);
void removeActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id);
void freeActiveBuffers();
+
+ // For buffer id management
+ OMX::buffer_id makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
+ OMX_BUFFERHEADERTYPE *findBufferHeader(OMX::buffer_id buffer);
+ OMX::buffer_id findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
+ void invalidateBufferID(OMX::buffer_id buffer);
+
status_t useGraphicBuffer2_l(
OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
OMX::buffer_id *buffer);
@@ -165,7 +182,9 @@ private:
OMX_IN OMX_PTR pAppData,
OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
- status_t storeMetaDataInBuffers_l(OMX_U32 portIndex, OMX_BOOL enable);
+ status_t storeMetaDataInBuffers_l(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_BOOL useGraphicBuffer, OMX_BOOL *usingGraphicBufferInMeta);
sp<GraphicBufferSource> getGraphicBufferSource();
void setGraphicBufferSource(const sp<GraphicBufferSource>& bufferSource);
diff --git a/media/libstagefright/include/SDPLoader.h b/media/libstagefright/include/SDPLoader.h
index ca59dc0..2c4f543 100644
--- a/media/libstagefright/include/SDPLoader.h
+++ b/media/libstagefright/include/SDPLoader.h
@@ -25,6 +25,7 @@
namespace android {
struct HTTPBase;
+struct IMediaHTTPService;
struct SDPLoader : public AHandler {
enum Flags {
@@ -34,7 +35,10 @@ struct SDPLoader : public AHandler {
enum {
kWhatSDPLoaded = 'sdpl'
};
- SDPLoader(const sp<AMessage> &notify, uint32_t flags = 0, bool uidValid = false, uid_t uid = 0);
+ SDPLoader(
+ const sp<AMessage> &notify,
+ uint32_t flags,
+ const sp<IMediaHTTPService> &httpService);
void load(const char* url, const KeyedVector<String8, String8> *headers);
@@ -55,8 +59,6 @@ private:
sp<AMessage> mNotify;
const char* mUrl;
uint32_t mFlags;
- bool mUIDValid;
- uid_t mUID;
sp<ALooper> mNetLooper;
bool mCancelled;
diff --git a/media/libstagefright/include/SampleIterator.h b/media/libstagefright/include/SampleIterator.h
index b5a043c..60c9e7e 100644
--- a/media/libstagefright/include/SampleIterator.h
+++ b/media/libstagefright/include/SampleIterator.h
@@ -30,6 +30,7 @@ struct SampleIterator {
off64_t getSampleOffset() const { return mCurrentSampleOffset; }
size_t getSampleSize() const { return mCurrentSampleSize; }
uint32_t getSampleTime() const { return mCurrentSampleTime; }
+ uint32_t getSampleDuration() const { return mCurrentSampleDuration; }
status_t getSampleSizeDirect(
uint32_t sampleIndex, size_t *size);
@@ -61,11 +62,12 @@ private:
off64_t mCurrentSampleOffset;
size_t mCurrentSampleSize;
uint32_t mCurrentSampleTime;
+ uint32_t mCurrentSampleDuration;
void reset();
status_t findChunkRange(uint32_t sampleIndex);
status_t getChunkOffset(uint32_t chunk, off64_t *offset);
- status_t findSampleTime(uint32_t sampleIndex, uint32_t *time);
+ status_t findSampleTimeAndDuration(uint32_t sampleIndex, uint32_t *time, uint32_t *duration);
SampleIterator(const SampleIterator &);
SampleIterator &operator=(const SampleIterator &);
diff --git a/media/libstagefright/include/SampleTable.h b/media/libstagefright/include/SampleTable.h
index 847dff7..d06df7b 100644
--- a/media/libstagefright/include/SampleTable.h
+++ b/media/libstagefright/include/SampleTable.h
@@ -66,7 +66,8 @@ public:
off64_t *offset,
size_t *size,
uint32_t *compositionTime,
- bool *isSyncSample = NULL);
+ bool *isSyncSample = NULL,
+ uint32_t *sampleDuration = NULL);
enum {
kFlagBefore,
@@ -74,7 +75,8 @@ public:
kFlagClosest
};
status_t findSampleAtTime(
- uint32_t req_time, uint32_t *sample_index, uint32_t flags);
+ uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+ uint32_t *sample_index, uint32_t flags);
status_t findSyncSampleNear(
uint32_t start_sample_index, uint32_t *sample_index,
@@ -137,6 +139,13 @@ private:
friend struct SampleIterator;
+ // normally we don't round
+ inline uint64_t getSampleTime(
+ size_t sample_index, uint64_t scale_num, uint64_t scale_den) const {
+ return (mSampleTimeEntries[sample_index].mCompositionTime
+ * scale_num) / scale_den;
+ }
+
status_t getSampleSize_l(uint32_t sample_index, size_t *sample_size);
uint32_t getCompositionTimeOffset(uint32_t sampleIndex);
diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/include/SimpleSoftOMXComponent.h
index f8c61eb..591b38e 100644
--- a/media/libstagefright/include/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/include/SimpleSoftOMXComponent.h
@@ -58,6 +58,11 @@ protected:
} mTransition;
};
+ enum {
+ kStoreMetaDataExtensionIndex = OMX_IndexVendorStartUnused + 1,
+ kPrepareForAdaptivePlaybackIndex,
+ };
+
void addPort(const OMX_PARAM_PORTDEFINITIONTYPE &def);
virtual OMX_ERRORTYPE internalGetParameter(
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index d050fa6..9e97ebd 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -27,8 +27,6 @@
#include <utils/threads.h>
#include <utils/Vector.h>
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
-
namespace android {
struct SoftVideoDecoderOMXComponent : public SimpleSoftOMXComponent {
@@ -57,12 +55,31 @@ protected:
virtual OMX_ERRORTYPE getConfig(
OMX_INDEXTYPE index, OMX_PTR params);
+ virtual OMX_ERRORTYPE getExtensionIndex(
+ const char *name, OMX_INDEXTYPE *index);
+
void initPorts(OMX_U32 numInputBuffers,
OMX_U32 inputBufferSize,
OMX_U32 numOutputBuffers,
const char *mimeType);
- virtual void updatePortDefinitions();
+ virtual void updatePortDefinitions(bool updateCrop = true);
+
+ uint32_t outputBufferWidth();
+ uint32_t outputBufferHeight();
+
+ enum CropSettingsMode {
+ kCropUnSet = 0,
+ kCropSet,
+ kCropChanged,
+ };
+ void handlePortSettingsChange(
+ bool *portWillReset, uint32_t width, uint32_t height,
+ CropSettingsMode cropSettingsMode = kCropUnSet, bool fakeStride = false);
+
+ void copyYV12FrameToOutputBuffer(
+ uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
+ size_t srcYStride, size_t srcUStride, size_t srcVStride);
enum {
kInputPortIndex = 0,
@@ -70,6 +87,8 @@ protected:
kMaxPortIndex = 1,
};
+ bool mIsAdaptive;
+ uint32_t mAdaptiveMaxWidth, mAdaptiveMaxHeight;
uint32_t mWidth, mHeight;
uint32_t mCropLeft, mCropTop, mCropWidth, mCropHeight;
diff --git a/media/libstagefright/include/SoftVideoEncoderOMXComponent.h b/media/libstagefright/include/SoftVideoEncoderOMXComponent.h
new file mode 100644
index 0000000..b3b810d
--- /dev/null
+++ b/media/libstagefright/include/SoftVideoEncoderOMXComponent.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VIDEO_ENCODER_OMX_COMPONENT_H_
+
+#define SOFT_VIDEO_ENCODER_OMX_COMPONENT_H_
+
+#include "SimpleSoftOMXComponent.h"
+#include <system/window.h>
+
+struct hw_module_t;
+
+namespace android {
+
+struct SoftVideoEncoderOMXComponent : public SimpleSoftOMXComponent {
+ SoftVideoEncoderOMXComponent(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ static void ConvertFlexYUVToPlanar(
+ uint8_t *dst, size_t dstStride, size_t dstVStride,
+ struct android_ycbcr *ycbcr, int32_t width, int32_t height);
+
+ static void ConvertYUV420SemiPlanarToYUV420Planar(
+ const uint8_t *inYVU, uint8_t* outYUV, int32_t width, int32_t height);
+
+ static void ConvertRGB32ToPlanar(
+ uint8_t *dstY, size_t dstStride, size_t dstVStride,
+ const uint8_t *src, size_t width, size_t height, size_t srcStride,
+ bool bgr);
+
+ const uint8_t *extractGraphicBuffer(
+ uint8_t *dst, size_t dstSize, const uint8_t *src, size_t srcSize,
+ size_t width, size_t height) const;
+
+ virtual OMX_ERRORTYPE getExtensionIndex(const char *name, OMX_INDEXTYPE *index);
+
+ enum {
+ kInputPortIndex = 0,
+ kOutputPortIndex = 1,
+ };
+
+private:
+ mutable const hw_module_t *mGrallocModule;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftVideoEncoderOMXComponent);
+};
+
+} // namespace android
+
+#endif // SOFT_VIDEO_ENCODER_OMX_COMPONENT_H_
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index 7ab0042..fa3ea89 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -24,17 +24,17 @@
namespace android {
-struct MetaData;
+struct AMessage;
class SoftwareRenderer {
public:
- SoftwareRenderer(
- const sp<ANativeWindow> &nativeWindow, const sp<MetaData> &meta);
+ explicit SoftwareRenderer(const sp<ANativeWindow> &nativeWindow);
~SoftwareRenderer();
void render(
- const void *data, size_t size, void *platformPrivate);
+ const void *data, size_t size, int64_t timestampNs,
+ void *platformPrivate, const sp<AMessage> &format);
private:
enum YUVMode {
@@ -51,6 +51,8 @@ private:
SoftwareRenderer(const SoftwareRenderer &);
SoftwareRenderer &operator=(const SoftwareRenderer &);
+
+ void resetFormatIfChanged(const sp<AMessage> &format);
};
} // namespace android
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index b02ed0e..6632c27 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -33,6 +33,7 @@ struct StagefrightMetadataRetriever : public MediaMetadataRetrieverInterface {
virtual ~StagefrightMetadataRetriever();
virtual status_t setDataSource(
+ const sp<IMediaHTTPService> &httpService,
const char *url,
const KeyedVector<String8, String8> *headers);
diff --git a/media/libstagefright/include/TimedEventQueue.h b/media/libstagefright/include/TimedEventQueue.h
index 3e84256..2963150 100644
--- a/media/libstagefright/include/TimedEventQueue.h
+++ b/media/libstagefright/include/TimedEventQueue.h
@@ -122,7 +122,7 @@ private:
};
struct StopEvent : public TimedEventQueue::Event {
- virtual void fire(TimedEventQueue *queue, int64_t now_us) {
+ virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
queue->mStopped = true;
}
};
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
index 8e62946..ab7e8b8 100644
--- a/media/libstagefright/include/WVMExtractor.h
+++ b/media/libstagefright/include/WVMExtractor.h
@@ -49,6 +49,7 @@ public:
virtual sp<MediaSource> getTrack(size_t index);
virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
virtual sp<MetaData> getMetaData();
+ virtual void setUID(uid_t uid);
// Return the amount of data cached from the current
// playback positiion (in us).
@@ -74,8 +75,6 @@ public:
// codec.
void setCryptoPluginMode(bool cryptoPluginMode);
- void setUID(uid_t uid);
-
static bool getVendorLibHandle();
status_t getError();
diff --git a/media/libstagefright/matroska/Android.mk b/media/libstagefright/matroska/Android.mk
index 2d8c1e1..446ff8c 100644
--- a/media/libstagefright/matroska/Android.mk
+++ b/media/libstagefright/matroska/Android.mk
@@ -8,7 +8,7 @@ LOCAL_C_INCLUDES:= \
$(TOP)/external/libvpx/libwebm \
$(TOP)/frameworks/native/include/media/openmax \
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror
LOCAL_MODULE:= libstagefright_matroska
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index dcb1cda..2587ec7 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -20,8 +20,6 @@
#include "MatroskaExtractor.h"
-#include "mkvparser.hpp"
-
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/DataSource.h>
@@ -33,6 +31,8 @@
#include <media/stagefright/Utils.h>
#include <utils/String8.h>
+#include <inttypes.h>
+
namespace android {
struct DataSourceReader : public mkvparser::IMkvReader {
@@ -87,7 +87,7 @@ private:
////////////////////////////////////////////////////////////////////////////////
struct BlockIterator {
- BlockIterator(MatroskaExtractor *extractor, unsigned long trackNum);
+ BlockIterator(MatroskaExtractor *extractor, unsigned long trackNum, unsigned long index);
bool eos() const;
@@ -103,7 +103,8 @@ struct BlockIterator {
private:
MatroskaExtractor *mExtractor;
- unsigned long mTrackNum;
+ long long mTrackNum;
+ unsigned long mIndex;
const mkvparser::Cluster *mCluster;
const mkvparser::BlockEntry *mBlockEntry;
@@ -155,6 +156,53 @@ private:
MatroskaSource &operator=(const MatroskaSource &);
};
+const mkvparser::Track* MatroskaExtractor::TrackInfo::getTrack() const {
+ return mExtractor->mSegment->GetTracks()->GetTrackByNumber(mTrackNum);
+}
+
+// This function does exactly the same as mkvparser::Cues::Find, except that it
+// searches in our own track based vectors. We should not need this once mkvparser
+// adds the same functionality.
+const mkvparser::CuePoint::TrackPosition *MatroskaExtractor::TrackInfo::find(
+ long long timeNs) const {
+ ALOGV("mCuePoints.size %zu", mCuePoints.size());
+ if (mCuePoints.empty()) {
+ return NULL;
+ }
+
+ const mkvparser::CuePoint* cp = mCuePoints.itemAt(0);
+ const mkvparser::Track* track = getTrack();
+ if (timeNs <= cp->GetTime(mExtractor->mSegment)) {
+ return cp->Find(track);
+ }
+
+ // Binary searches through relevant cues; assumes cues are ordered by timecode.
+ // If we do detect out-of-order cues, return NULL.
+ size_t lo = 0;
+ size_t hi = mCuePoints.size();
+ while (lo < hi) {
+ const size_t mid = lo + (hi - lo) / 2;
+ const mkvparser::CuePoint* const midCp = mCuePoints.itemAt(mid);
+ const long long cueTimeNs = midCp->GetTime(mExtractor->mSegment);
+ if (cueTimeNs <= timeNs) {
+ lo = mid + 1;
+ } else {
+ hi = mid;
+ }
+ }
+
+ if (lo == 0) {
+ return NULL;
+ }
+
+ cp = mCuePoints.itemAt(lo - 1);
+ if (cp->GetTime(mExtractor->mSegment) > timeNs) {
+ return NULL;
+ }
+
+ return cp->Find(track);
+}
+
MatroskaSource::MatroskaSource(
const sp<MatroskaExtractor> &extractor, size_t index)
: mExtractor(extractor),
@@ -162,7 +210,8 @@ MatroskaSource::MatroskaSource(
mType(OTHER),
mIsAudio(false),
mBlockIter(mExtractor.get(),
- mExtractor->mTracks.itemAt(index).mTrackNum),
+ mExtractor->mTracks.itemAt(index).mTrackNum,
+ index),
mNALSizeLen(0) {
sp<MetaData> meta = mExtractor->mTracks.itemAt(index).mMeta;
@@ -183,7 +232,7 @@ MatroskaSource::MatroskaSource(
CHECK_GE(avccSize, 5u);
mNALSizeLen = 1 + (avcc[4] & 3);
- ALOGV("mNALSizeLen = %d", mNALSizeLen);
+ ALOGV("mNALSizeLen = %zu", mNALSizeLen);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
mType = AAC;
}
@@ -193,7 +242,7 @@ MatroskaSource::~MatroskaSource() {
clearPendingFrames();
}
-status_t MatroskaSource::start(MetaData *params) {
+status_t MatroskaSource::start(MetaData * /* params */) {
mBlockIter.reset();
return OK;
@@ -212,9 +261,10 @@ sp<MetaData> MatroskaSource::getFormat() {
////////////////////////////////////////////////////////////////////////////////
BlockIterator::BlockIterator(
- MatroskaExtractor *extractor, unsigned long trackNum)
+ MatroskaExtractor *extractor, unsigned long trackNum, unsigned long index)
: mExtractor(extractor),
mTrackNum(trackNum),
+ mIndex(index),
mCluster(NULL),
mBlockEntry(NULL),
mBlockEntryIndex(0) {
@@ -313,14 +363,14 @@ void BlockIterator::seek(
*actualFrameTimeUs = -1ll;
- const int64_t seekTimeNs = seekTimeUs * 1000ll;
+ const int64_t seekTimeNs = seekTimeUs * 1000ll - mExtractor->mSeekPreRollNs;
mkvparser::Segment* const pSegment = mExtractor->mSegment;
// Special case the 0 seek to avoid loading Cues when the application
// extraneously seeks to 0 before playing.
if (seekTimeNs <= 0) {
- ALOGV("Seek to beginning: %lld", seekTimeUs);
+ ALOGV("Seek to beginning: %" PRId64, seekTimeUs);
mCluster = pSegment->GetFirst();
mBlockEntryIndex = 0;
do {
@@ -329,7 +379,7 @@ void BlockIterator::seek(
return;
}
- ALOGV("Seeking to: %lld", seekTimeUs);
+ ALOGV("Seeking to: %" PRId64, seekTimeUs);
// If the Cues have not been located then find them.
const mkvparser::Cues* pCues = pSegment->GetCues();
@@ -362,9 +412,20 @@ void BlockIterator::seek(
}
const mkvparser::CuePoint* pCP;
+ mkvparser::Tracks const *pTracks = pSegment->GetTracks();
+ unsigned long int trackCount = pTracks->GetTracksCount();
while (!pCues->DoneParsing()) {
pCues->LoadCuePoint();
pCP = pCues->GetLast();
+ CHECK(pCP);
+
+ for (size_t index = 0; index < trackCount; ++index) {
+ const mkvparser::Track *pTrack = pTracks->GetTrackByIndex(index);
+ if (pTrack && pTrack->GetType() == 1 && pCP->Find(pTrack)) { // VIDEO_TRACK
+ MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(index);
+ track.mCuePoints.push_back(pCP);
+ }
+ }
if (pCP->GetTime(pSegment) >= seekTimeNs) {
ALOGV("Parsed past relevant Cue");
@@ -372,22 +433,25 @@ void BlockIterator::seek(
}
}
- // The Cue index is built around video keyframes
- mkvparser::Tracks const *pTracks = pSegment->GetTracks();
- const mkvparser::Track *pTrack = NULL;
- for (size_t index = 0; index < pTracks->GetTracksCount(); ++index) {
- pTrack = pTracks->GetTrackByIndex(index);
- if (pTrack && pTrack->GetType() == 1) { // VIDEO_TRACK
- ALOGV("Video track located at %d", index);
- break;
+ const mkvparser::CuePoint::TrackPosition *pTP = NULL;
+ const mkvparser::Track *thisTrack = pTracks->GetTrackByIndex(mIndex);
+ if (thisTrack->GetType() == 1) { // video
+ MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(mIndex);
+ pTP = track.find(seekTimeNs);
+ } else {
+ // The Cue index is built around video keyframes
+ for (size_t index = 0; index < trackCount; ++index) {
+ const mkvparser::Track *pTrack = pTracks->GetTrackByIndex(index);
+ if (pTrack && pTrack->GetType() == 1 && pCues->Find(seekTimeNs, pTrack, pCP, pTP)) {
+ ALOGV("Video track located at %zu", index);
+ break;
+ }
}
}
+
// Always *search* based on the video track, but finalize based on mTrackNum
- const mkvparser::CuePoint::TrackPosition* pTP;
- if (pTrack && pTrack->GetType() == 1) {
- pCues->Find(seekTimeNs, pTrack, pCP, pTP);
- } else {
+ if (!pTP) {
ALOGE("Did not locate the video track for seeking");
return;
}
@@ -408,10 +472,13 @@ void BlockIterator::seek(
if (isAudio || block()->IsKey()) {
// Accept the first key frame
- *actualFrameTimeUs = (block()->GetTime(mCluster) + 500LL) / 1000LL;
- ALOGV("Requested seek point: %lld actual: %lld",
- seekTimeUs, actualFrameTimeUs);
- break;
+ int64_t frameTimeUs = (block()->GetTime(mCluster) + 500LL) / 1000LL;
+ if (thisTrack->GetType() == 1 || frameTimeUs >= seekTimeUs) {
+ *actualFrameTimeUs = frameTimeUs;
+ ALOGV("Requested seek point: %" PRId64 " actual: %" PRId64,
+ seekTimeUs, *actualFrameTimeUs);
+ break;
+ }
}
}
}
@@ -628,7 +695,8 @@ MatroskaExtractor::MatroskaExtractor(const sp<DataSource> &source)
mReader(new DataSourceReader(mDataSource)),
mSegment(NULL),
mExtractedThumbnails(false),
- mIsWebm(false) {
+ mIsWebm(false),
+ mSeekPreRollNs(0) {
off64_t size;
mIsLiveStreaming =
(mDataSource->flags()
@@ -654,14 +722,22 @@ MatroskaExtractor::MatroskaExtractor(const sp<DataSource> &source)
return;
}
+ // from mkvparser::Segment::Load(), but stop at first cluster
ret = mSegment->ParseHeaders();
- CHECK_EQ(ret, 0);
-
- long len;
- ret = mSegment->LoadCluster(pos, len);
- CHECK_EQ(ret, 0);
+ if (ret == 0) {
+ long len;
+ ret = mSegment->LoadCluster(pos, len);
+ if (ret >= 1) {
+ // no more clusters
+ ret = 0;
+ }
+ } else if (ret > 0) {
+ ret = mkvparser::E_BUFFER_NOT_FULL;
+ }
if (ret < 0) {
+ ALOGW("Corrupt %s source: %s", mIsWebm ? "webm" : "matroska",
+ uriDebugString(mDataSource->getUri()).c_str());
delete mSegment;
mSegment = NULL;
return;
@@ -919,6 +995,12 @@ void MatroskaExtractor::addTracks() {
err = addVorbisCodecInfo(
meta, codecPrivate, codecPrivateSize);
+ } else if (!strcmp("A_OPUS", codecID)) {
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_OPUS);
+ meta->setData(kKeyOpusHeader, 0, codecPrivate, codecPrivateSize);
+ meta->setInt64(kKeyOpusCodecDelay, track->GetCodecDelay());
+ meta->setInt64(kKeyOpusSeekPreRoll, track->GetSeekPreRoll());
+ mSeekPreRollNs = track->GetSeekPreRoll();
} else if (!strcmp("A_MPEG/L3", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
} else {
@@ -947,6 +1029,7 @@ void MatroskaExtractor::addTracks() {
TrackInfo *trackInfo = &mTracks.editItemAt(mTracks.size() - 1);
trackInfo->mTrackNum = track->GetNumber();
trackInfo->mMeta = meta;
+ trackInfo->mExtractor = this;
}
}
@@ -961,7 +1044,7 @@ void MatroskaExtractor::findThumbnails() {
continue;
}
- BlockIterator iter(this, info->mTrackNum);
+ BlockIterator iter(this, info->mTrackNum, i);
int32_t j = 0;
int64_t thumbnailTimeUs = 0;
size_t maxBlockSize = 0;
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
index 1294b4f..db36bf8 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ b/media/libstagefright/matroska/MatroskaExtractor.h
@@ -18,14 +18,12 @@
#define MATROSKA_EXTRACTOR_H_
+#include "mkvparser.hpp"
+
#include <media/stagefright/MediaExtractor.h>
#include <utils/Vector.h>
#include <utils/threads.h>
-namespace mkvparser {
-struct Segment;
-};
-
namespace android {
struct AMessage;
@@ -58,6 +56,11 @@ private:
struct TrackInfo {
unsigned long mTrackNum;
sp<MetaData> mMeta;
+ const MatroskaExtractor *mExtractor;
+ Vector<const mkvparser::CuePoint*> mCuePoints;
+
+ const mkvparser::Track* getTrack() const;
+ const mkvparser::CuePoint::TrackPosition *find(long long timeNs) const;
};
Mutex mLock;
@@ -69,6 +72,7 @@ private:
bool mExtractedThumbnails;
bool mIsLiveStreaming;
bool mIsWebm;
+ int64_t mSeekPreRollNs;
void addTracks();
void findThumbnails();
diff --git a/media/libstagefright/mp4/FragmentedMP4Parser.cpp b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
deleted file mode 100644
index 0102656..0000000
--- a/media/libstagefright/mp4/FragmentedMP4Parser.cpp
+++ /dev/null
@@ -1,1993 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "FragmentedMP4Parser"
-#include <utils/Log.h>
-
-#include "include/avc_utils.h"
-#include "include/ESDS.h"
-#include "include/FragmentedMP4Parser.h"
-#include "TrackFragment.h"
-
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-
-
-namespace android {
-
-static const char *Fourcc2String(uint32_t fourcc) {
- static char buffer[5];
- buffer[4] = '\0';
- buffer[0] = fourcc >> 24;
- buffer[1] = (fourcc >> 16) & 0xff;
- buffer[2] = (fourcc >> 8) & 0xff;
- buffer[3] = fourcc & 0xff;
-
- return buffer;
-}
-
-static const char *IndentString(size_t n) {
- static const char kSpace[] = " ";
- return kSpace + sizeof(kSpace) - 2 * n - 1;
-}
-
-// static
-const FragmentedMP4Parser::DispatchEntry FragmentedMP4Parser::kDispatchTable[] = {
- { FOURCC('m', 'o', 'o', 'v'), 0, NULL },
- { FOURCC('t', 'r', 'a', 'k'), FOURCC('m', 'o', 'o', 'v'), NULL },
- { FOURCC('u', 'd', 't', 'a'), FOURCC('t', 'r', 'a', 'k'), NULL },
- { FOURCC('u', 'd', 't', 'a'), FOURCC('m', 'o', 'o', 'v'), NULL },
- { FOURCC('m', 'e', 't', 'a'), FOURCC('u', 'd', 't', 'a'), NULL },
- { FOURCC('i', 'l', 's', 't'), FOURCC('m', 'e', 't', 'a'), NULL },
-
- { FOURCC('t', 'k', 'h', 'd'), FOURCC('t', 'r', 'a', 'k'),
- &FragmentedMP4Parser::parseTrackHeader
- },
-
- { FOURCC('m', 'v', 'e', 'x'), FOURCC('m', 'o', 'o', 'v'), NULL },
-
- { FOURCC('t', 'r', 'e', 'x'), FOURCC('m', 'v', 'e', 'x'),
- &FragmentedMP4Parser::parseTrackExtends
- },
-
- { FOURCC('e', 'd', 't', 's'), FOURCC('t', 'r', 'a', 'k'), NULL },
- { FOURCC('m', 'd', 'i', 'a'), FOURCC('t', 'r', 'a', 'k'), NULL },
-
- { FOURCC('m', 'd', 'h', 'd'), FOURCC('m', 'd', 'i', 'a'),
- &FragmentedMP4Parser::parseMediaHeader
- },
-
- { FOURCC('h', 'd', 'l', 'r'), FOURCC('m', 'd', 'i', 'a'),
- &FragmentedMP4Parser::parseMediaHandler
- },
-
- { FOURCC('m', 'i', 'n', 'f'), FOURCC('m', 'd', 'i', 'a'), NULL },
- { FOURCC('d', 'i', 'n', 'f'), FOURCC('m', 'i', 'n', 'f'), NULL },
- { FOURCC('s', 't', 'b', 'l'), FOURCC('m', 'i', 'n', 'f'), NULL },
- { FOURCC('s', 't', 's', 'd'), FOURCC('s', 't', 'b', 'l'), NULL },
-
- { FOURCC('s', 't', 's', 'z'), FOURCC('s', 't', 'b', 'l'),
- &FragmentedMP4Parser::parseSampleSizes },
-
- { FOURCC('s', 't', 'z', '2'), FOURCC('s', 't', 'b', 'l'),
- &FragmentedMP4Parser::parseCompactSampleSizes },
-
- { FOURCC('s', 't', 's', 'c'), FOURCC('s', 't', 'b', 'l'),
- &FragmentedMP4Parser::parseSampleToChunk },
-
- { FOURCC('s', 't', 'c', 'o'), FOURCC('s', 't', 'b', 'l'),
- &FragmentedMP4Parser::parseChunkOffsets },
-
- { FOURCC('c', 'o', '6', '4'), FOURCC('s', 't', 'b', 'l'),
- &FragmentedMP4Parser::parseChunkOffsets64 },
-
- { FOURCC('a', 'v', 'c', 'C'), FOURCC('a', 'v', 'c', '1'),
- &FragmentedMP4Parser::parseAVCCodecSpecificData },
-
- { FOURCC('e', 's', 'd', 's'), FOURCC('m', 'p', '4', 'a'),
- &FragmentedMP4Parser::parseESDSCodecSpecificData },
-
- { FOURCC('e', 's', 'd', 's'), FOURCC('m', 'p', '4', 'v'),
- &FragmentedMP4Parser::parseESDSCodecSpecificData },
-
- { FOURCC('m', 'd', 'a', 't'), 0, &FragmentedMP4Parser::parseMediaData },
-
- { FOURCC('m', 'o', 'o', 'f'), 0, NULL },
- { FOURCC('t', 'r', 'a', 'f'), FOURCC('m', 'o', 'o', 'f'), NULL },
-
- { FOURCC('t', 'f', 'h', 'd'), FOURCC('t', 'r', 'a', 'f'),
- &FragmentedMP4Parser::parseTrackFragmentHeader
- },
- { FOURCC('t', 'r', 'u', 'n'), FOURCC('t', 'r', 'a', 'f'),
- &FragmentedMP4Parser::parseTrackFragmentRun
- },
-
- { FOURCC('m', 'f', 'r', 'a'), 0, NULL },
-
- { FOURCC('s', 'i', 'd', 'x'), 0, &FragmentedMP4Parser::parseSegmentIndex },
-};
-
-struct FileSource : public FragmentedMP4Parser::Source {
- FileSource(const char *filename)
- : mFile(fopen(filename, "rb")) {
- CHECK(mFile != NULL);
- }
-
- virtual ~FileSource() {
- fclose(mFile);
- }
-
- virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
- fseek(mFile, offset, SEEK_SET);
- return fread(data, 1, size, mFile);
- }
-
- virtual bool isSeekable() {
- return true;
- }
-
- private:
- FILE *mFile;
-
- DISALLOW_EVIL_CONSTRUCTORS(FileSource);
-};
-
-struct ReadTracker : public RefBase {
- ReadTracker(off64_t size) {
- allocSize = 1 + size / 8192; // 1 bit per kilobyte
- bitmap = (char*) calloc(1, allocSize);
- }
- virtual ~ReadTracker() {
- dumpToLog();
- free(bitmap);
- }
- void mark(off64_t offset, size_t size) {
- int firstbit = offset / 1024;
- int lastbit = (offset + size - 1) / 1024;
- for (int i = firstbit; i <= lastbit; i++) {
- bitmap[i/8] |= (0x80 >> (i & 7));
- }
- }
-
- private:
- void dumpToLog() {
- // 96 chars per line, each char represents one kilobyte, 1 kb per bit
- int numlines = allocSize / 12;
- char buf[97];
- char *cur = bitmap;
- for (int i = 0; i < numlines; i++ && cur) {
- for (int j = 0; j < 12; j++) {
- for (int k = 0; k < 8; k++) {
- buf[(j * 8) + k] = (*cur & (0x80 >> k)) ? 'X' : '.';
- }
- cur++;
- }
- buf[96] = '\0';
- ALOGI("%5dk: %s", i * 96, buf);
- }
- }
-
- size_t allocSize;
- char *bitmap;
-};
-
-struct DataSourceSource : public FragmentedMP4Parser::Source {
- DataSourceSource(sp<DataSource> &source)
- : mDataSource(source) {
- CHECK(mDataSource != NULL);
-#if 0
- off64_t size;
- if (source->getSize(&size) == OK) {
- mReadTracker = new ReadTracker(size);
- } else {
- ALOGE("couldn't get data source size");
- }
-#endif
- }
-
- virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
- if (mReadTracker != NULL) {
- mReadTracker->mark(offset, size);
- }
- return mDataSource->readAt(offset, data, size);
- }
-
- virtual bool isSeekable() {
- return true;
- }
-
- private:
- sp<DataSource> mDataSource;
- sp<ReadTracker> mReadTracker;
-
- DISALLOW_EVIL_CONSTRUCTORS(DataSourceSource);
-};
-
-FragmentedMP4Parser::FragmentedMP4Parser()
- : mBufferPos(0),
- mSuspended(false),
- mDoneWithMoov(false),
- mFirstMoofOffset(0),
- mFinalResult(OK) {
-}
-
-FragmentedMP4Parser::~FragmentedMP4Parser() {
-}
-
-void FragmentedMP4Parser::start(const char *filename) {
- sp<AMessage> msg = new AMessage(kWhatStart, id());
- msg->setObject("source", new FileSource(filename));
- msg->post();
- ALOGV("Parser::start(%s)", filename);
-}
-
-void FragmentedMP4Parser::start(const sp<Source> &source) {
- sp<AMessage> msg = new AMessage(kWhatStart, id());
- msg->setObject("source", source);
- msg->post();
- ALOGV("Parser::start(Source)");
-}
-
-void FragmentedMP4Parser::start(sp<DataSource> &source) {
- sp<AMessage> msg = new AMessage(kWhatStart, id());
- msg->setObject("source", new DataSourceSource(source));
- msg->post();
- ALOGV("Parser::start(DataSource)");
-}
-
-sp<AMessage> FragmentedMP4Parser::getFormat(bool audio, bool synchronous) {
-
- while (true) {
- bool moovDone = mDoneWithMoov;
- sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
- msg->setInt32("audio", audio);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
-
- if (err != OK) {
- ALOGV("getFormat post failed: %d", err);
- return NULL;
- }
-
- if (response->findInt32("err", &err) && err != OK) {
- if (synchronous && err == -EWOULDBLOCK && !moovDone) {
- resumeIfNecessary();
- ALOGV("@getFormat parser not ready yet, retrying");
- usleep(10000);
- continue;
- }
- ALOGV("getFormat failed: %d", err);
- return NULL;
- }
-
- sp<AMessage> format;
- CHECK(response->findMessage("format", &format));
-
- ALOGV("returning format %s", format->debugString().c_str());
- return format;
- }
-}
-
-status_t FragmentedMP4Parser::seekTo(bool wantAudio, int64_t timeUs) {
- sp<AMessage> msg = new AMessage(kWhatSeekTo, id());
- msg->setInt32("audio", wantAudio);
- msg->setInt64("position", timeUs);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- return err;
-}
-
-bool FragmentedMP4Parser::isSeekable() const {
- while (mFirstMoofOffset == 0 && mFinalResult == OK) {
- usleep(10000);
- }
- bool seekable = mSource->isSeekable();
- for (size_t i = 0; seekable && i < mTracks.size(); i++) {
- const TrackInfo *info = &mTracks.valueAt(i);
- seekable &= !info->mSidx.empty();
- }
- return seekable;
-}
-
-status_t FragmentedMP4Parser::onSeekTo(bool wantAudio, int64_t position) {
- status_t err = -EINVAL;
- ssize_t trackIndex = findTrack(wantAudio);
- if (trackIndex < 0) {
- err = trackIndex;
- } else {
- TrackInfo *info = &mTracks.editValueAt(trackIndex);
-
- int numSidxEntries = info->mSidx.size();
- int64_t totalTime = 0;
- off_t totalOffset = mFirstMoofOffset;
- for (int i = 0; i < numSidxEntries; i++) {
- const SidxEntry *se = &info->mSidx[i];
- if (totalTime + se->mDurationUs > position) {
- mBuffer->setRange(0,0);
- mBufferPos = totalOffset;
- if (mFinalResult == ERROR_END_OF_STREAM) {
- mFinalResult = OK;
- mSuspended = true; // force resume
- resumeIfNecessary();
- }
- info->mFragments.clear();
- info->mDecodingTime = totalTime * info->mMediaTimeScale / 1000000ll;
- return OK;
- }
- totalTime += se->mDurationUs;
- totalOffset += se->mSize;
- }
- }
- ALOGV("seekTo out of range");
- return err;
-}
-
-status_t FragmentedMP4Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit,
- bool synchronous) {
-
- while (true) {
- sp<AMessage> msg = new AMessage(kWhatDequeueAccessUnit, id());
- msg->setInt32("audio", audio);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
-
- if (err != OK) {
- ALOGV("dequeue fail 1: %d", err);
- return err;
- }
-
- if (response->findInt32("err", &err) && err != OK) {
- if (synchronous && err == -EWOULDBLOCK) {
- resumeIfNecessary();
- ALOGV("Parser not ready yet, retrying");
- usleep(10000);
- continue;
- }
- ALOGV("dequeue fail 2: %d, %d", err, synchronous);
- return err;
- }
-
- CHECK(response->findBuffer("accessUnit", accessUnit));
-
- return OK;
- }
-}
-
-ssize_t FragmentedMP4Parser::findTrack(bool wantAudio) const {
- for (size_t i = 0; i < mTracks.size(); ++i) {
- const TrackInfo *info = &mTracks.valueAt(i);
-
- bool isAudio =
- info->mMediaHandlerType == FOURCC('s', 'o', 'u', 'n');
-
- bool isVideo =
- info->mMediaHandlerType == FOURCC('v', 'i', 'd', 'e');
-
- if ((wantAudio && isAudio) || (!wantAudio && !isAudio)) {
- if (info->mSampleDescs.empty()) {
- break;
- }
-
- return i;
- }
- }
-
- return -EWOULDBLOCK;
-}
-
-void FragmentedMP4Parser::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatStart:
- {
- sp<RefBase> obj;
- CHECK(msg->findObject("source", &obj));
-
- mSource = static_cast<Source *>(obj.get());
-
- mBuffer = new ABuffer(512 * 1024);
- mBuffer->setRange(0, 0);
-
- enter(0ll, 0, 0);
-
- (new AMessage(kWhatProceed, id()))->post();
- break;
- }
-
- case kWhatProceed:
- {
- CHECK(!mSuspended);
-
- status_t err = onProceed();
-
- if (err == OK) {
- if (!mSuspended) {
- msg->post();
- }
- } else if (err != -EAGAIN) {
- ALOGE("onProceed returned error %d", err);
- }
-
- break;
- }
-
- case kWhatReadMore:
- {
- size_t needed;
- CHECK(msg->findSize("needed", &needed));
-
- memmove(mBuffer->base(), mBuffer->data(), mBuffer->size());
- mBufferPos += mBuffer->offset();
- mBuffer->setRange(0, mBuffer->size());
-
- size_t maxBytesToRead = mBuffer->capacity() - mBuffer->size();
-
- if (maxBytesToRead < needed) {
- ALOGV("resizing buffer.");
-
- sp<ABuffer> newBuffer =
- new ABuffer((mBuffer->size() + needed + 1023) & ~1023);
- memcpy(newBuffer->data(), mBuffer->data(), mBuffer->size());
- newBuffer->setRange(0, mBuffer->size());
-
- mBuffer = newBuffer;
- maxBytesToRead = mBuffer->capacity() - mBuffer->size();
- }
-
- CHECK_GE(maxBytesToRead, needed);
-
- ssize_t n = mSource->readAt(
- mBufferPos + mBuffer->size(),
- mBuffer->data() + mBuffer->size(), needed);
-
- if (n < (ssize_t)needed) {
- ALOGV("Reached EOF when reading %d @ %d + %d", needed, mBufferPos, mBuffer->size());
- if (n < 0) {
- mFinalResult = n;
- } else if (n == 0) {
- mFinalResult = ERROR_END_OF_STREAM;
- } else {
- mFinalResult = ERROR_IO;
- }
- } else {
- mBuffer->setRange(0, mBuffer->size() + n);
- (new AMessage(kWhatProceed, id()))->post();
- }
-
- break;
- }
-
- case kWhatGetFormat:
- {
- int32_t wantAudio;
- CHECK(msg->findInt32("audio", &wantAudio));
-
- status_t err = -EWOULDBLOCK;
- sp<AMessage> response = new AMessage;
-
- ssize_t trackIndex = findTrack(wantAudio);
-
- if (trackIndex < 0) {
- err = trackIndex;
- } else {
- TrackInfo *info = &mTracks.editValueAt(trackIndex);
-
- sp<AMessage> format = info->mSampleDescs.itemAt(0).mFormat;
- if (info->mSidxDuration) {
- format->setInt64("durationUs", info->mSidxDuration);
- } else {
- // this is probably going to be zero. Oh well...
- format->setInt64("durationUs",
- 1000000ll * info->mDuration / info->mMediaTimeScale);
- }
- response->setMessage(
- "format", format);
-
- err = OK;
- }
-
- response->setInt32("err", err);
-
- uint32_t replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- response->postReply(replyID);
- break;
- }
-
- case kWhatDequeueAccessUnit:
- {
- int32_t wantAudio;
- CHECK(msg->findInt32("audio", &wantAudio));
-
- status_t err = -EWOULDBLOCK;
- sp<AMessage> response = new AMessage;
-
- ssize_t trackIndex = findTrack(wantAudio);
-
- if (trackIndex < 0) {
- err = trackIndex;
- } else {
- sp<ABuffer> accessUnit;
- err = onDequeueAccessUnit(trackIndex, &accessUnit);
-
- if (err == OK) {
- response->setBuffer("accessUnit", accessUnit);
- }
- }
-
- response->setInt32("err", err);
-
- uint32_t replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- response->postReply(replyID);
- break;
- }
-
- case kWhatSeekTo:
- {
- ALOGV("kWhatSeekTo");
- int32_t wantAudio;
- CHECK(msg->findInt32("audio", &wantAudio));
- int64_t position;
- CHECK(msg->findInt64("position", &position));
-
- status_t err = -EWOULDBLOCK;
- sp<AMessage> response = new AMessage;
-
- ssize_t trackIndex = findTrack(wantAudio);
-
- if (trackIndex < 0) {
- err = trackIndex;
- } else {
- err = onSeekTo(wantAudio, position);
- }
- response->setInt32("err", err);
- uint32_t replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
- break;
- }
- default:
- TRESPASS();
- }
-}
-
-status_t FragmentedMP4Parser::onProceed() {
- status_t err;
-
- if ((err = need(8)) != OK) {
- return err;
- }
-
- uint64_t size = readU32(0);
- uint32_t type = readU32(4);
-
- size_t offset = 8;
-
- if (size == 1) {
- if ((err = need(16)) != OK) {
- return err;
- }
-
- size = readU64(offset);
- offset += 8;
- }
-
- uint8_t userType[16];
-
- if (type == FOURCC('u', 'u', 'i', 'd')) {
- if ((err = need(offset + 16)) != OK) {
- return err;
- }
-
- memcpy(userType, mBuffer->data() + offset, 16);
- offset += 16;
- }
-
- CHECK(!mStack.isEmpty());
- uint32_t ptype = mStack.itemAt(mStack.size() - 1).mType;
-
- static const size_t kNumDispatchers =
- sizeof(kDispatchTable) / sizeof(kDispatchTable[0]);
-
- size_t i;
- for (i = 0; i < kNumDispatchers; ++i) {
- if (kDispatchTable[i].mType == type
- && kDispatchTable[i].mParentType == ptype) {
- break;
- }
- }
-
- // SampleEntry boxes are container boxes that start with a variable
- // amount of data depending on the media handler type.
- // We don't look inside 'hint' type SampleEntry boxes.
-
- bool isSampleEntryBox =
- (ptype == FOURCC('s', 't', 's', 'd'))
- && editTrack(mCurrentTrackID)->mMediaHandlerType
- != FOURCC('h', 'i', 'n', 't');
-
- if ((i < kNumDispatchers && kDispatchTable[i].mHandler == 0)
- || isSampleEntryBox || ptype == FOURCC('i', 'l', 's', 't')) {
- // This is a container box.
- if (type == FOURCC('m', 'o', 'o', 'f')) {
- if (mFirstMoofOffset == 0) {
- ALOGV("first moof @ %08x", mBufferPos + offset);
- mFirstMoofOffset = mBufferPos + offset - 8; // point at the size
- }
- }
- if (type == FOURCC('m', 'e', 't', 'a')) {
- if ((err = need(offset + 4)) < OK) {
- return err;
- }
-
- if (readU32(offset) != 0) {
- return -EINVAL;
- }
-
- offset += 4;
- } else if (type == FOURCC('s', 't', 's', 'd')) {
- if ((err = need(offset + 8)) < OK) {
- return err;
- }
-
- if (readU32(offset) != 0) {
- return -EINVAL;
- }
-
- if (readU32(offset + 4) == 0) {
- // We need at least some entries.
- return -EINVAL;
- }
-
- offset += 8;
- } else if (isSampleEntryBox) {
- size_t headerSize;
-
- switch (editTrack(mCurrentTrackID)->mMediaHandlerType) {
- case FOURCC('v', 'i', 'd', 'e'):
- {
- // 8 bytes SampleEntry + 70 bytes VisualSampleEntry
- headerSize = 78;
- break;
- }
-
- case FOURCC('s', 'o', 'u', 'n'):
- {
- // 8 bytes SampleEntry + 20 bytes AudioSampleEntry
- headerSize = 28;
- break;
- }
-
- case FOURCC('m', 'e', 't', 'a'):
- {
- headerSize = 8; // 8 bytes SampleEntry
- break;
- }
-
- default:
- TRESPASS();
- }
-
- if (offset + headerSize > size) {
- return -EINVAL;
- }
-
- if ((err = need(offset + headerSize)) != OK) {
- return err;
- }
-
- switch (editTrack(mCurrentTrackID)->mMediaHandlerType) {
- case FOURCC('v', 'i', 'd', 'e'):
- {
- err = parseVisualSampleEntry(
- type, offset, offset + headerSize);
- break;
- }
-
- case FOURCC('s', 'o', 'u', 'n'):
- {
- err = parseAudioSampleEntry(
- type, offset, offset + headerSize);
- break;
- }
-
- case FOURCC('m', 'e', 't', 'a'):
- {
- err = OK;
- break;
- }
-
- default:
- TRESPASS();
- }
-
- if (err != OK) {
- return err;
- }
-
- offset += headerSize;
- }
-
- skip(offset);
-
- ALOGV("%sentering box of type '%s'",
- IndentString(mStack.size()), Fourcc2String(type));
-
- enter(mBufferPos - offset, type, size - offset);
- } else {
- if (!fitsContainer(size)) {
- return -EINVAL;
- }
-
- if (i < kNumDispatchers && kDispatchTable[i].mHandler != 0) {
- // We have a handler for this box type.
-
- if ((err = need(size)) != OK) {
- return err;
- }
-
- ALOGV("%sparsing box of type '%s'",
- IndentString(mStack.size()), Fourcc2String(type));
-
- if ((err = (this->*kDispatchTable[i].mHandler)(
- type, offset, size)) != OK) {
- return err;
- }
- } else {
- // Unknown box type
-
- ALOGV("%sskipping box of type '%s', size %llu",
- IndentString(mStack.size()),
- Fourcc2String(type), size);
-
- }
-
- skip(size);
- }
-
- return OK;
-}
-
-// static
-int FragmentedMP4Parser::CompareSampleLocation(
- const SampleInfo &sample, const MediaDataInfo &mdatInfo) {
- if (sample.mOffset + sample.mSize < mdatInfo.mOffset) {
- return -1;
- }
-
- if (sample.mOffset >= mdatInfo.mOffset + mdatInfo.mBuffer->size()) {
- return 1;
- }
-
- // Otherwise make sure the sample is completely contained within this
- // media data block.
-
- CHECK_GE(sample.mOffset, mdatInfo.mOffset);
-
- CHECK_LE(sample.mOffset + sample.mSize,
- mdatInfo.mOffset + mdatInfo.mBuffer->size());
-
- return 0;
-}
-
-void FragmentedMP4Parser::resumeIfNecessary() {
- if (!mSuspended) {
- return;
- }
-
- ALOGV("resuming.");
-
- mSuspended = false;
- (new AMessage(kWhatProceed, id()))->post();
-}
-
-status_t FragmentedMP4Parser::getSample(
- TrackInfo *info, sp<TrackFragment> *fragment, SampleInfo *sampleInfo) {
- for (;;) {
- if (info->mFragments.empty()) {
- if (mFinalResult != OK) {
- return mFinalResult;
- }
-
- resumeIfNecessary();
- return -EWOULDBLOCK;
- }
-
- *fragment = *info->mFragments.begin();
-
- status_t err = (*fragment)->getSample(sampleInfo);
-
- if (err == OK) {
- return OK;
- } else if (err != ERROR_END_OF_STREAM) {
- return err;
- }
-
- // Really, end of this fragment...
-
- info->mFragments.erase(info->mFragments.begin());
- }
-}
-
-status_t FragmentedMP4Parser::onDequeueAccessUnit(
- size_t trackIndex, sp<ABuffer> *accessUnit) {
- TrackInfo *info = &mTracks.editValueAt(trackIndex);
-
- sp<TrackFragment> fragment;
- SampleInfo sampleInfo;
- status_t err = getSample(info, &fragment, &sampleInfo);
-
- if (err == -EWOULDBLOCK) {
- resumeIfNecessary();
- return err;
- } else if (err != OK) {
- return err;
- }
-
- err = -EWOULDBLOCK;
-
- bool checkDroppable = false;
-
- for (size_t i = 0; i < mMediaData.size(); ++i) {
- const MediaDataInfo &mdatInfo = mMediaData.itemAt(i);
-
- int cmp = CompareSampleLocation(sampleInfo, mdatInfo);
-
- if (cmp < 0 && !mSource->isSeekable()) {
- return -EPIPE;
- } else if (cmp == 0) {
- if (i > 0) {
- checkDroppable = true;
- }
-
- err = makeAccessUnit(info, sampleInfo, mdatInfo, accessUnit);
- break;
- }
- }
-
- if (err != OK) {
- return err;
- }
-
- fragment->advance();
-
- if (!mMediaData.empty() && checkDroppable) {
- size_t numDroppable = 0;
- bool done = false;
-
- // XXX FIXME: if one of the tracks is not advanced (e.g. if you play an audio+video
- // file with sf2), then mMediaData will not be pruned and keeps growing
- for (size_t i = 0; !done && i < mMediaData.size(); ++i) {
- const MediaDataInfo &mdatInfo = mMediaData.itemAt(i);
-
- for (size_t j = 0; j < mTracks.size(); ++j) {
- TrackInfo *info = &mTracks.editValueAt(j);
-
- sp<TrackFragment> fragment;
- SampleInfo sampleInfo;
- err = getSample(info, &fragment, &sampleInfo);
-
- if (err != OK) {
- done = true;
- break;
- }
-
- int cmp = CompareSampleLocation(sampleInfo, mdatInfo);
-
- if (cmp <= 0) {
- done = true;
- break;
- }
- }
-
- if (!done) {
- ++numDroppable;
- }
- }
-
- if (numDroppable > 0) {
- mMediaData.removeItemsAt(0, numDroppable);
-
- if (mMediaData.size() < 5) {
- resumeIfNecessary();
- }
- }
- }
-
- return err;
-}
-
-static size_t parseNALSize(size_t nalLengthSize, const uint8_t *data) {
- switch (nalLengthSize) {
- case 1:
- return *data;
- case 2:
- return U16_AT(data);
- case 3:
- return ((size_t)data[0] << 16) | U16_AT(&data[1]);
- case 4:
- return U32_AT(data);
- }
-
- // This cannot happen, mNALLengthSize springs to life by adding 1 to
- // a 2-bit integer.
- TRESPASS();
-
- return 0;
-}
-
-status_t FragmentedMP4Parser::makeAccessUnit(
- TrackInfo *info,
- const SampleInfo &sample,
- const MediaDataInfo &mdatInfo,
- sp<ABuffer> *accessUnit) {
- if (sample.mSampleDescIndex < 1
- || sample.mSampleDescIndex > info->mSampleDescs.size()) {
- return ERROR_MALFORMED;
- }
-
- int64_t presentationTimeUs =
- 1000000ll * sample.mPresentationTime / info->mMediaTimeScale;
-
- const SampleDescription &sampleDesc =
- info->mSampleDescs.itemAt(sample.mSampleDescIndex - 1);
-
- size_t nalLengthSize;
- if (!sampleDesc.mFormat->findSize("nal-length-size", &nalLengthSize)) {
- *accessUnit = new ABuffer(sample.mSize);
-
- memcpy((*accessUnit)->data(),
- mdatInfo.mBuffer->data() + (sample.mOffset - mdatInfo.mOffset),
- sample.mSize);
-
- (*accessUnit)->meta()->setInt64("timeUs", presentationTimeUs);
- if (IsIDR(*accessUnit)) {
- (*accessUnit)->meta()->setInt32("is-sync-frame", 1);
- }
-
- return OK;
- }
-
- const uint8_t *srcPtr =
- mdatInfo.mBuffer->data() + (sample.mOffset - mdatInfo.mOffset);
-
- for (int i = 0; i < 2 ; ++i) {
- size_t srcOffset = 0;
- size_t dstOffset = 0;
-
- while (srcOffset < sample.mSize) {
- if (srcOffset + nalLengthSize > sample.mSize) {
- return ERROR_MALFORMED;
- }
-
- size_t nalSize = parseNALSize(nalLengthSize, &srcPtr[srcOffset]);
- srcOffset += nalLengthSize;
-
- if (srcOffset + nalSize > sample.mSize) {
- return ERROR_MALFORMED;
- }
-
- if (i == 1) {
- memcpy((*accessUnit)->data() + dstOffset,
- "\x00\x00\x00\x01",
- 4);
-
- memcpy((*accessUnit)->data() + dstOffset + 4,
- srcPtr + srcOffset,
- nalSize);
- }
-
- srcOffset += nalSize;
- dstOffset += nalSize + 4;
- }
-
- if (i == 0) {
- (*accessUnit) = new ABuffer(dstOffset);
- (*accessUnit)->meta()->setInt64(
- "timeUs", presentationTimeUs);
- }
- }
- if (IsIDR(*accessUnit)) {
- (*accessUnit)->meta()->setInt32("is-sync-frame", 1);
- }
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::need(size_t size) {
- if (!fitsContainer(size)) {
- return -EINVAL;
- }
-
- if (size <= mBuffer->size()) {
- return OK;
- }
-
- sp<AMessage> msg = new AMessage(kWhatReadMore, id());
- msg->setSize("needed", size - mBuffer->size());
- msg->post();
-
- // ALOGV("need(%d) returning -EAGAIN, only have %d", size, mBuffer->size());
-
- return -EAGAIN;
-}
-
-void FragmentedMP4Parser::enter(off64_t offset, uint32_t type, uint64_t size) {
- Container container;
- container.mOffset = offset;
- container.mType = type;
- container.mExtendsToEOF = (size == 0);
- container.mBytesRemaining = size;
-
- mStack.push(container);
-}
-
-bool FragmentedMP4Parser::fitsContainer(uint64_t size) const {
- CHECK(!mStack.isEmpty());
- const Container &container = mStack.itemAt(mStack.size() - 1);
-
- return container.mExtendsToEOF || size <= container.mBytesRemaining;
-}
-
-uint16_t FragmentedMP4Parser::readU16(size_t offset) {
- CHECK_LE(offset + 2, mBuffer->size());
-
- const uint8_t *ptr = mBuffer->data() + offset;
- return (ptr[0] << 8) | ptr[1];
-}
-
-uint32_t FragmentedMP4Parser::readU32(size_t offset) {
- CHECK_LE(offset + 4, mBuffer->size());
-
- const uint8_t *ptr = mBuffer->data() + offset;
- return (ptr[0] << 24) | (ptr[1] << 16) | (ptr[2] << 8) | ptr[3];
-}
-
-uint64_t FragmentedMP4Parser::readU64(size_t offset) {
- return (((uint64_t)readU32(offset)) << 32) | readU32(offset + 4);
-}
-
-void FragmentedMP4Parser::skip(off_t distance) {
- CHECK(!mStack.isEmpty());
- for (size_t i = mStack.size(); i-- > 0;) {
- Container *container = &mStack.editItemAt(i);
- if (!container->mExtendsToEOF) {
- CHECK_LE(distance, (off_t)container->mBytesRemaining);
-
- container->mBytesRemaining -= distance;
-
- if (container->mBytesRemaining == 0) {
- ALOGV("%sleaving box of type '%s'",
- IndentString(mStack.size() - 1),
- Fourcc2String(container->mType));
-
-#if 0
- if (container->mType == FOURCC('s', 't', 's', 'd')) {
- TrackInfo *trackInfo = editTrack(mCurrentTrackID);
- for (size_t i = 0;
- i < trackInfo->mSampleDescs.size(); ++i) {
- ALOGI("format #%d: %s",
- i,
- trackInfo->mSampleDescs.itemAt(i)
- .mFormat->debugString().c_str());
- }
- }
-#endif
-
- if (container->mType == FOURCC('s', 't', 'b', 'l')) {
- TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
- trackInfo->mStaticFragment->signalCompletion();
-
- CHECK(trackInfo->mFragments.empty());
- trackInfo->mFragments.push_back(trackInfo->mStaticFragment);
- trackInfo->mStaticFragment.clear();
- } else if (container->mType == FOURCC('t', 'r', 'a', 'f')) {
- TrackInfo *trackInfo =
- editTrack(mTrackFragmentHeaderInfo.mTrackID);
-
- const sp<TrackFragment> &fragment =
- *--trackInfo->mFragments.end();
-
- static_cast<DynamicTrackFragment *>(
- fragment.get())->signalCompletion();
- } else if (container->mType == FOURCC('m', 'o', 'o', 'v')) {
- mDoneWithMoov = true;
- }
-
- container = NULL;
- mStack.removeItemsAt(i);
- }
- }
- }
-
- if (distance < (off_t)mBuffer->size()) {
- mBuffer->setRange(mBuffer->offset() + distance, mBuffer->size() - distance);
- mBufferPos += distance;
- return;
- }
-
- mBuffer->setRange(0, 0);
- mBufferPos += distance;
-}
-
-status_t FragmentedMP4Parser::parseTrackHeader(
- uint32_t type, size_t offset, uint64_t size) {
- if (offset + 4 > size) {
- return -EINVAL;
- }
-
- uint32_t flags = readU32(offset);
-
- uint32_t version = flags >> 24;
- flags &= 0xffffff;
-
- uint32_t trackID;
- uint64_t duration;
-
- if (version == 1) {
- if (offset + 36 > size) {
- return -EINVAL;
- }
-
- trackID = readU32(offset + 20);
- duration = readU64(offset + 28);
-
- offset += 36;
- } else if (version == 0) {
- if (offset + 24 > size) {
- return -EINVAL;
- }
-
- trackID = readU32(offset + 12);
- duration = readU32(offset + 20);
-
- offset += 24;
- } else {
- return -EINVAL;
- }
-
- TrackInfo *info = editTrack(trackID, true /* createIfNecessary */);
- info->mFlags = flags;
- info->mDuration = duration;
- if (info->mDuration == 0xffffffff) {
- // ffmpeg sets this to -1, which is incorrect.
- info->mDuration = 0;
- }
-
- info->mStaticFragment = new StaticTrackFragment;
-
- mCurrentTrackID = trackID;
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseMediaHeader(
- uint32_t type, size_t offset, uint64_t size) {
- if (offset + 4 > size) {
- return -EINVAL;
- }
-
- uint32_t versionAndFlags = readU32(offset);
-
- if (versionAndFlags & 0xffffff) {
- return ERROR_MALFORMED;
- }
-
- uint32_t version = versionAndFlags >> 24;
-
- TrackInfo *info = editTrack(mCurrentTrackID);
-
- if (version == 1) {
- if (offset + 4 + 32 > size) {
- return -EINVAL;
- }
- info->mMediaTimeScale = U32_AT(mBuffer->data() + offset + 20);
- } else if (version == 0) {
- if (offset + 4 + 20 > size) {
- return -EINVAL;
- }
- info->mMediaTimeScale = U32_AT(mBuffer->data() + offset + 12);
- } else {
- return ERROR_MALFORMED;
- }
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseMediaHandler(
- uint32_t type, size_t offset, uint64_t size) {
- if (offset + 12 > size) {
- return -EINVAL;
- }
-
- if (readU32(offset) != 0) {
- return -EINVAL;
- }
-
- uint32_t handlerType = readU32(offset + 8);
-
- switch (handlerType) {
- case FOURCC('v', 'i', 'd', 'e'):
- case FOURCC('s', 'o', 'u', 'n'):
- case FOURCC('h', 'i', 'n', 't'):
- case FOURCC('m', 'e', 't', 'a'):
- break;
-
- default:
- return -EINVAL;
- }
-
- editTrack(mCurrentTrackID)->mMediaHandlerType = handlerType;
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseVisualSampleEntry(
- uint32_t type, size_t offset, uint64_t size) {
- if (offset + 78 > size) {
- return -EINVAL;
- }
-
- TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
- trackInfo->mSampleDescs.push();
- SampleDescription *sampleDesc =
- &trackInfo->mSampleDescs.editItemAt(
- trackInfo->mSampleDescs.size() - 1);
-
- sampleDesc->mType = type;
- sampleDesc->mDataRefIndex = readU16(offset + 6);
-
- sp<AMessage> format = new AMessage;
-
- switch (type) {
- case FOURCC('a', 'v', 'c', '1'):
- format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
- break;
- case FOURCC('m', 'p', '4', 'v'):
- format->setString("mime", MEDIA_MIMETYPE_VIDEO_MPEG4);
- break;
- case FOURCC('s', '2', '6', '3'):
- case FOURCC('h', '2', '6', '3'):
- case FOURCC('H', '2', '6', '3'):
- format->setString("mime", MEDIA_MIMETYPE_VIDEO_H263);
- break;
- default:
- format->setString("mime", "application/octet-stream");
- break;
- }
-
- format->setInt32("width", readU16(offset + 8 + 16));
- format->setInt32("height", readU16(offset + 8 + 18));
-
- sampleDesc->mFormat = format;
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseAudioSampleEntry(
- uint32_t type, size_t offset, uint64_t size) {
- if (offset + 28 > size) {
- return -EINVAL;
- }
-
- TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
- trackInfo->mSampleDescs.push();
- SampleDescription *sampleDesc =
- &trackInfo->mSampleDescs.editItemAt(
- trackInfo->mSampleDescs.size() - 1);
-
- sampleDesc->mType = type;
- sampleDesc->mDataRefIndex = readU16(offset + 6);
-
- sp<AMessage> format = new AMessage;
-
- format->setInt32("channel-count", readU16(offset + 8 + 8));
- format->setInt32("sample-size", readU16(offset + 8 + 10));
- format->setInt32("sample-rate", readU32(offset + 8 + 16) / 65536.0f);
-
- switch (type) {
- case FOURCC('m', 'p', '4', 'a'):
- format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
- break;
-
- case FOURCC('s', 'a', 'm', 'r'):
- format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_NB);
- format->setInt32("channel-count", 1);
- format->setInt32("sample-rate", 8000);
- break;
-
- case FOURCC('s', 'a', 'w', 'b'):
- format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_WB);
- format->setInt32("channel-count", 1);
- format->setInt32("sample-rate", 16000);
- break;
- default:
- format->setString("mime", "application/octet-stream");
- break;
- }
-
- sampleDesc->mFormat = format;
-
- return OK;
-}
-
-static void addCodecSpecificData(
- const sp<AMessage> &format, int32_t index,
- const void *data, size_t size,
- bool insertStartCode = false) {
- sp<ABuffer> csd = new ABuffer(insertStartCode ? size + 4 : size);
-
- memcpy(csd->data() + (insertStartCode ? 4 : 0), data, size);
-
- if (insertStartCode) {
- memcpy(csd->data(), "\x00\x00\x00\x01", 4);
- }
-
- csd->meta()->setInt32("csd", true);
- csd->meta()->setInt64("timeUs", 0ll);
-
- format->setBuffer(StringPrintf("csd-%d", index).c_str(), csd);
-}
-
-status_t FragmentedMP4Parser::parseSampleSizes(
- uint32_t type, size_t offset, uint64_t size) {
- return editTrack(mCurrentTrackID)->mStaticFragment->parseSampleSizes(
- this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseCompactSampleSizes(
- uint32_t type, size_t offset, uint64_t size) {
- return editTrack(mCurrentTrackID)->mStaticFragment->parseCompactSampleSizes(
- this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseSampleToChunk(
- uint32_t type, size_t offset, uint64_t size) {
- return editTrack(mCurrentTrackID)->mStaticFragment->parseSampleToChunk(
- this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseChunkOffsets(
- uint32_t type, size_t offset, uint64_t size) {
- return editTrack(mCurrentTrackID)->mStaticFragment->parseChunkOffsets(
- this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseChunkOffsets64(
- uint32_t type, size_t offset, uint64_t size) {
- return editTrack(mCurrentTrackID)->mStaticFragment->parseChunkOffsets64(
- this, type, offset, size);
-}
-
-status_t FragmentedMP4Parser::parseAVCCodecSpecificData(
- uint32_t type, size_t offset, uint64_t size) {
- TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
- SampleDescription *sampleDesc =
- &trackInfo->mSampleDescs.editItemAt(
- trackInfo->mSampleDescs.size() - 1);
-
- if (sampleDesc->mType != FOURCC('a', 'v', 'c', '1')) {
- return -EINVAL;
- }
-
- const uint8_t *ptr = mBuffer->data() + offset;
-
- size -= offset;
- offset = 0;
-
- if (size < 7 || ptr[0] != 0x01) {
- return ERROR_MALFORMED;
- }
-
- sampleDesc->mFormat->setSize("nal-length-size", 1 + (ptr[4] & 3));
-
- size_t numSPS = ptr[5] & 31;
-
- ptr += 6;
- size -= 6;
-
- for (size_t i = 0; i < numSPS; ++i) {
- if (size < 2) {
- return ERROR_MALFORMED;
- }
-
- size_t length = U16_AT(ptr);
-
- ptr += 2;
- size -= 2;
-
- if (size < length) {
- return ERROR_MALFORMED;
- }
-
- addCodecSpecificData(
- sampleDesc->mFormat, i, ptr, length,
- true /* insertStartCode */);
-
- ptr += length;
- size -= length;
- }
-
- if (size < 1) {
- return ERROR_MALFORMED;
- }
-
- size_t numPPS = *ptr;
- ++ptr;
- --size;
-
- for (size_t i = 0; i < numPPS; ++i) {
- if (size < 2) {
- return ERROR_MALFORMED;
- }
-
- size_t length = U16_AT(ptr);
-
- ptr += 2;
- size -= 2;
-
- if (size < length) {
- return ERROR_MALFORMED;
- }
-
- addCodecSpecificData(
- sampleDesc->mFormat, numSPS + i, ptr, length,
- true /* insertStartCode */);
-
- ptr += length;
- size -= length;
- }
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseESDSCodecSpecificData(
- uint32_t type, size_t offset, uint64_t size) {
- TrackInfo *trackInfo = editTrack(mCurrentTrackID);
-
- SampleDescription *sampleDesc =
- &trackInfo->mSampleDescs.editItemAt(
- trackInfo->mSampleDescs.size() - 1);
-
- if (sampleDesc->mType != FOURCC('m', 'p', '4', 'a')
- && sampleDesc->mType != FOURCC('m', 'p', '4', 'v')) {
- return -EINVAL;
- }
-
- const uint8_t *ptr = mBuffer->data() + offset;
-
- size -= offset;
- offset = 0;
-
- if (size < 4) {
- return -EINVAL;
- }
-
- if (U32_AT(ptr) != 0) {
- return -EINVAL;
- }
-
- ptr += 4;
- size -=4;
-
- ESDS esds(ptr, size);
-
- uint8_t objectTypeIndication;
- if (esds.getObjectTypeIndication(&objectTypeIndication) != OK) {
- return ERROR_MALFORMED;
- }
-
- const uint8_t *csd;
- size_t csd_size;
- if (esds.getCodecSpecificInfo(
- (const void **)&csd, &csd_size) != OK) {
- return ERROR_MALFORMED;
- }
-
- addCodecSpecificData(sampleDesc->mFormat, 0, csd, csd_size);
-
- if (sampleDesc->mType != FOURCC('m', 'p', '4', 'a')) {
- return OK;
- }
-
- if (csd_size == 0) {
- // There's no further information, i.e. no codec specific data
- // Let's assume that the information provided in the mpeg4 headers
- // is accurate and hope for the best.
-
- return OK;
- }
-
- if (csd_size < 2) {
- return ERROR_MALFORMED;
- }
-
- uint32_t objectType = csd[0] >> 3;
-
- if (objectType == 31) {
- return ERROR_UNSUPPORTED;
- }
-
- uint32_t freqIndex = (csd[0] & 7) << 1 | (csd[1] >> 7);
- int32_t sampleRate = 0;
- int32_t numChannels = 0;
- if (freqIndex == 15) {
- if (csd_size < 5) {
- return ERROR_MALFORMED;
- }
-
- sampleRate = (csd[1] & 0x7f) << 17
- | csd[2] << 9
- | csd[3] << 1
- | (csd[4] >> 7);
-
- numChannels = (csd[4] >> 3) & 15;
- } else {
- static uint32_t kSamplingRate[] = {
- 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
- 16000, 12000, 11025, 8000, 7350
- };
-
- if (freqIndex == 13 || freqIndex == 14) {
- return ERROR_MALFORMED;
- }
-
- sampleRate = kSamplingRate[freqIndex];
- numChannels = (csd[1] >> 3) & 15;
- }
-
- if (numChannels == 0) {
- return ERROR_UNSUPPORTED;
- }
-
- sampleDesc->mFormat->setInt32("sample-rate", sampleRate);
- sampleDesc->mFormat->setInt32("channel-count", numChannels);
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseMediaData(
- uint32_t type, size_t offset, uint64_t size) {
- ALOGV("skipping 'mdat' chunk at offsets 0x%08lx-0x%08llx.",
- mBufferPos + offset, mBufferPos + size);
-
- sp<ABuffer> buffer = new ABuffer(size - offset);
- memcpy(buffer->data(), mBuffer->data() + offset, size - offset);
-
- mMediaData.push();
- MediaDataInfo *info = &mMediaData.editItemAt(mMediaData.size() - 1);
- info->mBuffer = buffer;
- info->mOffset = mBufferPos + offset;
-
- if (mMediaData.size() > 10) {
- ALOGV("suspending for now.");
- mSuspended = true;
- }
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseSegmentIndex(
- uint32_t type, size_t offset, uint64_t size) {
- ALOGV("sidx box type %d, offset %d, size %d", type, int(offset), int(size));
-// AString sidxstr;
-// hexdump(mBuffer->data() + offset, size, 0 /* indent */, &sidxstr);
-// ALOGV("raw sidx:");
-// ALOGV("%s", sidxstr.c_str());
- if (offset + 12 > size) {
- return -EINVAL;
- }
-
- uint32_t flags = readU32(offset);
-
- uint32_t version = flags >> 24;
- flags &= 0xffffff;
-
- ALOGV("sidx version %d", version);
-
- uint32_t referenceId = readU32(offset + 4);
- uint32_t timeScale = readU32(offset + 8);
- ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
-
- uint64_t earliestPresentationTime;
- uint64_t firstOffset;
-
- offset += 12;
-
- if (version == 0) {
- if (offset + 8 > size) {
- return -EINVAL;
- }
- earliestPresentationTime = readU32(offset);
- firstOffset = readU32(offset + 4);
- offset += 8;
- } else {
- if (offset + 16 > size) {
- return -EINVAL;
- }
- earliestPresentationTime = readU64(offset);
- firstOffset = readU64(offset + 8);
- offset += 16;
- }
- ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset);
-
- if (offset + 4 > size) {
- return -EINVAL;
- }
- if (readU16(offset) != 0) { // reserved
- return -EINVAL;
- }
- int32_t referenceCount = readU16(offset + 2);
- offset += 4;
- ALOGV("refcount: %d", referenceCount);
-
- if (offset + referenceCount * 12 > size) {
- return -EINVAL;
- }
-
- TrackInfo *info = editTrack(mCurrentTrackID);
- uint64_t total_duration = 0;
- for (int i = 0; i < referenceCount; i++) {
- uint32_t d1 = readU32(offset);
- uint32_t d2 = readU32(offset + 4);
- uint32_t d3 = readU32(offset + 8);
-
- if (d1 & 0x80000000) {
- ALOGW("sub-sidx boxes not supported yet");
- }
- bool sap = d3 & 0x80000000;
- bool saptype = d3 >> 28;
- if (!sap || saptype > 2) {
- ALOGW("not a stream access point, or unsupported type");
- }
- total_duration += d2;
- offset += 12;
- ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3);
- SidxEntry se;
- se.mSize = d1 & 0x7fffffff;
- se.mDurationUs = 1000000LL * d2 / timeScale;
- info->mSidx.add(se);
- }
-
- info->mSidxDuration = total_duration * 1000000 / timeScale;
- ALOGV("duration: %lld", info->mSidxDuration);
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseTrackExtends(
- uint32_t type, size_t offset, uint64_t size) {
- if (offset + 24 > size) {
- return -EINVAL;
- }
-
- if (readU32(offset) != 0) {
- return -EINVAL;
- }
-
- uint32_t trackID = readU32(offset + 4);
-
- TrackInfo *info = editTrack(trackID, true /* createIfNecessary */);
- info->mDefaultSampleDescriptionIndex = readU32(offset + 8);
- info->mDefaultSampleDuration = readU32(offset + 12);
- info->mDefaultSampleSize = readU32(offset + 16);
- info->mDefaultSampleFlags = readU32(offset + 20);
-
- return OK;
-}
-
-FragmentedMP4Parser::TrackInfo *FragmentedMP4Parser::editTrack(
- uint32_t trackID, bool createIfNecessary) {
- ssize_t i = mTracks.indexOfKey(trackID);
-
- if (i >= 0) {
- return &mTracks.editValueAt(i);
- }
-
- if (!createIfNecessary) {
- return NULL;
- }
-
- TrackInfo info;
- info.mTrackID = trackID;
- info.mFlags = 0;
- info.mDuration = 0xffffffff;
- info.mSidxDuration = 0;
- info.mMediaTimeScale = 0;
- info.mMediaHandlerType = 0;
- info.mDefaultSampleDescriptionIndex = 0;
- info.mDefaultSampleDuration = 0;
- info.mDefaultSampleSize = 0;
- info.mDefaultSampleFlags = 0;
-
- info.mDecodingTime = 0;
-
- mTracks.add(trackID, info);
- return &mTracks.editValueAt(mTracks.indexOfKey(trackID));
-}
-
-status_t FragmentedMP4Parser::parseTrackFragmentHeader(
- uint32_t type, size_t offset, uint64_t size) {
- if (offset + 8 > size) {
- return -EINVAL;
- }
-
- uint32_t flags = readU32(offset);
-
- if (flags & 0xff000000) {
- return -EINVAL;
- }
-
- mTrackFragmentHeaderInfo.mFlags = flags;
-
- mTrackFragmentHeaderInfo.mTrackID = readU32(offset + 4);
- offset += 8;
-
- if (flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent) {
- if (offset + 8 > size) {
- return -EINVAL;
- }
-
- mTrackFragmentHeaderInfo.mBaseDataOffset = readU64(offset);
- offset += 8;
- }
-
- if (flags & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent) {
- if (offset + 4 > size) {
- return -EINVAL;
- }
-
- mTrackFragmentHeaderInfo.mSampleDescriptionIndex = readU32(offset);
- offset += 4;
- }
-
- if (flags & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
- if (offset + 4 > size) {
- return -EINVAL;
- }
-
- mTrackFragmentHeaderInfo.mDefaultSampleDuration = readU32(offset);
- offset += 4;
- }
-
- if (flags & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
- if (offset + 4 > size) {
- return -EINVAL;
- }
-
- mTrackFragmentHeaderInfo.mDefaultSampleSize = readU32(offset);
- offset += 4;
- }
-
- if (flags & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
- if (offset + 4 > size) {
- return -EINVAL;
- }
-
- mTrackFragmentHeaderInfo.mDefaultSampleFlags = readU32(offset);
- offset += 4;
- }
-
- if (!(flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent)) {
- // This should point to the position of the first byte of the
- // enclosing 'moof' container for the first track and
- // the end of the data of the preceding fragment for subsequent
- // tracks.
-
- CHECK_GE(mStack.size(), 2u);
-
- mTrackFragmentHeaderInfo.mBaseDataOffset =
- mStack.itemAt(mStack.size() - 2).mOffset;
-
- // XXX TODO: This does not do the right thing for the 2nd and
- // subsequent tracks yet.
- }
-
- mTrackFragmentHeaderInfo.mDataOffset =
- mTrackFragmentHeaderInfo.mBaseDataOffset;
-
- TrackInfo *trackInfo = editTrack(mTrackFragmentHeaderInfo.mTrackID);
-
- if (trackInfo->mFragments.empty()
- || (*trackInfo->mFragments.begin())->complete()) {
- trackInfo->mFragments.push_back(new DynamicTrackFragment);
- }
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::parseTrackFragmentRun(
- uint32_t type, size_t offset, uint64_t size) {
- if (offset + 8 > size) {
- return -EINVAL;
- }
-
- enum {
- kDataOffsetPresent = 0x01,
- kFirstSampleFlagsPresent = 0x04,
- kSampleDurationPresent = 0x100,
- kSampleSizePresent = 0x200,
- kSampleFlagsPresent = 0x400,
- kSampleCompositionTimeOffsetPresent = 0x800,
- };
-
- uint32_t flags = readU32(offset);
-
- if (flags & 0xff000000) {
- return -EINVAL;
- }
-
- if ((flags & kFirstSampleFlagsPresent) && (flags & kSampleFlagsPresent)) {
- // These two shall not be used together.
- return -EINVAL;
- }
-
- uint32_t sampleCount = readU32(offset + 4);
- offset += 8;
-
- uint64_t dataOffset = mTrackFragmentHeaderInfo.mDataOffset;
-
- uint32_t firstSampleFlags = 0;
-
- if (flags & kDataOffsetPresent) {
- if (offset + 4 > size) {
- return -EINVAL;
- }
-
- int32_t dataOffsetDelta = (int32_t)readU32(offset);
-
- dataOffset = mTrackFragmentHeaderInfo.mBaseDataOffset + dataOffsetDelta;
-
- offset += 4;
- }
-
- if (flags & kFirstSampleFlagsPresent) {
- if (offset + 4 > size) {
- return -EINVAL;
- }
-
- firstSampleFlags = readU32(offset);
- offset += 4;
- }
-
- TrackInfo *info = editTrack(mTrackFragmentHeaderInfo.mTrackID);
-
- if (info == NULL) {
- return -EINVAL;
- }
-
- uint32_t sampleDuration = 0, sampleSize = 0, sampleFlags = 0,
- sampleCtsOffset = 0;
-
- size_t bytesPerSample = 0;
- if (flags & kSampleDurationPresent) {
- bytesPerSample += 4;
- } else if (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
- sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
- } else {
- sampleDuration = info->mDefaultSampleDuration;
- }
-
- if (flags & kSampleSizePresent) {
- bytesPerSample += 4;
- } else if (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
- sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
- } else {
- sampleSize = info->mDefaultSampleSize;
- }
-
- if (flags & kSampleFlagsPresent) {
- bytesPerSample += 4;
- } else if (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
- sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
- } else {
- sampleFlags = info->mDefaultSampleFlags;
- }
-
- if (flags & kSampleCompositionTimeOffsetPresent) {
- bytesPerSample += 4;
- } else {
- sampleCtsOffset = 0;
- }
-
- if (offset + sampleCount * bytesPerSample > size) {
- return -EINVAL;
- }
-
- uint32_t sampleDescIndex =
- (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent)
- ? mTrackFragmentHeaderInfo.mSampleDescriptionIndex
- : info->mDefaultSampleDescriptionIndex;
-
- for (uint32_t i = 0; i < sampleCount; ++i) {
- if (flags & kSampleDurationPresent) {
- sampleDuration = readU32(offset);
- offset += 4;
- }
-
- if (flags & kSampleSizePresent) {
- sampleSize = readU32(offset);
- offset += 4;
- }
-
- if (flags & kSampleFlagsPresent) {
- sampleFlags = readU32(offset);
- offset += 4;
- }
-
- if (flags & kSampleCompositionTimeOffsetPresent) {
- sampleCtsOffset = readU32(offset);
- offset += 4;
- }
-
- ALOGV("adding sample at offset 0x%08llx, size %u, duration %u, "
- "sampleDescIndex=%u, flags 0x%08x",
- dataOffset, sampleSize, sampleDuration,
- sampleDescIndex,
- (flags & kFirstSampleFlagsPresent) && i == 0
- ? firstSampleFlags : sampleFlags);
-
- const sp<TrackFragment> &fragment = *--info->mFragments.end();
-
- uint32_t decodingTime = info->mDecodingTime;
- info->mDecodingTime += sampleDuration;
- uint32_t presentationTime = decodingTime + sampleCtsOffset;
-
- static_cast<DynamicTrackFragment *>(
- fragment.get())->addSample(
- dataOffset,
- sampleSize,
- presentationTime,
- sampleDescIndex,
- ((flags & kFirstSampleFlagsPresent) && i == 0)
- ? firstSampleFlags : sampleFlags);
-
- dataOffset += sampleSize;
- }
-
- mTrackFragmentHeaderInfo.mDataOffset = dataOffset;
-
- return OK;
-}
-
-void FragmentedMP4Parser::copyBuffer(
- sp<ABuffer> *dst, size_t offset, uint64_t size) const {
- sp<ABuffer> buf = new ABuffer(size);
- memcpy(buf->data(), mBuffer->data() + offset, size);
-
- *dst = buf;
-}
-
-} // namespace android
diff --git a/media/libstagefright/mp4/TrackFragment.cpp b/media/libstagefright/mp4/TrackFragment.cpp
deleted file mode 100644
index 3699038..0000000
--- a/media/libstagefright/mp4/TrackFragment.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TrackFragment"
-#include <utils/Log.h>
-
-#include "TrackFragment.h"
-
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/hexdump.h>
-
-namespace android {
-
-FragmentedMP4Parser::DynamicTrackFragment::DynamicTrackFragment()
- : mComplete(false),
- mSampleIndex(0) {
-}
-
-FragmentedMP4Parser::DynamicTrackFragment::~DynamicTrackFragment() {
-}
-
-status_t FragmentedMP4Parser::DynamicTrackFragment::getSample(SampleInfo *info) {
- if (mSampleIndex >= mSamples.size()) {
- return mComplete ? ERROR_END_OF_STREAM : -EWOULDBLOCK;
- }
-
- *info = mSamples.itemAt(mSampleIndex);
-
- return OK;
-}
-
-void FragmentedMP4Parser::DynamicTrackFragment::advance() {
- ++mSampleIndex;
-}
-
-void FragmentedMP4Parser::DynamicTrackFragment::addSample(
- off64_t dataOffset, size_t sampleSize,
- uint32_t presentationTime,
- size_t sampleDescIndex,
- uint32_t flags) {
- mSamples.push();
- SampleInfo *sampleInfo = &mSamples.editItemAt(mSamples.size() - 1);
-
- sampleInfo->mOffset = dataOffset;
- sampleInfo->mSize = sampleSize;
- sampleInfo->mPresentationTime = presentationTime;
- sampleInfo->mSampleDescIndex = sampleDescIndex;
- sampleInfo->mFlags = flags;
-}
-
-status_t FragmentedMP4Parser::DynamicTrackFragment::signalCompletion() {
- mComplete = true;
-
- return OK;
-}
-
-bool FragmentedMP4Parser::DynamicTrackFragment::complete() const {
- return mComplete;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-FragmentedMP4Parser::StaticTrackFragment::StaticTrackFragment()
- : mSampleIndex(0),
- mSampleCount(0),
- mChunkIndex(0),
- mSampleToChunkIndex(-1),
- mSampleToChunkRemaining(0),
- mPrevChunkIndex(0xffffffff),
- mNextSampleOffset(0) {
-}
-
-FragmentedMP4Parser::StaticTrackFragment::~StaticTrackFragment() {
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::getSample(SampleInfo *info) {
- if (mSampleIndex >= mSampleCount) {
- return ERROR_END_OF_STREAM;
- }
-
- *info = mSampleInfo;
-
- ALOGV("returning sample %d at [0x%08llx, 0x%08llx)",
- mSampleIndex,
- info->mOffset, info->mOffset + info->mSize);
-
- return OK;
-}
-
-void FragmentedMP4Parser::StaticTrackFragment::updateSampleInfo() {
- if (mSampleIndex >= mSampleCount) {
- return;
- }
-
- if (mSampleSizes != NULL) {
- uint32_t defaultSampleSize = U32_AT(mSampleSizes->data() + 4);
- if (defaultSampleSize > 0) {
- mSampleInfo.mSize = defaultSampleSize;
- } else {
- mSampleInfo.mSize= U32_AT(mSampleSizes->data() + 12 + 4 * mSampleIndex);
- }
- } else {
- CHECK(mCompactSampleSizes != NULL);
-
- uint32_t fieldSize = U32_AT(mCompactSampleSizes->data() + 4);
-
- switch (fieldSize) {
- case 4:
- {
- unsigned byte = mCompactSampleSizes->data()[12 + mSampleIndex / 2];
- mSampleInfo.mSize = (mSampleIndex & 1) ? byte & 0x0f : byte >> 4;
- break;
- }
-
- case 8:
- {
- mSampleInfo.mSize = mCompactSampleSizes->data()[12 + mSampleIndex];
- break;
- }
-
- default:
- {
- CHECK_EQ(fieldSize, 16);
- mSampleInfo.mSize =
- U16_AT(mCompactSampleSizes->data() + 12 + mSampleIndex * 2);
- break;
- }
- }
- }
-
- CHECK_GT(mSampleToChunkRemaining, 0);
-
- // The sample desc index is 1-based... XXX
- mSampleInfo.mSampleDescIndex =
- U32_AT(mSampleToChunk->data() + 8 + 12 * mSampleToChunkIndex + 8);
-
- if (mChunkIndex != mPrevChunkIndex) {
- mPrevChunkIndex = mChunkIndex;
-
- if (mChunkOffsets != NULL) {
- uint32_t entryCount = U32_AT(mChunkOffsets->data() + 4);
-
- if (mChunkIndex >= entryCount) {
- mSampleIndex = mSampleCount;
- return;
- }
-
- mNextSampleOffset =
- U32_AT(mChunkOffsets->data() + 8 + 4 * mChunkIndex);
- } else {
- CHECK(mChunkOffsets64 != NULL);
-
- uint32_t entryCount = U32_AT(mChunkOffsets64->data() + 4);
-
- if (mChunkIndex >= entryCount) {
- mSampleIndex = mSampleCount;
- return;
- }
-
- mNextSampleOffset =
- U64_AT(mChunkOffsets64->data() + 8 + 8 * mChunkIndex);
- }
- }
-
- mSampleInfo.mOffset = mNextSampleOffset;
-
- mSampleInfo.mPresentationTime = 0;
- mSampleInfo.mFlags = 0;
-}
-
-void FragmentedMP4Parser::StaticTrackFragment::advance() {
- mNextSampleOffset += mSampleInfo.mSize;
-
- ++mSampleIndex;
- if (--mSampleToChunkRemaining == 0) {
- ++mChunkIndex;
-
- uint32_t entryCount = U32_AT(mSampleToChunk->data() + 4);
-
- // If this is the last entry in the sample to chunk table, we will
- // stay on this entry.
- if ((uint32_t)(mSampleToChunkIndex + 1) < entryCount) {
- uint32_t nextChunkIndex =
- U32_AT(mSampleToChunk->data() + 8 + 12 * (mSampleToChunkIndex + 1));
-
- CHECK_GE(nextChunkIndex, 1u);
- --nextChunkIndex;
-
- if (mChunkIndex >= nextChunkIndex) {
- CHECK_EQ(mChunkIndex, nextChunkIndex);
- ++mSampleToChunkIndex;
- }
- }
-
- mSampleToChunkRemaining =
- U32_AT(mSampleToChunk->data() + 8 + 12 * mSampleToChunkIndex + 4);
- }
-
- updateSampleInfo();
-}
-
-static void setU32At(uint8_t *ptr, uint32_t x) {
- ptr[0] = x >> 24;
- ptr[1] = (x >> 16) & 0xff;
- ptr[2] = (x >> 8) & 0xff;
- ptr[3] = x & 0xff;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::signalCompletion() {
- mSampleToChunkIndex = 0;
-
- mSampleToChunkRemaining =
- (mSampleToChunk == NULL)
- ? 0
- : U32_AT(mSampleToChunk->data() + 8 + 12 * mSampleToChunkIndex + 4);
-
- updateSampleInfo();
-
- return OK;
-}
-
-bool FragmentedMP4Parser::StaticTrackFragment::complete() const {
- return true;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseSampleSizes(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
- if (offset + 12 > size) {
- return ERROR_MALFORMED;
- }
-
- if (parser->readU32(offset) != 0) {
- return ERROR_MALFORMED;
- }
-
- uint32_t sampleSize = parser->readU32(offset + 4);
- uint32_t sampleCount = parser->readU32(offset + 8);
-
- if (sampleSize == 0 && offset + 12 + sampleCount * 4 != size) {
- return ERROR_MALFORMED;
- }
-
- parser->copyBuffer(&mSampleSizes, offset, size);
-
- mSampleCount = sampleCount;
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseCompactSampleSizes(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
- if (offset + 12 > size) {
- return ERROR_MALFORMED;
- }
-
- if (parser->readU32(offset) != 0) {
- return ERROR_MALFORMED;
- }
-
- uint32_t fieldSize = parser->readU32(offset + 4);
-
- if (fieldSize != 4 && fieldSize != 8 && fieldSize != 16) {
- return ERROR_MALFORMED;
- }
-
- uint32_t sampleCount = parser->readU32(offset + 8);
-
- if (offset + 12 + (sampleCount * fieldSize + 4) / 8 != size) {
- return ERROR_MALFORMED;
- }
-
- parser->copyBuffer(&mCompactSampleSizes, offset, size);
-
- mSampleCount = sampleCount;
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseSampleToChunk(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
- if (offset + 8 > size) {
- return ERROR_MALFORMED;
- }
-
- if (parser->readU32(offset) != 0) {
- return ERROR_MALFORMED;
- }
-
- uint32_t entryCount = parser->readU32(offset + 4);
-
- if (entryCount == 0) {
- return OK;
- }
-
- if (offset + 8 + entryCount * 12 != size) {
- return ERROR_MALFORMED;
- }
-
- parser->copyBuffer(&mSampleToChunk, offset, size);
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseChunkOffsets(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
- if (offset + 8 > size) {
- return ERROR_MALFORMED;
- }
-
- if (parser->readU32(offset) != 0) {
- return ERROR_MALFORMED;
- }
-
- uint32_t entryCount = parser->readU32(offset + 4);
-
- if (offset + 8 + entryCount * 4 != size) {
- return ERROR_MALFORMED;
- }
-
- parser->copyBuffer(&mChunkOffsets, offset, size);
-
- return OK;
-}
-
-status_t FragmentedMP4Parser::StaticTrackFragment::parseChunkOffsets64(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
- if (offset + 8 > size) {
- return ERROR_MALFORMED;
- }
-
- if (parser->readU32(offset) != 0) {
- return ERROR_MALFORMED;
- }
-
- uint32_t entryCount = parser->readU32(offset + 4);
-
- if (offset + 8 + entryCount * 8 != size) {
- return ERROR_MALFORMED;
- }
-
- parser->copyBuffer(&mChunkOffsets64, offset, size);
-
- return OK;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/mp4/TrackFragment.h b/media/libstagefright/mp4/TrackFragment.h
deleted file mode 100644
index e1ad46e..0000000
--- a/media/libstagefright/mp4/TrackFragment.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TRACK_FRAGMENT_H_
-
-#define TRACK_FRAGMENT_H_
-
-#include "include/FragmentedMP4Parser.h"
-
-namespace android {
-
-struct FragmentedMP4Parser::TrackFragment : public RefBase {
- TrackFragment() {}
-
- virtual status_t getSample(SampleInfo *info) = 0;
- virtual void advance() = 0;
-
- virtual status_t signalCompletion() = 0;
- virtual bool complete() const = 0;
-
-protected:
- virtual ~TrackFragment() {}
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(TrackFragment);
-};
-
-struct FragmentedMP4Parser::DynamicTrackFragment : public FragmentedMP4Parser::TrackFragment {
- DynamicTrackFragment();
-
- virtual status_t getSample(SampleInfo *info);
- virtual void advance();
-
- void addSample(
- off64_t dataOffset, size_t sampleSize,
- uint32_t presentationTime,
- size_t sampleDescIndex,
- uint32_t flags);
-
- // No more samples will be added to this fragment.
- virtual status_t signalCompletion();
-
- virtual bool complete() const;
-
-protected:
- virtual ~DynamicTrackFragment();
-
-private:
- bool mComplete;
- size_t mSampleIndex;
- Vector<SampleInfo> mSamples;
-
- DISALLOW_EVIL_CONSTRUCTORS(DynamicTrackFragment);
-};
-
-struct FragmentedMP4Parser::StaticTrackFragment : public FragmentedMP4Parser::TrackFragment {
- StaticTrackFragment();
-
- virtual status_t getSample(SampleInfo *info);
- virtual void advance();
-
- virtual status_t signalCompletion();
- virtual bool complete() const;
-
- status_t parseSampleSizes(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
- status_t parseCompactSampleSizes(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
- status_t parseSampleToChunk(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
- status_t parseChunkOffsets(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
- status_t parseChunkOffsets64(
- FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
-
-protected:
- virtual ~StaticTrackFragment();
-
-private:
- size_t mSampleIndex;
- size_t mSampleCount;
- uint32_t mChunkIndex;
-
- SampleInfo mSampleInfo;
-
- sp<ABuffer> mSampleSizes;
- sp<ABuffer> mCompactSampleSizes;
-
- sp<ABuffer> mSampleToChunk;
- ssize_t mSampleToChunkIndex;
- size_t mSampleToChunkRemaining;
-
- sp<ABuffer> mChunkOffsets;
- sp<ABuffer> mChunkOffsets64;
- uint32_t mPrevChunkIndex;
- uint64_t mNextSampleOffset;
-
- void updateSampleInfo();
-
- DISALLOW_EVIL_CONSTRUCTORS(StaticTrackFragment);
-};
-
-} // namespace android
-
-#endif // TRACK_FRAGMENT_H_
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index cb57a2f..6d8866a 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -36,6 +36,8 @@
#include <media/IStreamSource.h>
#include <utils/KeyedVector.h>
+#include <inttypes.h>
+
namespace android {
// I want the expression "y" evaluated even if verbose logging is off.
@@ -501,11 +503,7 @@ ATSParser::Stream::Stream(
ElementaryStreamQueue::MPEG4_VIDEO);
break;
- case STREAMTYPE_PCM_AUDIO:
- mQueue = new ElementaryStreamQueue(
- ElementaryStreamQueue::PCM_AUDIO);
- break;
-
+ case STREAMTYPE_LPCM_AC3:
case STREAMTYPE_AC3:
mQueue = new ElementaryStreamQueue(
ElementaryStreamQueue::AC3);
@@ -553,7 +551,9 @@ status_t ATSParser::Stream::parse(
}
#endif
- return OK;
+ if (!payload_unit_start_indicator) {
+ return OK;
+ }
}
mExpectedContinuityCounter = (continuity_counter + 1) & 0x0f;
@@ -586,7 +586,7 @@ status_t ATSParser::Stream::parse(
// Increment in multiples of 64K.
neededSize = (neededSize + 65535) & ~65535;
- ALOGI("resizing buffer to %d bytes", neededSize);
+ ALOGI("resizing buffer to %zu bytes", neededSize);
sp<ABuffer> newBuffer = new ABuffer(neededSize);
memcpy(newBuffer->data(), mBuffer->data(), mBuffer->size());
@@ -618,7 +618,7 @@ bool ATSParser::Stream::isAudio() const {
case STREAMTYPE_MPEG1_AUDIO:
case STREAMTYPE_MPEG2_AUDIO:
case STREAMTYPE_MPEG2_AUDIO_ADTS:
- case STREAMTYPE_PCM_AUDIO:
+ case STREAMTYPE_LPCM_AC3:
case STREAMTYPE_AC3:
return true;
@@ -665,7 +665,7 @@ void ATSParser::Stream::signalDiscontinuity(
}
if (mSource != NULL) {
- mSource->queueDiscontinuity(type, extra);
+ mSource->queueDiscontinuity(type, extra, true);
}
}
@@ -748,7 +748,7 @@ status_t ATSParser::Stream::parsePES(ABitReader *br) {
PTS |= br->getBits(15);
CHECK_EQ(br->getBits(1), 1u);
- ALOGV("PTS = 0x%016llx (%.2f)", PTS, PTS / 90000.0);
+ ALOGV("PTS = 0x%016" PRIx64 " (%.2f)", PTS, PTS / 90000.0);
optional_bytes_remaining -= 5;
@@ -764,7 +764,7 @@ status_t ATSParser::Stream::parsePES(ABitReader *br) {
DTS |= br->getBits(15);
CHECK_EQ(br->getBits(1), 1u);
- ALOGV("DTS = %llu", DTS);
+ ALOGV("DTS = %" PRIu64, DTS);
optional_bytes_remaining -= 5;
}
@@ -782,7 +782,7 @@ status_t ATSParser::Stream::parsePES(ABitReader *br) {
ESCR |= br->getBits(15);
CHECK_EQ(br->getBits(1), 1u);
- ALOGV("ESCR = %llu", ESCR);
+ ALOGV("ESCR = %" PRIu64, ESCR);
MY_LOGV("ESCR_extension = %u", br->getBits(9));
CHECK_EQ(br->getBits(1), 1u);
@@ -812,7 +812,7 @@ status_t ATSParser::Stream::parsePES(ABitReader *br) {
if (br->numBitsLeft() < dataLength * 8) {
ALOGE("PES packet does not carry enough data to contain "
- "payload. (numBitsLeft = %d, required = %d)",
+ "payload. (numBitsLeft = %zu, required = %u)",
br->numBitsLeft(), dataLength * 8);
return ERROR_MALFORMED;
@@ -832,7 +832,7 @@ status_t ATSParser::Stream::parsePES(ABitReader *br) {
size_t payloadSizeBits = br->numBitsLeft();
CHECK_EQ(payloadSizeBits % 8, 0u);
- ALOGV("There's %d bytes of payload.", payloadSizeBits / 8);
+ ALOGV("There's %zu bytes of payload.", payloadSizeBits / 8);
}
} else if (stream_id == 0xbe) { // padding_stream
CHECK_NE(PES_packet_length, 0u);
@@ -850,7 +850,7 @@ status_t ATSParser::Stream::flush() {
return OK;
}
- ALOGV("flushing stream 0x%04x size = %d", mElementaryPID, mBuffer->size());
+ ALOGV("flushing stream 0x%04x size = %zu", mElementaryPID, mBuffer->size());
ABitReader br(mBuffer->data(), mBuffer->size());
@@ -862,7 +862,7 @@ status_t ATSParser::Stream::flush() {
}
void ATSParser::Stream::onPayloadData(
- unsigned PTS_DTS_flags, uint64_t PTS, uint64_t DTS,
+ unsigned PTS_DTS_flags, uint64_t PTS, uint64_t /* DTS */,
const uint8_t *data, size_t size) {
#if 0
ALOGI("payload streamType 0x%02x, PTS = 0x%016llx, dPTS = %lld",
@@ -894,6 +894,12 @@ void ATSParser::Stream::onPayloadData(
ALOGV("Stream PID 0x%08x of type 0x%02x now has data.",
mElementaryPID, mStreamType);
+ const char *mime;
+ if (meta->findCString(kKeyMIMEType, &mime)
+ && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+ && !IsIDR(accessUnit)) {
+ continue;
+ }
mSource = new AnotherPacketSource(meta);
mSource->queueAccessUnit(accessUnit);
}
@@ -1172,7 +1178,7 @@ void ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
uint64_t PCR = PCR_base * 300 + PCR_ext;
- ALOGV("PID 0x%04x: PCR = 0x%016llx (%.2f)",
+ ALOGV("PID 0x%04x: PCR = 0x%016" PRIx64 " (%.2f)",
PID, PCR, PCR / 27E6);
// The number of bytes received by this parser up to and
@@ -1267,8 +1273,8 @@ bool ATSParser::PTSTimeDeltaEstablished() {
}
void ATSParser::updatePCR(
- unsigned PID, uint64_t PCR, size_t byteOffsetFromStart) {
- ALOGV("PCR 0x%016llx @ %d", PCR, byteOffsetFromStart);
+ unsigned /* PID */, uint64_t PCR, size_t byteOffsetFromStart) {
+ ALOGV("PCR 0x%016" PRIx64 " @ %zu", PCR, byteOffsetFromStart);
if (mNumPCRs == 2) {
mPCR[0] = mPCR[1];
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 86b025f..8986a22 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -93,7 +93,9 @@ struct ATSParser : public RefBase {
// From ATSC A/53 Part 3:2009, 6.7.1
STREAMTYPE_AC3 = 0x81,
- STREAMTYPE_PCM_AUDIO = 0x83,
+ // Stream type 0x83 is non-standard,
+ // it could be LPCM or TrueHD AC3
+ STREAMTYPE_LPCM_AC3 = 0x83,
};
protected:
diff --git a/media/libstagefright/mpeg2ts/Android.mk b/media/libstagefright/mpeg2ts/Android.mk
index c1a7a9d..c17a0b7 100644
--- a/media/libstagefright/mpeg2ts/Android.mk
+++ b/media/libstagefright/mpeg2ts/Android.mk
@@ -13,6 +13,8 @@ LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/av/media/libstagefright \
$(TOP)/frameworks/native/include/media/openmax
+LOCAL_CFLAGS += -Werror
+
LOCAL_MODULE:= libstagefright_mpeg2ts
ifeq ($(TARGET_ARCH),arm)
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 2b0bf30..a03f6f9 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AnotherPacketSource"
+
#include "AnotherPacketSource.h"
#include <media/stagefright/foundation/ABuffer.h>
@@ -26,16 +29,21 @@
#include <media/stagefright/MetaData.h>
#include <utils/Vector.h>
+#include <inttypes.h>
+
namespace android {
const int64_t kNearEOSMarkUs = 2000000ll; // 2 secs
AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta)
: mIsAudio(false),
+ mIsVideo(false),
mFormat(NULL),
mLastQueuedTimeUs(0),
mEOSResult(OK),
- mLatestEnqueuedMeta(NULL) {
+ mLatestEnqueuedMeta(NULL),
+ mLatestDequeuedMeta(NULL),
+ mQueuedDiscontinuityCount(0) {
setFormat(meta);
}
@@ -43,6 +51,7 @@ void AnotherPacketSource::setFormat(const sp<MetaData> &meta) {
CHECK(mFormat == NULL);
mIsAudio = false;
+ mIsVideo = false;
if (meta == NULL) {
return;
@@ -54,15 +63,17 @@ void AnotherPacketSource::setFormat(const sp<MetaData> &meta) {
if (!strncasecmp("audio/", mime, 6)) {
mIsAudio = true;
+ } else if (!strncasecmp("video/", mime, 6)) {
+ mIsVideo = true;
} else {
- CHECK(!strncasecmp("video/", mime, 6));
+ CHECK(!strncasecmp("text/", mime, 5));
}
}
AnotherPacketSource::~AnotherPacketSource() {
}
-status_t AnotherPacketSource::start(MetaData *params) {
+status_t AnotherPacketSource::start(MetaData * /* params */) {
return OK;
}
@@ -86,7 +97,7 @@ sp<MetaData> AnotherPacketSource::getFormat() {
sp<RefBase> object;
if (buffer->meta()->findObject("format", &object)) {
- return static_cast<MetaData*>(object.get());
+ return mFormat = static_cast<MetaData*>(object.get());
}
++it;
@@ -112,9 +123,12 @@ status_t AnotherPacketSource::dequeueAccessUnit(sp<ABuffer> *buffer) {
mFormat.clear();
}
+ --mQueuedDiscontinuityCount;
return INFO_DISCONTINUITY;
}
+ mLatestDequeuedMeta = (*buffer)->meta()->dup();
+
sp<RefBase> object;
if ((*buffer)->meta()->findObject("format", &object)) {
mFormat = static_cast<MetaData*>(object.get());
@@ -136,8 +150,10 @@ status_t AnotherPacketSource::read(
}
if (!mBuffers.empty()) {
+
const sp<ABuffer> buffer = *mBuffers.begin();
mBuffers.erase(mBuffers.begin());
+ mLatestDequeuedMeta = buffer->meta()->dup();
int32_t discontinuity;
if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
@@ -173,7 +189,11 @@ bool AnotherPacketSource::wasFormatChange(
return (discontinuityType & ATSParser::DISCONTINUITY_AUDIO_FORMAT) != 0;
}
- return (discontinuityType & ATSParser::DISCONTINUITY_VIDEO_FORMAT) != 0;
+ if (mIsVideo) {
+ return (discontinuityType & ATSParser::DISCONTINUITY_VIDEO_FORMAT) != 0;
+ }
+
+ return false;
}
void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) {
@@ -186,13 +206,18 @@ void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) {
int64_t lastQueuedTimeUs;
CHECK(buffer->meta()->findInt64("timeUs", &lastQueuedTimeUs));
mLastQueuedTimeUs = lastQueuedTimeUs;
- ALOGV("queueAccessUnit timeUs=%lld us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
+ ALOGV("queueAccessUnit timeUs=%" PRIi64 " us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
Mutex::Autolock autoLock(mLock);
mBuffers.push_back(buffer);
mCondition.signal();
- if (!mLatestEnqueuedMeta.get()) {
+ int32_t discontinuity;
+ if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+ ++mQueuedDiscontinuityCount;
+ }
+
+ if (mLatestEnqueuedMeta == NULL) {
mLatestEnqueuedMeta = buffer->meta();
} else {
int64_t latestTimeUs = 0;
@@ -208,6 +233,7 @@ void AnotherPacketSource::clear() {
mBuffers.clear();
mEOSResult = OK;
+ mQueuedDiscontinuityCount = 0;
mFormat = NULL;
mLatestEnqueuedMeta = NULL;
@@ -215,27 +241,31 @@ void AnotherPacketSource::clear() {
void AnotherPacketSource::queueDiscontinuity(
ATSParser::DiscontinuityType type,
- const sp<AMessage> &extra) {
+ const sp<AMessage> &extra,
+ bool discard) {
Mutex::Autolock autoLock(mLock);
- // Leave only discontinuities in the queue.
- List<sp<ABuffer> >::iterator it = mBuffers.begin();
- while (it != mBuffers.end()) {
- sp<ABuffer> oldBuffer = *it;
+ if (discard) {
+ // Leave only discontinuities in the queue.
+ List<sp<ABuffer> >::iterator it = mBuffers.begin();
+ while (it != mBuffers.end()) {
+ sp<ABuffer> oldBuffer = *it;
+
+ int32_t oldDiscontinuityType;
+ if (!oldBuffer->meta()->findInt32(
+ "discontinuity", &oldDiscontinuityType)) {
+ it = mBuffers.erase(it);
+ continue;
+ }
- int32_t oldDiscontinuityType;
- if (!oldBuffer->meta()->findInt32(
- "discontinuity", &oldDiscontinuityType)) {
- it = mBuffers.erase(it);
- continue;
+ ++it;
}
-
- ++it;
}
mEOSResult = OK;
mLastQueuedTimeUs = 0;
mLatestEnqueuedMeta = NULL;
+ ++mQueuedDiscontinuityCount;
sp<ABuffer> buffer = new ABuffer(0);
buffer->meta()->setInt32("discontinuity", static_cast<int32_t>(type));
@@ -265,7 +295,10 @@ bool AnotherPacketSource::hasBufferAvailable(status_t *finalResult) {
int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) {
Mutex::Autolock autoLock(mLock);
+ return getBufferedDurationUs_l(finalResult);
+}
+int64_t AnotherPacketSource::getBufferedDurationUs_l(status_t *finalResult) {
*finalResult = mEOSResult;
if (mBuffers.empty()) {
@@ -274,6 +307,7 @@ int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) {
int64_t time1 = -1;
int64_t time2 = -1;
+ int64_t durationUs = 0;
List<sp<ABuffer> >::iterator it = mBuffers.begin();
while (it != mBuffers.end()) {
@@ -281,20 +315,64 @@ int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) {
int64_t timeUs;
if (buffer->meta()->findInt64("timeUs", &timeUs)) {
- if (time1 < 0) {
+ if (time1 < 0 || timeUs < time1) {
time1 = timeUs;
}
- time2 = timeUs;
+ if (time2 < 0 || timeUs > time2) {
+ time2 = timeUs;
+ }
} else {
// This is a discontinuity, reset everything.
+ durationUs += time2 - time1;
time1 = time2 = -1;
}
++it;
}
- return time2 - time1;
+ return durationUs + (time2 - time1);
+}
+
+// A cheaper but less precise version of getBufferedDurationUs that we would like to use in
+// LiveSession::dequeueAccessUnit to trigger downwards adaptation.
+int64_t AnotherPacketSource::getEstimatedDurationUs() {
+ Mutex::Autolock autoLock(mLock);
+ if (mBuffers.empty()) {
+ return 0;
+ }
+
+ if (mQueuedDiscontinuityCount > 0) {
+ status_t finalResult;
+ return getBufferedDurationUs_l(&finalResult);
+ }
+
+ List<sp<ABuffer> >::iterator it = mBuffers.begin();
+ sp<ABuffer> buffer = *it;
+
+ int64_t startTimeUs;
+ buffer->meta()->findInt64("timeUs", &startTimeUs);
+ if (startTimeUs < 0) {
+ return 0;
+ }
+
+ it = mBuffers.end();
+ --it;
+ buffer = *it;
+
+ int64_t endTimeUs;
+ buffer->meta()->findInt64("timeUs", &endTimeUs);
+ if (endTimeUs < 0) {
+ return 0;
+ }
+
+ int64_t diffUs;
+ if (endTimeUs > startTimeUs) {
+ diffUs = endTimeUs - startTimeUs;
+ } else {
+ diffUs = startTimeUs - endTimeUs;
+ }
+ return diffUs;
}
status_t AnotherPacketSource::nextBufferTime(int64_t *timeUs) {
@@ -323,9 +401,14 @@ bool AnotherPacketSource::isFinished(int64_t duration) const {
return (mEOSResult != OK);
}
-sp<AMessage> AnotherPacketSource::getLatestMeta() {
+sp<AMessage> AnotherPacketSource::getLatestEnqueuedMeta() {
Mutex::Autolock autoLock(mLock);
return mLatestEnqueuedMeta;
}
+sp<AMessage> AnotherPacketSource::getLatestDequeuedMeta() {
+ Mutex::Autolock autoLock(mLock);
+ return mLatestDequeuedMeta;
+}
+
} // namespace android
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index 9b193a2..809a858 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -49,12 +49,16 @@ struct AnotherPacketSource : public MediaSource {
// presentation timestamps since the last discontinuity (if any).
int64_t getBufferedDurationUs(status_t *finalResult);
+ int64_t getEstimatedDurationUs();
+
status_t nextBufferTime(int64_t *timeUs);
void queueAccessUnit(const sp<ABuffer> &buffer);
void queueDiscontinuity(
- ATSParser::DiscontinuityType type, const sp<AMessage> &extra);
+ ATSParser::DiscontinuityType type,
+ const sp<AMessage> &extra,
+ bool discard);
void signalEOS(status_t result);
@@ -62,7 +66,8 @@ struct AnotherPacketSource : public MediaSource {
bool isFinished(int64_t duration) const;
- sp<AMessage> getLatestMeta();
+ sp<AMessage> getLatestEnqueuedMeta();
+ sp<AMessage> getLatestDequeuedMeta();
protected:
virtual ~AnotherPacketSource();
@@ -72,13 +77,18 @@ private:
Condition mCondition;
bool mIsAudio;
+ bool mIsVideo;
sp<MetaData> mFormat;
int64_t mLastQueuedTimeUs;
List<sp<ABuffer> > mBuffers;
status_t mEOSResult;
sp<AMessage> mLatestEnqueuedMeta;
+ sp<AMessage> mLatestDequeuedMeta;
+
+ size_t mQueuedDiscontinuityCount;
bool wasFormatChange(int32_t discontinuityType) const;
+ int64_t getBufferedDurationUs_l(status_t *finalResult);
DISALLOW_EVIL_CONSTRUCTORS(AnotherPacketSource);
};
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 2b0711b..ef1cd3d 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -31,6 +31,7 @@
#include "include/avc_utils.h"
+#include <inttypes.h>
#include <netinet/in.h>
namespace android {
@@ -264,7 +265,7 @@ status_t ElementaryStreamQueue::appendData(
if (startOffset > 0) {
ALOGI("found something resembling an H.264/MPEG syncword "
- "at offset %d",
+ "at offset %zd",
startOffset);
}
@@ -297,7 +298,7 @@ status_t ElementaryStreamQueue::appendData(
if (startOffset > 0) {
ALOGI("found something resembling an H.264/MPEG syncword "
- "at offset %d",
+ "at offset %zd",
startOffset);
}
@@ -330,7 +331,7 @@ status_t ElementaryStreamQueue::appendData(
if (startOffset > 0) {
ALOGI("found something resembling an AAC syncword at "
- "offset %d",
+ "offset %zd",
startOffset);
}
@@ -358,7 +359,7 @@ status_t ElementaryStreamQueue::appendData(
if (startOffset > 0) {
ALOGI("found something resembling an AC3 syncword at "
- "offset %d",
+ "offset %zd",
startOffset);
}
@@ -385,7 +386,7 @@ status_t ElementaryStreamQueue::appendData(
if (startOffset > 0) {
ALOGI("found something resembling an MPEG audio "
- "syncword at offset %d",
+ "syncword at offset %zd",
startOffset);
}
@@ -409,7 +410,7 @@ status_t ElementaryStreamQueue::appendData(
if (mBuffer == NULL || neededSize > mBuffer->capacity()) {
neededSize = (neededSize + 65535) & ~65535;
- ALOGV("resizing buffer to size %d", neededSize);
+ ALOGV("resizing buffer to size %zu", neededSize);
sp<ABuffer> buffer = new ABuffer(neededSize);
if (mBuffer != NULL) {
@@ -432,7 +433,7 @@ status_t ElementaryStreamQueue::appendData(
#if 0
if (mMode == AAC) {
- ALOGI("size = %d, timeUs = %.2f secs", size, timeUs / 1E6);
+ ALOGI("size = %zu, timeUs = %.2f secs", size, timeUs / 1E6);
hexdump(data, size);
}
#endif
@@ -603,6 +604,8 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() {
// having to interpolate.
// The final AAC frame may well extend into the next RangeInfo but
// that's ok.
+ // TODO: the logic commented above is skipped because codec cannot take
+ // arbitrary sized input buffers;
size_t offset = 0;
while (offset < info.mLength) {
if (offset + 7 > mBuffer->size()) {
@@ -667,9 +670,12 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitAAC() {
size_t headerSize = protection_absent ? 7 : 9;
offset += aac_frame_length;
+ // TODO: move back to concatenation when codec can support arbitrary input buffers.
+ // For now only queue a single buffer
+ break;
}
- int64_t timeUs = fetchTimestamp(offset);
+ int64_t timeUs = fetchTimestampAAC(offset);
sp<ABuffer> accessUnit = new ABuffer(offset);
memcpy(accessUnit->data(), mBuffer->data(), offset);
@@ -716,6 +722,45 @@ int64_t ElementaryStreamQueue::fetchTimestamp(size_t size) {
return timeUs;
}
+// TODO: avoid interpolating timestamps once codec supports arbitrary sized input buffers
+int64_t ElementaryStreamQueue::fetchTimestampAAC(size_t size) {
+ int64_t timeUs = -1;
+ bool first = true;
+
+ size_t samplesize = size;
+ while (size > 0) {
+ CHECK(!mRangeInfos.empty());
+
+ RangeInfo *info = &*mRangeInfos.begin();
+
+ if (first) {
+ timeUs = info->mTimestampUs;
+ first = false;
+ }
+
+ if (info->mLength > size) {
+ int32_t sampleRate;
+ CHECK(mFormat->findInt32(kKeySampleRate, &sampleRate));
+ info->mLength -= size;
+ size_t numSamples = 1024 * size / samplesize;
+ info->mTimestampUs += numSamples * 1000000ll / sampleRate;
+ size = 0;
+ } else {
+ size -= info->mLength;
+
+ mRangeInfos.erase(mRangeInfos.begin());
+ info = NULL;
+ }
+
+ }
+
+ if (timeUs == 0ll) {
+ ALOGV("Returning 0 timestamp");
+ }
+
+ return timeUs;
+}
+
struct NALPosition {
size_t nalOffset;
size_t nalSize;
@@ -776,6 +821,12 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() {
unsigned nalType = mBuffer->data()[pos.nalOffset] & 0x1f;
+ if (nalType == 6) {
+ sp<ABuffer> sei = new ABuffer(pos.nalSize);
+ memcpy(sei->data(), mBuffer->data() + pos.nalOffset, pos.nalSize);
+ accessUnit->meta()->setBuffer("sei", sei);
+ }
+
#if !LOG_NDEBUG
char tmp[128];
sprintf(tmp, "0x%02x", nalType);
@@ -794,7 +845,9 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() {
dstOffset += pos.nalSize + 4;
}
+#if !LOG_NDEBUG
ALOGV("accessUnit contains nal types %s", out.c_str());
+#endif
const NALPosition &pos = nals.itemAt(nals.size() - 1);
size_t nextScan = pos.nalOffset + pos.nalSize;
@@ -1025,7 +1078,7 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitMPEGVideo() {
accessUnit->meta()->setInt64("timeUs", timeUs);
- ALOGV("returning MPEG video access unit at time %lld us",
+ ALOGV("returning MPEG video access unit at time %" PRId64 " us",
timeUs);
// hexdump(accessUnit->data(), accessUnit->size());
@@ -1184,7 +1237,7 @@ sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitMPEG4Video() {
accessUnit->meta()->setInt64("timeUs", timeUs);
- ALOGV("returning MPEG4 video access unit at time %lld us",
+ ALOGV("returning MPEG4 video access unit at time %" PRId64 " us",
timeUs);
// hexdump(accessUnit->data(), accessUnit->size());
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index a2cca77..7c81ff0 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -77,6 +77,7 @@ private:
// consume a logical (compressed) access unit of size "size",
// returns its timestamp in us (or -1 if no time information).
int64_t fetchTimestamp(size_t size);
+ int64_t fetchTimestampAAC(size_t size);
DISALLOW_EVIL_CONSTRUCTORS(ElementaryStreamQueue);
};
diff --git a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
index dd714c9..85859f7 100644
--- a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
@@ -36,6 +36,8 @@
#include <media/stagefright/Utils.h>
#include <utils/String8.h>
+#include <inttypes.h>
+
namespace android {
struct MPEG2PSExtractor::Track : public MediaSource {
@@ -130,7 +132,8 @@ sp<MediaSource> MPEG2PSExtractor::getTrack(size_t index) {
return new WrappedTrack(this, mTracks.valueAt(index));
}
-sp<MetaData> MPEG2PSExtractor::getTrackMetaData(size_t index, uint32_t flags) {
+sp<MetaData> MPEG2PSExtractor::getTrackMetaData(
+ size_t index, uint32_t /* flags */) {
if (index >= mTracks.size()) {
return NULL;
}
@@ -408,7 +411,7 @@ ssize_t MPEG2PSExtractor::dequeuePES() {
PTS |= br.getBits(15);
CHECK_EQ(br.getBits(1), 1u);
- ALOGV("PTS = %llu", PTS);
+ ALOGV("PTS = %" PRIu64, PTS);
// ALOGI("PTS = %.2f secs", PTS / 90000.0f);
optional_bytes_remaining -= 5;
@@ -425,7 +428,7 @@ ssize_t MPEG2PSExtractor::dequeuePES() {
DTS |= br.getBits(15);
CHECK_EQ(br.getBits(1), 1u);
- ALOGV("DTS = %llu", DTS);
+ ALOGV("DTS = %" PRIu64, DTS);
optional_bytes_remaining -= 5;
}
@@ -443,7 +446,7 @@ ssize_t MPEG2PSExtractor::dequeuePES() {
ESCR |= br.getBits(15);
CHECK_EQ(br.getBits(1), 1u);
- ALOGV("ESCR = %llu", ESCR);
+ ALOGV("ESCR = %" PRIu64, ESCR);
/* unsigned ESCR_extension = */br.getBits(9);
CHECK_EQ(br.getBits(1), 1u);
@@ -472,7 +475,7 @@ ssize_t MPEG2PSExtractor::dequeuePES() {
if (br.numBitsLeft() < dataLength * 8) {
ALOGE("PES packet does not carry enough data to contain "
- "payload. (numBitsLeft = %d, required = %d)",
+ "payload. (numBitsLeft = %zu, required = %u)",
br.numBitsLeft(), dataLength * 8);
return ERROR_MALFORMED;
@@ -625,7 +628,7 @@ status_t MPEG2PSExtractor::Track::read(
status_t MPEG2PSExtractor::Track::appendPESData(
unsigned PTS_DTS_flags,
- uint64_t PTS, uint64_t DTS,
+ uint64_t PTS, uint64_t /* DTS */,
const uint8_t *data, size_t size) {
if (mQueue == NULL) {
return OK;
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index d449c34..35ca118 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -141,7 +141,7 @@ sp<MediaSource> MPEG2TSExtractor::getTrack(size_t index) {
}
sp<MetaData> MPEG2TSExtractor::getTrackMetaData(
- size_t index, uint32_t flags) {
+ size_t index, uint32_t /* flags */) {
return index < mSourceImpls.size()
? mSourceImpls.editItemAt(index)->getFormat() : NULL;
}
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index cd912e7..aaa8334 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -1,6 +1,10 @@
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
+ifeq ($(TARGET_DEVICE), manta)
+ LOCAL_CFLAGS += -DSURFACE_IS_BGR32
+endif
+
LOCAL_SRC_FILES:= \
GraphicBufferSource.cpp \
OMX.cpp \
@@ -10,6 +14,7 @@ LOCAL_SRC_FILES:= \
SoftOMXComponent.cpp \
SoftOMXPlugin.cpp \
SoftVideoDecoderOMXComponent.cpp \
+ SoftVideoEncoderOMXComponent.cpp \
LOCAL_C_INCLUDES += \
$(TOP)/frameworks/av/media/libstagefright \
@@ -18,6 +23,7 @@ LOCAL_C_INCLUDES += \
LOCAL_SHARED_LIBRARIES := \
libbinder \
+ libhardware \
libmedia \
libutils \
liblog \
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 44f0be7..3e70956 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <inttypes.h>
+
#define LOG_TAG "GraphicBufferSource"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -27,13 +29,16 @@
#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
+#include <inttypes.h>
+
namespace android {
static const bool EXTRA_CHECK = true;
GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
- uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount) :
+ uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount,
+ bool useGraphicBufferInMeta) :
mInitCheck(UNKNOWN_ERROR),
mNodeInstance(nodeInstance),
mExecuting(false),
@@ -41,16 +46,22 @@ GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
mNumFramesAvailable(0),
mEndOfStream(false),
mEndOfStreamSent(false),
- mRepeatAfterUs(-1ll),
mMaxTimestampGapUs(-1ll),
mPrevOriginalTimeUs(-1ll),
mPrevModifiedTimeUs(-1ll),
+ mSkipFramesBeforeNs(-1ll),
+ mRepeatAfterUs(-1ll),
mRepeatLastFrameGeneration(0),
mRepeatLastFrameTimestamp(-1ll),
mLatestSubmittedBufferId(-1),
mLatestSubmittedBufferFrameNum(0),
mLatestSubmittedBufferUseCount(0),
- mRepeatBufferDeferred(false) {
+ mRepeatBufferDeferred(false),
+ mTimePerCaptureUs(-1ll),
+ mTimePerFrameUs(-1ll),
+ mPrevCaptureUs(-1ll),
+ mPrevFrameUs(-1ll),
+ mUseGraphicBufferInMeta(useGraphicBufferInMeta) {
ALOGV("GraphicBufferSource w=%u h=%u c=%u",
bufferWidth, bufferHeight, bufferCount);
@@ -63,13 +74,12 @@ GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
String8 name("GraphicBufferSource");
- mBufferQueue = new BufferQueue();
- mBufferQueue->setConsumerName(name);
- mBufferQueue->setDefaultBufferSize(bufferWidth, bufferHeight);
- mBufferQueue->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
- GRALLOC_USAGE_HW_TEXTURE);
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+ mConsumer->setConsumerName(name);
+ mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
+ mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
- mInitCheck = mBufferQueue->setMaxAcquiredBufferCount(bufferCount);
+ mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
if (mInitCheck != NO_ERROR) {
ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
bufferCount, mInitCheck);
@@ -83,7 +93,7 @@ GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
- mInitCheck = mBufferQueue->consumerConnect(proxy, false);
+ mInitCheck = mConsumer->consumerConnect(proxy, false);
if (mInitCheck != NO_ERROR) {
ALOGE("Error connecting to BufferQueue: %s (%d)",
strerror(-mInitCheck), mInitCheck);
@@ -95,8 +105,8 @@ GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
GraphicBufferSource::~GraphicBufferSource() {
ALOGV("~GraphicBufferSource");
- if (mBufferQueue != NULL) {
- status_t err = mBufferQueue->consumerDisconnect();
+ if (mConsumer != NULL) {
+ status_t err = mConsumer->consumerDisconnect();
if (err != NO_ERROR) {
ALOGW("consumerDisconnect failed: %d", err);
}
@@ -105,7 +115,7 @@ GraphicBufferSource::~GraphicBufferSource() {
void GraphicBufferSource::omxExecuting() {
Mutex::Autolock autoLock(mMutex);
- ALOGV("--> executing; avail=%d, codec vec size=%zd",
+ ALOGV("--> executing; avail=%zu, codec vec size=%zd",
mNumFramesAvailable, mCodecBuffers.size());
CHECK(!mExecuting);
mExecuting = true;
@@ -127,7 +137,7 @@ void GraphicBufferSource::omxExecuting() {
}
}
- ALOGV("done loading initial frames, avail=%d", mNumFramesAvailable);
+ ALOGV("done loading initial frames, avail=%zu", mNumFramesAvailable);
// If EOS has already been signaled, and there are no more frames to
// submit, try to send EOS now as well.
@@ -179,7 +189,7 @@ void GraphicBufferSource::omxLoaded(){
mLooper.clear();
}
- ALOGV("--> loaded; avail=%d eos=%d eosSent=%d",
+ ALOGV("--> loaded; avail=%zu eos=%d eosSent=%d",
mNumFramesAvailable, mEndOfStream, mEndOfStreamSent);
// Codec is no longer executing. Discard all codec-related state.
@@ -200,7 +210,7 @@ void GraphicBufferSource::addCodecBuffer(OMX_BUFFERHEADERTYPE* header) {
return;
}
- ALOGV("addCodecBuffer h=%p size=%lu p=%p",
+ ALOGV("addCodecBuffer h=%p size=%" PRIu32 " p=%p",
header, header->nAllocLen, header->pBuffer);
CodecBuffer codecBuffer;
codecBuffer.mHeader = header;
@@ -221,7 +231,7 @@ void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header) {
return;
}
- ALOGV("codecBufferEmptied h=%p size=%lu filled=%lu p=%p",
+ ALOGV("codecBufferEmptied h=%p size=%" PRIu32 " filled=%" PRIu32 " p=%p",
header, header->nAllocLen, header->nFilledLen,
header->pBuffer);
CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
@@ -246,13 +256,25 @@ void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header) {
// Pull the graphic buffer handle back out of the buffer, and confirm
// that it matches expectations.
OMX_U8* data = header->pBuffer;
- buffer_handle_t bufferHandle;
- memcpy(&bufferHandle, data + 4, sizeof(buffer_handle_t));
- if (bufferHandle != codecBuffer.mGraphicBuffer->handle) {
- // should never happen
- ALOGE("codecBufferEmptied: buffer's handle is %p, expected %p",
- bufferHandle, codecBuffer.mGraphicBuffer->handle);
- CHECK(!"codecBufferEmptied: mismatched buffer");
+ MetadataBufferType type = *(MetadataBufferType *)data;
+ if (type == kMetadataBufferTypeGrallocSource) {
+ buffer_handle_t bufferHandle;
+ memcpy(&bufferHandle, data + 4, sizeof(buffer_handle_t));
+ if (bufferHandle != codecBuffer.mGraphicBuffer->handle) {
+ // should never happen
+ ALOGE("codecBufferEmptied: buffer's handle is %p, expected %p",
+ bufferHandle, codecBuffer.mGraphicBuffer->handle);
+ CHECK(!"codecBufferEmptied: mismatched buffer");
+ }
+ } else if (type == kMetadataBufferTypeGraphicBuffer) {
+ GraphicBuffer *buffer;
+ memcpy(&buffer, data + 4, sizeof(buffer));
+ if (buffer != codecBuffer.mGraphicBuffer.get()) {
+ // should never happen
+ ALOGE("codecBufferEmptied: buffer is %p, expected %p",
+ buffer, codecBuffer.mGraphicBuffer.get());
+ CHECK(!"codecBufferEmptied: mismatched buffer");
+ }
}
}
@@ -268,7 +290,7 @@ void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header) {
if (id == mLatestSubmittedBufferId) {
CHECK_GT(mLatestSubmittedBufferUseCount--, 0);
} else {
- mBufferQueue->releaseBuffer(id, codecBuffer.mFrameNumber,
+ mConsumer->releaseBuffer(id, codecBuffer.mFrameNumber,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
}
} else {
@@ -282,7 +304,7 @@ void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header) {
if (mNumFramesAvailable) {
// Fill this codec buffer.
CHECK(!mEndOfStreamSent);
- ALOGV("buffer freed, %d frames avail (eos=%d)",
+ ALOGV("buffer freed, %zu frames avail (eos=%d)",
mNumFramesAvailable, mEndOfStream);
fillCodecBuffer_l();
} else if (mEndOfStream) {
@@ -311,7 +333,8 @@ void GraphicBufferSource::codecBufferFilled(OMX_BUFFERHEADERTYPE* header) {
ssize_t index = mOriginalTimeUs.indexOfKey(header->nTimeStamp);
if (index >= 0) {
ALOGV("OUT timestamp: %lld -> %lld",
- header->nTimeStamp, mOriginalTimeUs[index]);
+ static_cast<long long>(header->nTimeStamp),
+ static_cast<long long>(mOriginalTimeUs[index]));
header->nTimeStamp = mOriginalTimeUs[index];
mOriginalTimeUs.removeItemsAt(index);
} else {
@@ -322,7 +345,7 @@ void GraphicBufferSource::codecBufferFilled(OMX_BUFFERHEADERTYPE* header) {
}
if (mOriginalTimeUs.size() > BufferQueue::NUM_BUFFER_SLOTS) {
// something terribly wrong must have happened, giving up...
- ALOGE("mOriginalTimeUs has too many entries (%d)",
+ ALOGE("mOriginalTimeUs has too many entries (%zu)",
mOriginalTimeUs.size());
mMaxTimestampGapUs = -1ll;
}
@@ -337,7 +360,7 @@ void GraphicBufferSource::suspend(bool suspend) {
while (mNumFramesAvailable > 0) {
BufferQueue::BufferItem item;
- status_t err = mBufferQueue->acquireBuffer(&item, 0);
+ status_t err = mConsumer->acquireBuffer(&item, 0);
if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
// shouldn't happen.
@@ -350,7 +373,7 @@ void GraphicBufferSource::suspend(bool suspend) {
--mNumFramesAvailable;
- mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+ mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
}
return;
@@ -379,15 +402,15 @@ bool GraphicBufferSource::fillCodecBuffer_l() {
int cbi = findAvailableCodecBuffer_l();
if (cbi < 0) {
// No buffers available, bail.
- ALOGV("fillCodecBuffer_l: no codec buffers, avail now %d",
+ ALOGV("fillCodecBuffer_l: no codec buffers, avail now %zu",
mNumFramesAvailable);
return false;
}
- ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%d",
+ ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%zu",
mNumFramesAvailable);
BufferQueue::BufferItem item;
- status_t err = mBufferQueue->acquireBuffer(&item, 0);
+ status_t err = mConsumer->acquireBuffer(&item, 0);
if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
// shouldn't happen
ALOGW("fillCodecBuffer_l: frame was not available");
@@ -414,10 +437,21 @@ bool GraphicBufferSource::fillCodecBuffer_l() {
mBufferSlot[item.mBuf] = item.mGraphicBuffer;
}
- err = submitBuffer_l(item, cbi);
+ err = UNKNOWN_ERROR;
+
+ // only submit sample if start time is unspecified, or sample
+ // is queued after the specified start time
+ if (mSkipFramesBeforeNs < 0ll || item.mTimestamp >= mSkipFramesBeforeNs) {
+ // if start time is set, offset time stamp by start time
+ if (mSkipFramesBeforeNs > 0) {
+ item.mTimestamp -= mSkipFramesBeforeNs;
+ }
+ err = submitBuffer_l(item, cbi);
+ }
+
if (err != OK) {
ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf);
- mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+ mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
} else {
ALOGV("buffer submitted (bq %d, cbi %d)", item.mBuf, cbi);
@@ -440,7 +474,7 @@ bool GraphicBufferSource::repeatLatestSubmittedBuffer_l() {
//
// To be on the safe side we try to release the buffer.
ALOGD("repeatLatestSubmittedBuffer_l: slot was NULL");
- mBufferQueue->releaseBuffer(
+ mConsumer->releaseBuffer(
mLatestSubmittedBufferId,
mLatestSubmittedBufferFrameNum,
EGL_NO_DISPLAY,
@@ -494,7 +528,7 @@ void GraphicBufferSource::setLatestSubmittedBuffer_l(
if (mLatestSubmittedBufferId >= 0) {
if (mLatestSubmittedBufferUseCount == 0) {
- mBufferQueue->releaseBuffer(
+ mConsumer->releaseBuffer(
mLatestSubmittedBufferId,
mLatestSubmittedBufferFrameNum,
EGL_NO_DISPLAY,
@@ -520,7 +554,7 @@ void GraphicBufferSource::setLatestSubmittedBuffer_l(
status_t GraphicBufferSource::signalEndOfInputStream() {
Mutex::Autolock autoLock(mMutex);
- ALOGV("signalEndOfInputStream: exec=%d avail=%d eos=%d",
+ ALOGV("signalEndOfInputStream: exec=%d avail=%zu eos=%d",
mExecuting, mNumFramesAvailable, mEndOfStream);
if (mEndOfStream) {
@@ -548,7 +582,32 @@ status_t GraphicBufferSource::signalEndOfInputStream() {
int64_t GraphicBufferSource::getTimestamp(const BufferQueue::BufferItem &item) {
int64_t timeUs = item.mTimestamp / 1000;
- if (mMaxTimestampGapUs > 0ll) {
+ if (mTimePerCaptureUs > 0ll) {
+ // Time lapse or slow motion mode
+ if (mPrevCaptureUs < 0ll) {
+ // first capture
+ mPrevCaptureUs = timeUs;
+ mPrevFrameUs = timeUs;
+ } else {
+ // snap to nearest capture point
+ int64_t nFrames = (timeUs + mTimePerCaptureUs / 2 - mPrevCaptureUs)
+ / mTimePerCaptureUs;
+ if (nFrames <= 0) {
+ // skip this frame as it's too close to previous capture
+ ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
+ return -1;
+ }
+ mPrevCaptureUs = mPrevCaptureUs + nFrames * mTimePerCaptureUs;
+ mPrevFrameUs += mTimePerFrameUs * nFrames;
+ }
+
+ ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
+ static_cast<long long>(timeUs),
+ static_cast<long long>(mPrevCaptureUs),
+ static_cast<long long>(mPrevFrameUs));
+
+ return mPrevFrameUs;
+ } else if (mMaxTimestampGapUs > 0ll) {
/* Cap timestamp gap between adjacent frames to specified max
*
* In the scenario of cast mirroring, encoding could be suspended for
@@ -572,7 +631,9 @@ int64_t GraphicBufferSource::getTimestamp(const BufferQueue::BufferItem &item) {
mPrevOriginalTimeUs = originalTimeUs;
mPrevModifiedTimeUs = timeUs;
mOriginalTimeUs.add(timeUs, originalTimeUs);
- ALOGV("IN timestamp: %lld -> %lld", originalTimeUs, timeUs);
+ ALOGV("IN timestamp: %lld -> %lld",
+ static_cast<long long>(originalTimeUs),
+ static_cast<long long>(timeUs));
}
return timeUs;
@@ -595,10 +656,22 @@ status_t GraphicBufferSource::submitBuffer_l(
OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
CHECK(header->nAllocLen >= 4 + sizeof(buffer_handle_t));
OMX_U8* data = header->pBuffer;
- const OMX_U32 type = kMetadataBufferTypeGrallocSource;
- buffer_handle_t handle = codecBuffer.mGraphicBuffer->handle;
- memcpy(data, &type, 4);
- memcpy(data + 4, &handle, sizeof(buffer_handle_t));
+ buffer_handle_t handle;
+ if (!mUseGraphicBufferInMeta) {
+ const OMX_U32 type = kMetadataBufferTypeGrallocSource;
+ handle = codecBuffer.mGraphicBuffer->handle;
+ memcpy(data, &type, 4);
+ memcpy(data + 4, &handle, sizeof(buffer_handle_t));
+ } else {
+ // codecBuffer holds a reference to the GraphicBuffer, so
+ // it is valid while it is with the OMX component
+ const OMX_U32 type = kMetadataBufferTypeGraphicBuffer;
+ memcpy(data, &type, 4);
+ // passing a non-reference-counted graphicBuffer
+ GraphicBuffer *buffer = codecBuffer.mGraphicBuffer.get();
+ handle = buffer->handle;
+ memcpy(data + 4, &buffer, sizeof(buffer));
+ }
status_t err = mNodeInstance->emptyDirectBuffer(header, 0,
4 + sizeof(buffer_handle_t), OMX_BUFFERFLAG_ENDOFFRAME,
@@ -680,7 +753,7 @@ int GraphicBufferSource::findMatchingCodecBuffer_l(
void GraphicBufferSource::onFrameAvailable() {
Mutex::Autolock autoLock(mMutex);
- ALOGV("onFrameAvailable exec=%d avail=%d",
+ ALOGV("onFrameAvailable exec=%d avail=%zu",
mExecuting, mNumFramesAvailable);
if (mEndOfStream || mSuspended) {
@@ -694,15 +767,15 @@ void GraphicBufferSource::onFrameAvailable() {
}
BufferQueue::BufferItem item;
- status_t err = mBufferQueue->acquireBuffer(&item, 0);
+ status_t err = mConsumer->acquireBuffer(&item, 0);
if (err == OK) {
// If this is the first time we're seeing this buffer, add it to our
// slot table.
if (item.mGraphicBuffer != NULL) {
- ALOGV("fillCodecBuffer_l: setting mBufferSlot %d", item.mBuf);
+ ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mBuf);
mBufferSlot[item.mBuf] = item.mGraphicBuffer;
}
- mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+ mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
}
return;
@@ -722,13 +795,13 @@ void GraphicBufferSource::onFrameAvailable() {
void GraphicBufferSource::onBuffersReleased() {
Mutex::Autolock lock(mMutex);
- uint32_t slotMask;
- if (mBufferQueue->getReleasedBuffers(&slotMask) != NO_ERROR) {
+ uint64_t slotMask;
+ if (mConsumer->getReleasedBuffers(&slotMask) != NO_ERROR) {
ALOGW("onBuffersReleased: unable to get released buffer set");
- slotMask = 0xffffffff;
+ slotMask = 0xffffffffffffffffULL;
}
- ALOGV("onBuffersReleased: 0x%08x", slotMask);
+ ALOGV("onBuffersReleased: 0x%016" PRIx64, slotMask);
for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
if ((slotMask & 0x01) != 0) {
@@ -738,6 +811,11 @@ void GraphicBufferSource::onBuffersReleased() {
}
}
+// BufferQueue::ConsumerListener callback
+void GraphicBufferSource::onSidebandStreamChanged() {
+ ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
+}
+
status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(
int64_t repeatAfterUs) {
Mutex::Autolock autoLock(mMutex);
@@ -762,6 +840,27 @@ status_t GraphicBufferSource::setMaxTimestampGapUs(int64_t maxGapUs) {
return OK;
}
+
+void GraphicBufferSource::setSkipFramesBeforeUs(int64_t skipFramesBeforeUs) {
+ Mutex::Autolock autoLock(mMutex);
+
+ mSkipFramesBeforeNs =
+ (skipFramesBeforeUs > 0) ? (skipFramesBeforeUs * 1000) : -1ll;
+}
+
+status_t GraphicBufferSource::setTimeLapseUs(int64_t* data) {
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mExecuting || data[0] <= 0ll || data[1] <= 0ll) {
+ return INVALID_OPERATION;
+ }
+
+ mTimePerFrameUs = data[0];
+ mTimePerCaptureUs = data[1];
+
+ return OK;
+}
+
void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatRepeatLastFrame:
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 3b0e454..c0860ab 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -49,7 +49,8 @@ namespace android {
class GraphicBufferSource : public BufferQueue::ConsumerListener {
public:
GraphicBufferSource(OMXNodeInstance* nodeInstance,
- uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount);
+ uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount,
+ bool useGraphicBufferInMeta = false);
virtual ~GraphicBufferSource();
// We can't throw an exception if the constructor fails, so we just set
@@ -61,7 +62,7 @@ public:
// Returns the handle to the producer side of the BufferQueue. Buffers
// queued on this will be received by GraphicBufferSource.
sp<IGraphicBufferProducer> getIGraphicBufferProducer() const {
- return mBufferQueue;
+ return mProducer;
}
// This is called when OMX transitions to OMX_StateExecuting, which means
@@ -118,6 +119,17 @@ public:
// of suspension on input.
status_t setMaxTimestampGapUs(int64_t maxGapUs);
+ // Sets the time lapse (or slow motion) parameters.
+ // data[0] is the time (us) between two frames for playback
+ // data[1] is the time (us) between two frames for capture
+ // When set, the sample's timestamp will be modified to playback framerate,
+ // and capture timestamp will be modified to capture rate.
+ status_t setTimeLapseUs(int64_t* data);
+
+ // Sets the start time us (in system time), samples before which should
+ // be dropped and not submitted to encoder
+ void setSkipFramesBeforeUs(int64_t startTimeUs);
+
protected:
// BufferQueue::ConsumerListener interface, called when a new frame of
// data is available. If we're executing and a codec buffer is
@@ -132,6 +144,11 @@ protected:
// set of mBufferSlot entries.
virtual void onBuffersReleased();
+ // BufferQueue::ConsumerListener interface, called when the client has
+ // changed the sideband stream. GraphicBufferSource doesn't handle sideband
+ // streams so this is a no-op (and should never be called).
+ virtual void onSidebandStreamChanged();
+
private:
// Keep track of codec input buffers. They may either be available
// (mGraphicBuffer == NULL) or in use by the codec.
@@ -194,8 +211,11 @@ private:
bool mSuspended;
- // We consume graphic buffers from this.
- sp<BufferQueue> mBufferQueue;
+ // Our BufferQueue interfaces. mProducer is passed to the producer through
+ // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
+ // the buffers queued by the producer.
+ sp<IGraphicBufferProducer> mProducer;
+ sp<IGraphicBufferConsumer> mConsumer;
// Number of frames pending in BufferQueue that haven't yet been
// forwarded to the codec.
@@ -223,16 +243,17 @@ private:
enum {
kRepeatLastFrameCount = 10,
};
- int64_t mRepeatAfterUs;
- int64_t mMaxTimestampGapUs;
KeyedVector<int64_t, int64_t> mOriginalTimeUs;
+ int64_t mMaxTimestampGapUs;
int64_t mPrevOriginalTimeUs;
int64_t mPrevModifiedTimeUs;
+ int64_t mSkipFramesBeforeNs;
sp<ALooper> mLooper;
sp<AHandlerReflector<GraphicBufferSource> > mReflector;
+ int64_t mRepeatAfterUs;
int32_t mRepeatLastFrameGeneration;
int64_t mRepeatLastFrameTimestamp;
int32_t mRepeatLastFrameCount;
@@ -245,6 +266,14 @@ private:
// no codec buffer was available at the time.
bool mRepeatBufferDeferred;
+ // Time lapse / slow motion configuration
+ int64_t mTimePerCaptureUs;
+ int64_t mTimePerFrameUs;
+ int64_t mPrevCaptureUs;
+ int64_t mPrevFrameUs;
+
+ bool mUseGraphicBufferInMeta;
+
void onMessageReceived(const sp<AMessage> &msg);
DISALLOW_EVIL_CONSTRUCTORS(GraphicBufferSource);
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 84a0e10..41407e4 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <inttypes.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "OMX"
#include <utils/Log.h>
@@ -185,7 +187,7 @@ void OMX::binderDied(const wp<IBinder> &the_late_who) {
instance->onObserverDied(mMaster);
}
-bool OMX::livesLocally(node_id node, pid_t pid) {
+bool OMX::livesLocally(node_id /* node */, pid_t pid) {
return pid == getpid();
}
@@ -231,7 +233,7 @@ status_t OMX::allocateNode(
instance, &handle);
if (err != OMX_ErrorNone) {
- ALOGV("FAILED to allocate omx component '%s'", name);
+ ALOGE("FAILED to allocate omx component '%s'", name);
instance->onGetHandleFailed();
@@ -285,6 +287,7 @@ status_t OMX::sendCommand(
status_t OMX::getParameter(
node_id node, OMX_INDEXTYPE index,
void *params, size_t size) {
+ ALOGV("getParameter(%u %#x %p %zd)", node, index, params, size);
return findInstance(node)->getParameter(
index, params, size);
}
@@ -292,6 +295,7 @@ status_t OMX::getParameter(
status_t OMX::setParameter(
node_id node, OMX_INDEXTYPE index,
const void *params, size_t size) {
+ ALOGV("setParameter(%u %#x %p %zd)", node, index, params, size);
return findInstance(node)->setParameter(
index, params, size);
}
@@ -338,6 +342,13 @@ status_t OMX::prepareForAdaptivePlayback(
portIndex, enable, maxFrameWidth, maxFrameHeight);
}
+status_t OMX::configureVideoTunnelMode(
+ node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
+ return findInstance(node)->configureVideoTunnelMode(
+ portIndex, tunneled, audioHwSync, sidebandHandle);
+}
+
status_t OMX::useBuffer(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
buffer_id *buffer) {
@@ -424,8 +435,8 @@ OMX_ERRORTYPE OMX::OnEvent(
OMX_IN OMX_EVENTTYPE eEvent,
OMX_IN OMX_U32 nData1,
OMX_IN OMX_U32 nData2,
- OMX_IN OMX_PTR pEventData) {
- ALOGV("OnEvent(%d, %ld, %ld)", eEvent, nData1, nData2);
+ OMX_IN OMX_PTR /* pEventData */) {
+ ALOGV("OnEvent(%d, %" PRIu32", %" PRIu32 ")", eEvent, nData1, nData2);
// Forward to OMXNodeInstance.
findInstance(node)->onEvent(eEvent, nData1, nData2);
@@ -443,13 +454,13 @@ OMX_ERRORTYPE OMX::OnEvent(
}
OMX_ERRORTYPE OMX::OnEmptyBufferDone(
- node_id node, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) {
+ node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) {
ALOGV("OnEmptyBufferDone buffer=%p", pBuffer);
omx_message msg;
msg.type = omx_message::EMPTY_BUFFER_DONE;
msg.node = node;
- msg.u.buffer_data.buffer = pBuffer;
+ msg.u.buffer_data.buffer = buffer;
findDispatcher(node)->post(msg);
@@ -457,19 +468,17 @@ OMX_ERRORTYPE OMX::OnEmptyBufferDone(
}
OMX_ERRORTYPE OMX::OnFillBufferDone(
- node_id node, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) {
+ node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) {
ALOGV("OnFillBufferDone buffer=%p", pBuffer);
omx_message msg;
msg.type = omx_message::FILL_BUFFER_DONE;
msg.node = node;
- msg.u.extended_buffer_data.buffer = pBuffer;
+ msg.u.extended_buffer_data.buffer = buffer;
msg.u.extended_buffer_data.range_offset = pBuffer->nOffset;
msg.u.extended_buffer_data.range_length = pBuffer->nFilledLen;
msg.u.extended_buffer_data.flags = pBuffer->nFlags;
msg.u.extended_buffer_data.timestamp = pBuffer->nTimeStamp;
- msg.u.extended_buffer_data.platform_private = pBuffer->pPlatformPrivate;
- msg.u.extended_buffer_data.data_ptr = pBuffer->pBuffer;
findDispatcher(node)->post(msg);
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index 6b6d0ab..ae3cb33 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -91,7 +91,7 @@ void OMXMaster::addPlugin(OMXPluginBase *plugin) {
}
if (err != OMX_ErrorNoMore) {
- ALOGE("OMX plugin failed w/ error 0x%08x after registering %d "
+ ALOGE("OMX plugin failed w/ error 0x%08x after registering %zu "
"components", err, mPluginByComponentName.size());
}
}
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 6c5c857..f9c84e2 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -92,10 +92,14 @@ OMX_CALLBACKTYPE OMXNodeInstance::kCallbacks = {
OMXNodeInstance::OMXNodeInstance(
OMX *owner, const sp<IOMXObserver> &observer)
: mOwner(owner),
- mNodeID(NULL),
+ mNodeID(0),
mHandle(NULL),
mObserver(observer),
- mDying(false) {
+ mDying(false)
+#ifdef __LP64__
+ , mBufferIDCount(0)
+#endif
+{
}
OMXNodeInstance::~OMXNodeInstance() {
@@ -145,6 +149,11 @@ static status_t StatusFromOMXError(OMX_ERRORTYPE err) {
status_t OMXNodeInstance::freeNode(OMXMaster *master) {
static int32_t kMaxNumIterations = 10;
+ // exit if we have already freed the node
+ if (mHandle == NULL) {
+ return OK;
+ }
+
// Transition the node from its current state all the way down
// to "Loaded".
// This ensures that all active buffers are properly freed even
@@ -232,7 +241,7 @@ status_t OMXNodeInstance::freeNode(OMXMaster *master) {
}
mOwner->invalidateNodeID(mNodeID);
- mNodeID = NULL;
+ mNodeID = 0;
ALOGV("OMXNodeInstance going away.");
delete this;
@@ -266,26 +275,26 @@ status_t OMXNodeInstance::sendCommand(
}
status_t OMXNodeInstance::getParameter(
- OMX_INDEXTYPE index, void *params, size_t size) {
+ OMX_INDEXTYPE index, void *params, size_t /* size */) {
Mutex::Autolock autoLock(mLock);
OMX_ERRORTYPE err = OMX_GetParameter(mHandle, index, params);
-
+ ALOGE_IF(err != OMX_ErrorNone, "getParameter(%d) ERROR: %#x", index, err);
return StatusFromOMXError(err);
}
status_t OMXNodeInstance::setParameter(
- OMX_INDEXTYPE index, const void *params, size_t size) {
+ OMX_INDEXTYPE index, const void *params, size_t /* size */) {
Mutex::Autolock autoLock(mLock);
OMX_ERRORTYPE err = OMX_SetParameter(
mHandle, index, const_cast<void *>(params));
-
+ ALOGE_IF(err != OMX_ErrorNone, "setParameter(%d) ERROR: %#x", index, err);
return StatusFromOMXError(err);
}
status_t OMXNodeInstance::getConfig(
- OMX_INDEXTYPE index, void *params, size_t size) {
+ OMX_INDEXTYPE index, void *params, size_t /* size */) {
Mutex::Autolock autoLock(mLock);
OMX_ERRORTYPE err = OMX_GetConfig(mHandle, index, params);
@@ -293,7 +302,7 @@ status_t OMXNodeInstance::getConfig(
}
status_t OMXNodeInstance::setConfig(
- OMX_INDEXTYPE index, const void *params, size_t size) {
+ OMX_INDEXTYPE index, const void *params, size_t /* size */) {
Mutex::Autolock autoLock(mLock);
OMX_ERRORTYPE err = OMX_SetConfig(
@@ -389,20 +398,39 @@ status_t OMXNodeInstance::storeMetaDataInBuffers(
OMX_U32 portIndex,
OMX_BOOL enable) {
Mutex::Autolock autolock(mLock);
- return storeMetaDataInBuffers_l(portIndex, enable);
+ return storeMetaDataInBuffers_l(
+ portIndex, enable,
+ OMX_FALSE /* useGraphicBuffer */, NULL /* usingGraphicBufferInMetadata */);
}
status_t OMXNodeInstance::storeMetaDataInBuffers_l(
OMX_U32 portIndex,
- OMX_BOOL enable) {
+ OMX_BOOL enable,
+ OMX_BOOL useGraphicBuffer,
+ OMX_BOOL *usingGraphicBufferInMetadata) {
OMX_INDEXTYPE index;
OMX_STRING name = const_cast<OMX_STRING>(
"OMX.google.android.index.storeMetaDataInBuffers");
- OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index);
+ OMX_STRING graphicBufferName = const_cast<OMX_STRING>(
+ "OMX.google.android.index.storeGraphicBufferInMetaData");
+ if (usingGraphicBufferInMetadata == NULL) {
+ usingGraphicBufferInMetadata = &useGraphicBuffer;
+ }
+
+ OMX_ERRORTYPE err =
+ (useGraphicBuffer && portIndex == kPortIndexInput)
+ ? OMX_GetExtensionIndex(mHandle, graphicBufferName, &index)
+ : OMX_ErrorBadParameter;
+ if (err == OMX_ErrorNone) {
+ *usingGraphicBufferInMetadata = OMX_TRUE;
+ } else {
+ *usingGraphicBufferInMetadata = OMX_FALSE;
+ err = OMX_GetExtensionIndex(mHandle, name, &index);
+ }
+
if (err != OMX_ErrorNone) {
ALOGE("OMX_GetExtensionIndex %s failed", name);
-
return StatusFromOMXError(err);
}
@@ -417,6 +445,7 @@ status_t OMXNodeInstance::storeMetaDataInBuffers_l(
params.bStoreMetaData = enable;
if ((err = OMX_SetParameter(mHandle, index, &params)) != OMX_ErrorNone) {
ALOGE("OMX_SetParameter() failed for StoreMetaDataInBuffers: 0x%08x", err);
+ *usingGraphicBufferInMetadata = OMX_FALSE;
return UNKNOWN_ERROR;
}
return err;
@@ -456,6 +485,49 @@ status_t OMXNodeInstance::prepareForAdaptivePlayback(
return err;
}
+status_t OMXNodeInstance::configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled, OMX_U32 audioHwSync,
+ native_handle_t **sidebandHandle) {
+ Mutex::Autolock autolock(mLock);
+
+ OMX_INDEXTYPE index;
+ OMX_STRING name = const_cast<OMX_STRING>(
+ "OMX.google.android.index.configureVideoTunnelMode");
+
+ OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index);
+ if (err != OMX_ErrorNone) {
+ ALOGE("configureVideoTunnelMode extension is missing!");
+ return StatusFromOMXError(err);
+ }
+
+ ConfigureVideoTunnelModeParams tunnelParams;
+ tunnelParams.nSize = sizeof(tunnelParams);
+ tunnelParams.nVersion.s.nVersionMajor = 1;
+ tunnelParams.nVersion.s.nVersionMinor = 0;
+ tunnelParams.nVersion.s.nRevision = 0;
+ tunnelParams.nVersion.s.nStep = 0;
+
+ tunnelParams.nPortIndex = portIndex;
+ tunnelParams.bTunneled = tunneled;
+ tunnelParams.nAudioHwSync = audioHwSync;
+ err = OMX_SetParameter(mHandle, index, &tunnelParams);
+ if (err != OMX_ErrorNone) {
+ ALOGE("configureVideoTunnelMode failed! (err %d).", err);
+ return UNKNOWN_ERROR;
+ }
+
+ err = OMX_GetParameter(mHandle, index, &tunnelParams);
+ if (err != OMX_ErrorNone) {
+ ALOGE("GetVideoTunnelWindow failed! (err %d).", err);
+ return UNKNOWN_ERROR;
+ }
+ if (sidebandHandle) {
+ *sidebandHandle = (native_handle_t*)tunnelParams.pSidebandWindow;
+ }
+
+ return err;
+}
+
status_t OMXNodeInstance::useBuffer(
OMX_U32 portIndex, const sp<IMemory> &params,
OMX::buffer_id *buffer) {
@@ -482,7 +554,7 @@ status_t OMXNodeInstance::useBuffer(
CHECK_EQ(header->pAppPrivate, buffer_meta);
- *buffer = header;
+ *buffer = makeBufferID(header);
addActiveBuffer(portIndex, *buffer);
@@ -538,7 +610,7 @@ status_t OMXNodeInstance::useGraphicBuffer2_l(
CHECK_EQ(header->pBuffer, bufferHandle);
CHECK_EQ(header->pAppPrivate, bufferMeta);
- *buffer = header;
+ *buffer = makeBufferID(header);
addActiveBuffer(portIndex, *buffer);
@@ -602,7 +674,7 @@ status_t OMXNodeInstance::useGraphicBuffer(
CHECK_EQ(header->pAppPrivate, bufferMeta);
- *buffer = header;
+ *buffer = makeBufferID(header);
addActiveBuffer(portIndex, *buffer);
@@ -610,11 +682,11 @@ status_t OMXNodeInstance::useGraphicBuffer(
}
status_t OMXNodeInstance::updateGraphicBufferInMeta(
- OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
+ OMX_U32 /* portIndex */, const sp<GraphicBuffer>& graphicBuffer,
OMX::buffer_id buffer) {
Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = (OMX_BUFFERHEADERTYPE *)(buffer);
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
VideoDecoderOutputMetaData *metadata =
(VideoDecoderOutputMetaData *)(header->pBuffer);
BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
@@ -636,7 +708,10 @@ status_t OMXNodeInstance::createInputSurface(
}
// Input buffers will hold meta-data (gralloc references).
- err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE);
+ OMX_BOOL usingGraphicBuffer = OMX_FALSE;
+ err = storeMetaDataInBuffers_l(
+ portIndex, OMX_TRUE,
+ OMX_TRUE /* useGraphicBuffer */, &usingGraphicBuffer);
if (err != OK) {
return err;
}
@@ -662,7 +737,7 @@ status_t OMXNodeInstance::createInputSurface(
GraphicBufferSource* bufferSource = new GraphicBufferSource(
this, def.format.video.nFrameWidth, def.format.video.nFrameHeight,
- def.nBufferCountActual);
+ def.nBufferCountActual, usingGraphicBuffer);
if ((err = bufferSource->initCheck()) != OK) {
delete bufferSource;
return err;
@@ -710,7 +785,7 @@ status_t OMXNodeInstance::allocateBuffer(
CHECK_EQ(header->pAppPrivate, buffer_meta);
- *buffer = header;
+ *buffer = makeBufferID(header);
*buffer_data = header->pBuffer;
addActiveBuffer(portIndex, *buffer);
@@ -748,7 +823,7 @@ status_t OMXNodeInstance::allocateBufferWithBackup(
CHECK_EQ(header->pAppPrivate, buffer_meta);
- *buffer = header;
+ *buffer = makeBufferID(header);
addActiveBuffer(portIndex, *buffer);
@@ -766,13 +841,14 @@ status_t OMXNodeInstance::freeBuffer(
removeActiveBuffer(portIndex, buffer);
- OMX_BUFFERHEADERTYPE *header = (OMX_BUFFERHEADERTYPE *)buffer;
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
BufferMeta *buffer_meta = static_cast<BufferMeta *>(header->pAppPrivate);
OMX_ERRORTYPE err = OMX_FreeBuffer(mHandle, portIndex, header);
delete buffer_meta;
buffer_meta = NULL;
+ invalidateBufferID(buffer);
return StatusFromOMXError(err);
}
@@ -780,7 +856,7 @@ status_t OMXNodeInstance::freeBuffer(
status_t OMXNodeInstance::fillBuffer(OMX::buffer_id buffer) {
Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = (OMX_BUFFERHEADERTYPE *)buffer;
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
header->nFilledLen = 0;
header->nOffset = 0;
header->nFlags = 0;
@@ -796,7 +872,7 @@ status_t OMXNodeInstance::emptyBuffer(
OMX_U32 flags, OMX_TICKS timestamp) {
Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = (OMX_BUFFERHEADERTYPE *)buffer;
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
header->nFilledLen = rangeLength;
header->nOffset = rangeOffset;
header->nFlags = flags;
@@ -850,6 +926,8 @@ status_t OMXNodeInstance::setInternalOption(
case IOMX::INTERNAL_OPTION_SUSPEND:
case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
case IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP:
+ case IOMX::INTERNAL_OPTION_START_TIME:
+ case IOMX::INTERNAL_OPTION_TIME_LAPSE:
{
const sp<GraphicBufferSource> &bufferSource =
getGraphicBufferSource();
@@ -874,7 +952,8 @@ status_t OMXNodeInstance::setInternalOption(
int64_t delayUs = *(int64_t *)data;
return bufferSource->setRepeatPreviousFrameDelayUs(delayUs);
- } else {
+ } else if (type ==
+ IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP){
if (size != sizeof(int64_t)) {
return INVALID_OPERATION;
}
@@ -882,6 +961,20 @@ status_t OMXNodeInstance::setInternalOption(
int64_t maxGapUs = *(int64_t *)data;
return bufferSource->setMaxTimestampGapUs(maxGapUs);
+ } else if (type == IOMX::INTERNAL_OPTION_START_TIME) {
+ if (size != sizeof(int64_t)) {
+ return INVALID_OPERATION;
+ }
+
+ int64_t skipFramesBeforeUs = *(int64_t *)data;
+
+ bufferSource->setSkipFramesBeforeUs(skipFramesBeforeUs);
+ } else { // IOMX::INTERNAL_OPTION_TIME_LAPSE
+ if (size != sizeof(int64_t) * 2) {
+ return INVALID_OPERATION;
+ }
+
+ bufferSource->setTimeLapseUs((int64_t *)data);
}
return OK;
@@ -897,8 +990,7 @@ void OMXNodeInstance::onMessage(const omx_message &msg) {
if (msg.type == omx_message::FILL_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
- static_cast<OMX_BUFFERHEADERTYPE *>(
- msg.u.extended_buffer_data.buffer);
+ findBufferHeader(msg.u.extended_buffer_data.buffer);
BufferMeta *buffer_meta =
static_cast<BufferMeta *>(buffer->pAppPrivate);
@@ -923,8 +1015,7 @@ void OMXNodeInstance::onMessage(const omx_message &msg) {
// be very confused.
OMX_BUFFERHEADERTYPE *buffer =
- static_cast<OMX_BUFFERHEADERTYPE *>(
- msg.u.buffer_data.buffer);
+ findBufferHeader(msg.u.buffer_data.buffer);
bufferSource->codecBufferEmptied(buffer);
return;
@@ -961,7 +1052,7 @@ void OMXNodeInstance::onEvent(
// static
OMX_ERRORTYPE OMXNodeInstance::OnEvent(
- OMX_IN OMX_HANDLETYPE hComponent,
+ OMX_IN OMX_HANDLETYPE /* hComponent */,
OMX_IN OMX_PTR pAppData,
OMX_IN OMX_EVENTTYPE eEvent,
OMX_IN OMX_U32 nData1,
@@ -977,26 +1068,28 @@ OMX_ERRORTYPE OMXNodeInstance::OnEvent(
// static
OMX_ERRORTYPE OMXNodeInstance::OnEmptyBufferDone(
- OMX_IN OMX_HANDLETYPE hComponent,
+ OMX_IN OMX_HANDLETYPE /* hComponent */,
OMX_IN OMX_PTR pAppData,
OMX_IN OMX_BUFFERHEADERTYPE* pBuffer) {
OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
if (instance->mDying) {
return OMX_ErrorNone;
}
- return instance->owner()->OnEmptyBufferDone(instance->nodeID(), pBuffer);
+ return instance->owner()->OnEmptyBufferDone(instance->nodeID(),
+ instance->findBufferID(pBuffer), pBuffer);
}
// static
OMX_ERRORTYPE OMXNodeInstance::OnFillBufferDone(
- OMX_IN OMX_HANDLETYPE hComponent,
+ OMX_IN OMX_HANDLETYPE /* hComponent */,
OMX_IN OMX_PTR pAppData,
OMX_IN OMX_BUFFERHEADERTYPE* pBuffer) {
OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
if (instance->mDying) {
return OMX_ErrorNone;
}
- return instance->owner()->OnFillBufferDone(instance->nodeID(), pBuffer);
+ return instance->owner()->OnFillBufferDone(instance->nodeID(),
+ instance->findBufferID(pBuffer), pBuffer);
}
void OMXNodeInstance::addActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id) {
@@ -1031,4 +1124,67 @@ void OMXNodeInstance::freeActiveBuffers() {
}
}
+#ifdef __LP64__
+
+OMX::buffer_id OMXNodeInstance::makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
+ if (bufferHeader == NULL) {
+ return 0;
+ }
+ Mutex::Autolock autoLock(mBufferIDLock);
+ OMX::buffer_id buffer;
+ do { // handle the very unlikely case of ID overflow
+ if (++mBufferIDCount == 0) {
+ ++mBufferIDCount;
+ }
+ buffer = (OMX::buffer_id)mBufferIDCount;
+ } while (mBufferIDToBufferHeader.indexOfKey(buffer) >= 0);
+ mBufferIDToBufferHeader.add(buffer, bufferHeader);
+ mBufferHeaderToBufferID.add(bufferHeader, buffer);
+ return buffer;
+}
+
+OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(OMX::buffer_id buffer) {
+ if (buffer == 0) {
+ return NULL;
+ }
+ Mutex::Autolock autoLock(mBufferIDLock);
+ return mBufferIDToBufferHeader.valueFor(buffer);
+}
+
+OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
+ if (bufferHeader == NULL) {
+ return 0;
+ }
+ Mutex::Autolock autoLock(mBufferIDLock);
+ return mBufferHeaderToBufferID.valueFor(bufferHeader);
+}
+
+void OMXNodeInstance::invalidateBufferID(OMX::buffer_id buffer) {
+ if (buffer == 0) {
+ return;
+ }
+ Mutex::Autolock autoLock(mBufferIDLock);
+ mBufferHeaderToBufferID.removeItem(mBufferIDToBufferHeader.valueFor(buffer));
+ mBufferIDToBufferHeader.removeItem(buffer);
+}
+
+#else
+
+OMX::buffer_id OMXNodeInstance::makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
+ return (OMX::buffer_id)bufferHeader;
+}
+
+OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(OMX::buffer_id buffer) {
+ return (OMX_BUFFERHEADERTYPE *)buffer;
+}
+
+OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
+ return (OMX::buffer_id)bufferHeader;
+}
+
+void OMXNodeInstance::invalidateBufferID(OMX::buffer_id buffer __unused) {
+}
+
+#endif
+
} // namespace android
diff --git a/media/libstagefright/omx/SoftOMXComponent.cpp b/media/libstagefright/omx/SoftOMXComponent.cpp
index b1c34dc..646cd32 100644
--- a/media/libstagefright/omx/SoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftOMXComponent.cpp
@@ -257,69 +257,69 @@ OMX_ERRORTYPE SoftOMXComponent::GetStateWrapper(
////////////////////////////////////////////////////////////////////////////////
OMX_ERRORTYPE SoftOMXComponent::sendCommand(
- OMX_COMMANDTYPE cmd, OMX_U32 param, OMX_PTR data) {
+ OMX_COMMANDTYPE /* cmd */, OMX_U32 /* param */, OMX_PTR /* data */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::getParameter(
- OMX_INDEXTYPE index, OMX_PTR params) {
+ OMX_INDEXTYPE /* index */, OMX_PTR /* params */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::setParameter(
- OMX_INDEXTYPE index, const OMX_PTR params) {
+ OMX_INDEXTYPE /* index */, const OMX_PTR /* params */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::getConfig(
- OMX_INDEXTYPE index, OMX_PTR params) {
+ OMX_INDEXTYPE /* index */, OMX_PTR /* params */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::setConfig(
- OMX_INDEXTYPE index, const OMX_PTR params) {
+ OMX_INDEXTYPE /* index */, const OMX_PTR /* params */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::getExtensionIndex(
- const char *name, OMX_INDEXTYPE *index) {
+ const char * /* name */, OMX_INDEXTYPE * /* index */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::useBuffer(
- OMX_BUFFERHEADERTYPE **buffer,
- OMX_U32 portIndex,
- OMX_PTR appPrivate,
- OMX_U32 size,
- OMX_U8 *ptr) {
+ OMX_BUFFERHEADERTYPE ** /* buffer */,
+ OMX_U32 /* portIndex */,
+ OMX_PTR /* appPrivate */,
+ OMX_U32 /* size */,
+ OMX_U8 * /* ptr */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::allocateBuffer(
- OMX_BUFFERHEADERTYPE **buffer,
- OMX_U32 portIndex,
- OMX_PTR appPrivate,
- OMX_U32 size) {
+ OMX_BUFFERHEADERTYPE ** /* buffer */,
+ OMX_U32 /* portIndex */,
+ OMX_PTR /* appPrivate */,
+ OMX_U32 /* size */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::freeBuffer(
- OMX_U32 portIndex,
- OMX_BUFFERHEADERTYPE *buffer) {
+ OMX_U32 /* portIndex */,
+ OMX_BUFFERHEADERTYPE * /* buffer */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::emptyThisBuffer(
- OMX_BUFFERHEADERTYPE *buffer) {
+ OMX_BUFFERHEADERTYPE * /* buffer */) {
return OMX_ErrorUndefined;
}
OMX_ERRORTYPE SoftOMXComponent::fillThisBuffer(
- OMX_BUFFERHEADERTYPE *buffer) {
+ OMX_BUFFERHEADERTYPE * /* buffer */) {
return OMX_ErrorUndefined;
}
-OMX_ERRORTYPE SoftOMXComponent::getState(OMX_STATETYPE *state) {
+OMX_ERRORTYPE SoftOMXComponent::getState(OMX_STATETYPE * /* state */) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index d6cde73..9b6958a 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -42,6 +42,7 @@ static const struct {
{ "OMX.google.amrwb.encoder", "amrwbenc", "audio_encoder.amrwb" },
{ "OMX.google.h264.decoder", "h264dec", "video_decoder.avc" },
{ "OMX.google.h264.encoder", "h264enc", "video_encoder.avc" },
+ { "OMX.google.hevc.decoder", "hevcdec", "video_decoder.hevc" },
{ "OMX.google.g711.alaw.decoder", "g711dec", "audio_decoder.g711alaw" },
{ "OMX.google.g711.mlaw.decoder", "g711dec", "audio_decoder.g711mlaw" },
{ "OMX.google.h263.decoder", "mpeg4dec", "video_decoder.h263" },
@@ -50,6 +51,7 @@ static const struct {
{ "OMX.google.mpeg4.encoder", "mpeg4enc", "video_encoder.mpeg4" },
{ "OMX.google.mp3.decoder", "mp3dec", "audio_decoder.mp3" },
{ "OMX.google.vorbis.decoder", "vorbisdec", "audio_decoder.vorbis" },
+ { "OMX.google.opus.decoder", "opusdec", "audio_decoder.opus" },
{ "OMX.google.vp8.decoder", "vpxdec", "video_decoder.vp8" },
{ "OMX.google.vp9.decoder", "vpxdec", "video_decoder.vp9" },
{ "OMX.google.vp8.encoder", "vpxenc", "video_encoder.vp8" },
@@ -154,7 +156,7 @@ OMX_ERRORTYPE SoftOMXPlugin::destroyComponentInstance(
OMX_ERRORTYPE SoftOMXPlugin::enumerateComponents(
OMX_STRING name,
- size_t size,
+ size_t /* size */,
OMX_U32 index) {
if (index >= kNumComponents) {
return OMX_ErrorNoMore;
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 08a3d42..2f83610 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -14,12 +14,15 @@
* limitations under the License.
*/
+#include <inttypes.h>
+
//#define LOG_NDEBUG 0
#define LOG_TAG "SoftVideoDecoderOMXComponent"
#include <utils/Log.h>
#include "include/SoftVideoDecoderOMXComponent.h"
+#include <media/hardware/HardwareAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -48,6 +51,9 @@ SoftVideoDecoderOMXComponent::SoftVideoDecoderOMXComponent(
OMX_PTR appData,
OMX_COMPONENTTYPE **component)
: SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mIsAdaptive(false),
+ mAdaptiveMaxWidth(0),
+ mAdaptiveMaxHeight(0),
mWidth(width),
mHeight(height),
mCropLeft(0),
@@ -117,16 +123,18 @@ void SoftVideoDecoderOMXComponent::initPorts(
updatePortDefinitions();
}
-void SoftVideoDecoderOMXComponent::updatePortDefinitions() {
+void SoftVideoDecoderOMXComponent::updatePortDefinitions(bool updateCrop) {
OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
def->format.video.nFrameWidth = mWidth;
def->format.video.nFrameHeight = mHeight;
def->format.video.nStride = def->format.video.nFrameWidth;
def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+ def->nBufferSize = def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2;
+
def = &editPortInfo(kOutputPortIndex)->mDef;
- def->format.video.nFrameWidth = mWidth;
- def->format.video.nFrameHeight = mHeight;
+ def->format.video.nFrameWidth = outputBufferWidth();
+ def->format.video.nFrameHeight = outputBufferHeight();
def->format.video.nStride = def->format.video.nFrameWidth;
def->format.video.nSliceHeight = def->format.video.nFrameHeight;
@@ -134,10 +142,105 @@ void SoftVideoDecoderOMXComponent::updatePortDefinitions() {
(def->format.video.nFrameWidth *
def->format.video.nFrameHeight * 3) / 2;
- mCropLeft = 0;
- mCropTop = 0;
- mCropWidth = mWidth;
- mCropHeight = mHeight;
+ if (updateCrop) {
+ mCropLeft = 0;
+ mCropTop = 0;
+ mCropWidth = mWidth;
+ mCropHeight = mHeight;
+ }
+}
+
+
+uint32_t SoftVideoDecoderOMXComponent::outputBufferWidth() {
+ return mIsAdaptive ? mAdaptiveMaxWidth : mWidth;
+}
+
+uint32_t SoftVideoDecoderOMXComponent::outputBufferHeight() {
+ return mIsAdaptive ? mAdaptiveMaxHeight : mHeight;
+}
+
+void SoftVideoDecoderOMXComponent::handlePortSettingsChange(
+ bool *portWillReset, uint32_t width, uint32_t height,
+ CropSettingsMode cropSettingsMode, bool fakeStride) {
+ *portWillReset = false;
+ bool sizeChanged = (width != mWidth || height != mHeight);
+ bool updateCrop = (cropSettingsMode == kCropUnSet);
+ bool cropChanged = (cropSettingsMode == kCropChanged);
+ bool strideChanged = false;
+ if (fakeStride) {
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef;
+ if (def->format.video.nStride != width || def->format.video.nSliceHeight != height) {
+ strideChanged = true;
+ }
+ }
+
+ if (sizeChanged || cropChanged || strideChanged) {
+ mWidth = width;
+ mHeight = height;
+
+ if ((sizeChanged && !mIsAdaptive)
+ || width > mAdaptiveMaxWidth
+ || height > mAdaptiveMaxHeight) {
+ if (mIsAdaptive) {
+ if (width > mAdaptiveMaxWidth) {
+ mAdaptiveMaxWidth = width;
+ }
+ if (height > mAdaptiveMaxHeight) {
+ mAdaptiveMaxHeight = height;
+ }
+ }
+ updatePortDefinitions(updateCrop);
+ notify(OMX_EventPortSettingsChanged, kOutputPortIndex, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+ *portWillReset = true;
+ } else {
+ updatePortDefinitions(updateCrop);
+
+ if (fakeStride) {
+ // MAJOR HACK that is not pretty, it's just to fool the renderer to read the correct
+ // data.
+ // Some software decoders (e.g. SoftMPEG4) fill decoded frame directly to output
+ // buffer without considering the output buffer stride and slice height. So this is
+ // used to signal how the buffer is arranged. The alternative is to re-arrange the
+ // output buffer in SoftMPEG4, but that results in memcopies.
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef;
+ def->format.video.nStride = mWidth;
+ def->format.video.nSliceHeight = mHeight;
+ }
+
+ notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+ OMX_IndexConfigCommonOutputCrop, NULL);
+ }
+ }
+}
+
+void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer(
+ uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
+ size_t srcYStride, size_t srcUStride, size_t srcVStride) {
+ size_t dstYStride = outputBufferWidth();
+ size_t dstUVStride = dstYStride / 2;
+ size_t dstHeight = outputBufferHeight();
+ uint8_t *dstStart = dst;
+
+ for (size_t i = 0; i < mHeight; ++i) {
+ memcpy(dst, srcY, mWidth);
+ srcY += srcYStride;
+ dst += dstYStride;
+ }
+
+ dst = dstStart + dstYStride * dstHeight;
+ for (size_t i = 0; i < mHeight / 2; ++i) {
+ memcpy(dst, srcU, mWidth / 2);
+ srcU += srcUStride;
+ dst += dstUVStride;
+ }
+
+ dst = dstStart + (5 * dstYStride * dstHeight) / 4;
+ for (size_t i = 0; i < mHeight / 2; ++i) {
+ memcpy(dst, srcV, mWidth / 2);
+ srcV += srcVStride;
+ dst += dstUVStride;
+ }
}
OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter(
@@ -177,16 +280,16 @@ OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter(
(OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
if (profileLevel->nPortIndex != kInputPortIndex) {
- ALOGE("Invalid port index: %ld", profileLevel->nPortIndex);
+ ALOGE("Invalid port index: %" PRIu32, profileLevel->nPortIndex);
return OMX_ErrorUnsupportedIndex;
}
- if (index >= mNumProfileLevels) {
+ if (profileLevel->nProfileIndex >= mNumProfileLevels) {
return OMX_ErrorNoMore;
}
- profileLevel->eProfile = mProfileLevels[index].mProfile;
- profileLevel->eLevel = mProfileLevels[index].mLevel;
+ profileLevel->eProfile = mProfileLevels[profileLevel->nProfileIndex].mProfile;
+ profileLevel->eLevel = mProfileLevels[profileLevel->nProfileIndex].mLevel;
return OMX_ErrorNone;
}
@@ -197,7 +300,10 @@ OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter(
OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
- switch (index) {
+ // Include extension index OMX_INDEXEXTTYPE.
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
case OMX_IndexParamStandardComponentRole:
{
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
@@ -228,6 +334,58 @@ OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetParameter(
return OMX_ErrorNone;
}
+ case kPrepareForAdaptivePlaybackIndex:
+ {
+ const PrepareForAdaptivePlaybackParams* adaptivePlaybackParams =
+ (const PrepareForAdaptivePlaybackParams *)params;
+ mIsAdaptive = adaptivePlaybackParams->bEnable;
+ if (mIsAdaptive) {
+ mAdaptiveMaxWidth = adaptivePlaybackParams->nMaxFrameWidth;
+ mAdaptiveMaxHeight = adaptivePlaybackParams->nMaxFrameHeight;
+ mWidth = mAdaptiveMaxWidth;
+ mHeight = mAdaptiveMaxHeight;
+ } else {
+ mAdaptiveMaxWidth = 0;
+ mAdaptiveMaxHeight = 0;
+ }
+ updatePortDefinitions();
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamPortDefinition:
+ {
+ OMX_PARAM_PORTDEFINITIONTYPE *newParams =
+ (OMX_PARAM_PORTDEFINITIONTYPE *)params;
+ OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &newParams->format.video;
+ OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(newParams->nPortIndex)->mDef;
+
+ uint32_t oldWidth = def->format.video.nFrameWidth;
+ uint32_t oldHeight = def->format.video.nFrameHeight;
+ uint32_t newWidth = video_def->nFrameWidth;
+ uint32_t newHeight = video_def->nFrameHeight;
+ if (newWidth != oldWidth || newHeight != oldHeight) {
+ bool outputPort = (newParams->nPortIndex == kOutputPortIndex);
+ def->format.video.nFrameWidth =
+ (mIsAdaptive && outputPort) ? mAdaptiveMaxWidth : newWidth;
+ def->format.video.nFrameHeight =
+ (mIsAdaptive && outputPort) ? mAdaptiveMaxHeight : newHeight;
+ def->format.video.nStride = def->format.video.nFrameWidth;
+ def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+ def->nBufferSize =
+ def->format.video.nFrameWidth * def->format.video.nFrameHeight * 3 / 2;
+ if (outputPort) {
+ mWidth = newWidth;
+ mHeight = newHeight;
+ mCropLeft = 0;
+ mCropTop = 0;
+ mCropWidth = newWidth;
+ mCropHeight = newHeight;
+ }
+ newParams->nBufferSize = def->nBufferSize;
+ }
+ return SimpleSoftOMXComponent::internalSetParameter(index, params);
+ }
+
default:
return SimpleSoftOMXComponent::internalSetParameter(index, params);
}
@@ -257,6 +415,16 @@ OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getConfig(
}
}
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getExtensionIndex(
+ const char *name, OMX_INDEXTYPE *index) {
+ if (!strcmp(name, "OMX.google.android.index.prepareForAdaptivePlayback")) {
+ *(int32_t*)index = kPrepareForAdaptivePlaybackIndex;
+ return OMX_ErrorNone;
+ }
+
+ return SimpleSoftOMXComponent::getExtensionIndex(name, index);
+}
+
void SoftVideoDecoderOMXComponent::onReset() {
mOutputPortSettingsChange = NONE;
}
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
new file mode 100644
index 0000000..8bff142
--- /dev/null
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftVideoEncoderOMXComponent"
+#include <utils/Log.h>
+
+#include "include/SoftVideoEncoderOMXComponent.h"
+
+#include <hardware/gralloc.h>
+#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaDefs.h>
+
+#include <ui/GraphicBuffer.h>
+#include <ui/GraphicBufferMapper.h>
+
+namespace android {
+
+SoftVideoEncoderOMXComponent::SoftVideoEncoderOMXComponent(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mGrallocModule(NULL) {
+}
+
+// static
+void SoftVideoEncoderOMXComponent::ConvertFlexYUVToPlanar(
+ uint8_t *dst, size_t dstStride, size_t dstVStride,
+ struct android_ycbcr *ycbcr, int32_t width, int32_t height) {
+ const uint8_t *src = (const uint8_t *)ycbcr->y;
+ const uint8_t *srcU = (const uint8_t *)ycbcr->cb;
+ const uint8_t *srcV = (const uint8_t *)ycbcr->cr;
+ uint8_t *dstU = dst + dstVStride * dstStride;
+ uint8_t *dstV = dstU + (dstVStride >> 1) * (dstStride >> 1);
+
+ for (size_t y = height; y > 0; --y) {
+ memcpy(dst, src, width);
+ dst += dstStride;
+ src += ycbcr->ystride;
+ }
+ if (ycbcr->cstride == ycbcr->ystride >> 1 && ycbcr->chroma_step == 1) {
+ // planar
+ for (size_t y = height >> 1; y > 0; --y) {
+ memcpy(dstU, srcU, width >> 1);
+ dstU += dstStride >> 1;
+ srcU += ycbcr->cstride;
+ memcpy(dstV, srcV, width >> 1);
+ dstV += dstStride >> 1;
+ srcV += ycbcr->cstride;
+ }
+ } else {
+ // arbitrary
+ for (size_t y = height >> 1; y > 0; --y) {
+ for (size_t x = width >> 1; x > 0; --x) {
+ *dstU++ = *srcU;
+ *dstV++ = *srcV;
+ srcU += ycbcr->chroma_step;
+ srcV += ycbcr->chroma_step;
+ }
+ dstU += (dstStride >> 1) - (width >> 1);
+ dstV += (dstStride >> 1) - (width >> 1);
+ srcU += ycbcr->cstride - (width >> 1) * ycbcr->chroma_step;
+ srcV += ycbcr->cstride - (width >> 1) * ycbcr->chroma_step;
+ }
+ }
+}
+
+// static
+void SoftVideoEncoderOMXComponent::ConvertYUV420SemiPlanarToYUV420Planar(
+ const uint8_t *inYVU, uint8_t* outYUV, int32_t width, int32_t height) {
+ // TODO: add support for stride
+ int32_t outYsize = width * height;
+ uint32_t *outY = (uint32_t *) outYUV;
+ uint16_t *outCb = (uint16_t *) (outYUV + outYsize);
+ uint16_t *outCr = (uint16_t *) (outYUV + outYsize + (outYsize >> 2));
+
+ /* Y copying */
+ memcpy(outY, inYVU, outYsize);
+
+ /* U & V copying */
+ // FIXME this only works if width is multiple of 4
+ uint32_t *inYVU_4 = (uint32_t *) (inYVU + outYsize);
+ for (int32_t i = height >> 1; i > 0; --i) {
+ for (int32_t j = width >> 2; j > 0; --j) {
+ uint32_t temp = *inYVU_4++;
+ uint32_t tempU = temp & 0xFF;
+ tempU = tempU | ((temp >> 8) & 0xFF00);
+
+ uint32_t tempV = (temp >> 8) & 0xFF;
+ tempV = tempV | ((temp >> 16) & 0xFF00);
+
+ *outCb++ = tempU;
+ *outCr++ = tempV;
+ }
+ }
+}
+
+// static
+void SoftVideoEncoderOMXComponent::ConvertRGB32ToPlanar(
+ uint8_t *dstY, size_t dstStride, size_t dstVStride,
+ const uint8_t *src, size_t width, size_t height, size_t srcStride,
+ bool bgr) {
+ CHECK((width & 1) == 0);
+ CHECK((height & 1) == 0);
+
+ uint8_t *dstU = dstY + dstStride * dstVStride;
+ uint8_t *dstV = dstU + (dstStride >> 1) * (dstVStride >> 1);
+
+#ifdef SURFACE_IS_BGR32
+ bgr = !bgr;
+#endif
+
+ const size_t redOffset = bgr ? 2 : 0;
+ const size_t greenOffset = 1;
+ const size_t blueOffset = bgr ? 0 : 2;
+
+ for (size_t y = 0; y < height; ++y) {
+ for (size_t x = 0; x < width; ++x) {
+ unsigned red = src[redOffset];
+ unsigned green = src[greenOffset];
+ unsigned blue = src[blueOffset];
+
+ // using ITU-R BT.601 conversion matrix
+ unsigned luma =
+ ((red * 66 + green * 129 + blue * 25) >> 8) + 16;
+
+ dstY[x] = luma;
+
+ if ((x & 1) == 0 && (y & 1) == 0) {
+ unsigned U =
+ ((-red * 38 - green * 74 + blue * 112) >> 8) + 128;
+
+ unsigned V =
+ ((red * 112 - green * 94 - blue * 18) >> 8) + 128;
+
+ dstU[x >> 1] = U;
+ dstV[x >> 1] = V;
+ }
+ src += 4;
+ }
+
+ if ((y & 1) == 0) {
+ dstU += dstStride >> 1;
+ dstV += dstStride >> 1;
+ }
+
+ src += srcStride - 4 * width;
+ dstY += dstStride;
+ }
+}
+
+const uint8_t *SoftVideoEncoderOMXComponent::extractGraphicBuffer(
+ uint8_t *dst, size_t dstSize,
+ const uint8_t *src, size_t srcSize,
+ size_t width, size_t height) const {
+ size_t dstStride = width;
+ size_t dstVStride = height;
+
+ MetadataBufferType bufferType = *(MetadataBufferType *)src;
+ bool usingGraphicBuffer = bufferType == kMetadataBufferTypeGraphicBuffer;
+ if (!usingGraphicBuffer && bufferType != kMetadataBufferTypeGrallocSource) {
+ ALOGE("Unsupported metadata type (%d)", bufferType);
+ return NULL;
+ }
+
+ if (mGrallocModule == NULL) {
+ CHECK_EQ(0, hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &mGrallocModule));
+ }
+
+ const gralloc_module_t *grmodule =
+ (const gralloc_module_t *)mGrallocModule;
+
+ buffer_handle_t handle;
+ int format;
+ size_t srcStride;
+ size_t srcVStride;
+ if (usingGraphicBuffer) {
+ if (srcSize < 4 + sizeof(GraphicBuffer *)) {
+ ALOGE("Metadata is too small (%zu vs %zu)", srcSize, 4 + sizeof(GraphicBuffer *));
+ return NULL;
+ }
+
+ GraphicBuffer *buffer = *(GraphicBuffer **)(src + 4);
+ handle = buffer->handle;
+ format = buffer->format;
+ srcStride = buffer->stride;
+ srcVStride = buffer->height;
+ // convert stride from pixels to bytes
+ if (format != HAL_PIXEL_FORMAT_YV12 &&
+ format != HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ // TODO do we need to support other formats?
+ srcStride *= 4;
+ }
+ } else {
+ // TODO: remove this part. Check if anyone uses this.
+
+ if (srcSize < 4 + sizeof(buffer_handle_t)) {
+ ALOGE("Metadata is too small (%zu vs %zu)", srcSize, 4 + sizeof(buffer_handle_t));
+ return NULL;
+ }
+
+ handle = *(buffer_handle_t *)(src + 4);
+ // assume HAL_PIXEL_FORMAT_RGBA_8888
+ // there is no way to get the src stride without the graphic buffer
+ format = HAL_PIXEL_FORMAT_RGBA_8888;
+ srcStride = width * 4;
+ srcVStride = height;
+ }
+
+ size_t neededSize =
+ dstStride * dstVStride + (width >> 1)
+ + (dstStride >> 1) * ((dstVStride >> 1) + (height >> 1) - 1);
+ if (dstSize < neededSize) {
+ ALOGE("destination buffer is too small (%zu vs %zu)", dstSize, neededSize);
+ return NULL;
+ }
+
+ void *bits = NULL;
+ struct android_ycbcr ycbcr;
+ status_t res;
+ if (format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ res = grmodule->lock_ycbcr(
+ grmodule, handle,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_NEVER,
+ 0, 0, width, height, &ycbcr);
+ } else {
+ res = grmodule->lock(
+ grmodule, handle,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_NEVER,
+ 0, 0, width, height, &bits);
+ }
+ if (res != OK) {
+ ALOGE("Unable to lock image buffer %p for access", handle);
+ return NULL;
+ }
+
+ switch (format) {
+ case HAL_PIXEL_FORMAT_YV12: // YCrCb / YVU planar
+ // convert to flex YUV
+ ycbcr.y = bits;
+ ycbcr.cr = (uint8_t *)bits + srcStride * srcVStride;
+ ycbcr.cb = (uint8_t *)ycbcr.cr + (srcStride >> 1) * (srcVStride >> 1);
+ ycbcr.chroma_step = 1;
+ ycbcr.cstride = srcVStride >> 1;
+ ycbcr.ystride = srcVStride;
+ ConvertFlexYUVToPlanar(dst, dstStride, dstVStride, &ycbcr, width, height);
+ break;
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP: // YCrCb / YVU semiplanar, NV21
+ // convert to flex YUV
+ ycbcr.y = bits;
+ ycbcr.cr = (uint8_t *)bits + srcStride * srcVStride;
+ ycbcr.cb = (uint8_t *)ycbcr.cr + 1;
+ ycbcr.chroma_step = 2;
+ ycbcr.cstride = srcVStride;
+ ycbcr.ystride = srcVStride;
+ ConvertFlexYUVToPlanar(dst, dstStride, dstVStride, &ycbcr, width, height);
+ break;
+ case HAL_PIXEL_FORMAT_YCbCr_420_888:
+ ConvertFlexYUVToPlanar(dst, dstStride, dstVStride, &ycbcr, width, height);
+ break;
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ ConvertRGB32ToPlanar(
+ dst, dstStride, dstVStride,
+ (const uint8_t *)bits, width, height, srcStride,
+ format == HAL_PIXEL_FORMAT_BGRA_8888);
+ break;
+ default:
+ ALOGE("Unsupported pixel format %#x", format);
+ dst = NULL;
+ break;
+ }
+
+ if (grmodule->unlock(grmodule, handle) != OK) {
+ ALOGE("Unable to unlock image buffer %p for access", handle);
+ }
+
+ return dst;
+}
+
+OMX_ERRORTYPE SoftVideoEncoderOMXComponent::getExtensionIndex(
+ const char *name, OMX_INDEXTYPE *index) {
+ if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers") ||
+ !strcmp(name, "OMX.google.android.index.storeGraphicBufferInMetaData")) {
+ *(int32_t*)index = kStoreMetaDataExtensionIndex;
+ return OMX_ErrorNone;
+ }
+ return SimpleSoftOMXComponent::getExtensionIndex(name, index);
+}
+
+} // namespace android
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index 1061c39..447b29e 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -11,8 +11,12 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright \
$(TOP)/frameworks/native/include/media/openmax
+LOCAL_CFLAGS += -Werror
+
LOCAL_MODULE := omx_tests
LOCAL_MODULE_TAGS := tests
+LOCAL_32_BIT_ONLY := true
+
include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 4bee808..f4dfd6b 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -16,6 +16,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "OMXHarness"
+#include <inttypes.h>
#include <utils/Log.h>
#include "OMXHarness.h"
@@ -25,6 +26,7 @@
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryDealer.h>
+#include <media/IMediaHTTPService.h>
#include <media/IMediaPlayerService.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -241,7 +243,8 @@ private:
};
static sp<MediaExtractor> CreateExtractorFromURI(const char *uri) {
- sp<DataSource> source = DataSource::CreateFromURI(uri);
+ sp<DataSource> source =
+ DataSource::CreateFromURI(NULL /* httpService */, uri);
if (source == NULL) {
return NULL;
@@ -460,6 +463,7 @@ static const char *GetMimeFromComponentRole(const char *componentRole) {
{ "audio_decoder.aac", "audio/mp4a-latm" },
{ "audio_decoder.mp3", "audio/mpeg" },
{ "audio_decoder.vorbis", "audio/vorbis" },
+ { "audio_decoder.opus", "audio/opus" },
{ "audio_decoder.g711alaw", MEDIA_MIMETYPE_AUDIO_G711_ALAW },
{ "audio_decoder.g711mlaw", MEDIA_MIMETYPE_AUDIO_G711_MLAW },
};
@@ -492,6 +496,7 @@ static const char *GetURLForMime(const char *mime) {
{ "audio/mpeg",
"file:///sdcard/media_api/music/MP3_48KHz_128kbps_s_1_17_CBR.mp3" },
{ "audio/vorbis", NULL },
+ { "audio/opus", NULL },
{ "video/x-vnd.on2.vp8",
"file:///sdcard/media_api/video/big-buck-bunny_trailer.webm" },
{ MEDIA_MIMETYPE_AUDIO_G711_ALAW, "file:///sdcard/M1F1-Alaw-AFsp.wav" },
@@ -711,11 +716,11 @@ status_t Harness::testSeek(
int64_t bufferTimeUs;
CHECK(buffer->meta_data()->findInt64(kKeyTime, &bufferTimeUs));
if (!CloseEnough(bufferTimeUs, actualSeekTimeUs)) {
- printf("\n * Attempted seeking to %lld us (%.2f secs)",
+ printf("\n * Attempted seeking to %" PRId64 " us (%.2f secs)",
requestedSeekTimeUs, requestedSeekTimeUs / 1E6);
- printf("\n * Nearest keyframe is at %lld us (%.2f secs)",
+ printf("\n * Nearest keyframe is at %" PRId64 " us (%.2f secs)",
actualSeekTimeUs, actualSeekTimeUs / 1E6);
- printf("\n * Returned buffer was at %lld us (%.2f secs)\n\n",
+ printf("\n * Returned buffer was at %" PRId64 " us (%.2f secs)\n\n",
bufferTimeUs, bufferTimeUs / 1E6);
buffer->release();
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index a6825eb..4bc67e8 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -124,7 +124,7 @@ ARTPAssembler::AssemblyStatus AAVCAssembler::addNALUnit(
}
void AAVCAssembler::addSingleNALUnit(const sp<ABuffer> &buffer) {
- ALOGV("addSingleNALUnit of size %d", buffer->size());
+ ALOGV("addSingleNALUnit of size %zu", buffer->size());
#if !LOG_NDEBUG
hexdump(buffer->data(), buffer->size());
#endif
@@ -191,7 +191,7 @@ ARTPAssembler::AssemblyStatus AAVCAssembler::addFragmentedNALUnit(
CHECK((indicator & 0x1f) == 28);
if (size < 2) {
- ALOGV("Ignoring malformed FU buffer (size = %d)", size);
+ ALOGV("Ignoring malformed FU buffer (size = %zu)", size);
queue->erase(queue->begin());
++mNextExpectedSeqNo;
@@ -225,7 +225,7 @@ ARTPAssembler::AssemblyStatus AAVCAssembler::addFragmentedNALUnit(
} else {
List<sp<ABuffer> >::iterator it = ++queue->begin();
while (it != queue->end()) {
- ALOGV("sequence length %d", totalCount);
+ ALOGV("sequence length %zu", totalCount);
const sp<ABuffer> &buffer = *it;
@@ -294,7 +294,7 @@ ARTPAssembler::AssemblyStatus AAVCAssembler::addFragmentedNALUnit(
for (size_t i = 0; i < totalCount; ++i) {
const sp<ABuffer> &buffer = *it;
- ALOGV("piece #%d/%d", i + 1, totalCount);
+ ALOGV("piece #%zu/%zu", i + 1, totalCount);
#if !LOG_NDEBUG
hexdump(buffer->data(), buffer->size());
#endif
@@ -317,7 +317,7 @@ ARTPAssembler::AssemblyStatus AAVCAssembler::addFragmentedNALUnit(
void AAVCAssembler::submitAccessUnit() {
CHECK(!mNALUnits.empty());
- ALOGV("Access unit complete (%d nal units)", mNALUnits.size());
+ ALOGV("Access unit complete (%zu nal units)", mNALUnits.size());
size_t totalSize = 0;
for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
index 4c9bf5b..dca5c89 100644
--- a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
@@ -34,7 +34,9 @@
namespace android {
AMPEG2TSAssembler::AMPEG2TSAssembler(
- const sp<AMessage> &notify, const char *desc, const AString &params)
+ const sp<AMessage> &notify,
+ const char * /* desc */,
+ const AString & /* params */)
: mNotifyMsg(notify),
mNextExpectedSeqNoValid(false),
mNextExpectedSeqNo(0) {
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index eefceba..7eb6542 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -249,11 +249,15 @@ ARTPAssembler::AssemblyStatus AMPEG4ElementaryAssembler::addPacket(
mPackets.push_back(buffer);
} else {
// hexdump(buffer->data(), buffer->size());
+ if (buffer->size() < 2) {
+ return MALFORMED_PACKET;
+ }
- CHECK_GE(buffer->size(), 2u);
unsigned AU_headers_length = U16_AT(buffer->data()); // in bits
- CHECK_GE(buffer->size(), 2 + (AU_headers_length + 7) / 8);
+ if (buffer->size() < 2 + (AU_headers_length + 7) / 8) {
+ return MALFORMED_PACKET;
+ }
List<AUHeader> headers;
@@ -342,7 +346,9 @@ ARTPAssembler::AssemblyStatus AMPEG4ElementaryAssembler::addPacket(
it != headers.end(); ++it) {
const AUHeader &header = *it;
- CHECK_LE(offset + header.mSize, buffer->size());
+ if (buffer->size() < offset + header.mSize) {
+ return MALFORMED_PACKET;
+ }
sp<ABuffer> accessUnit = new ABuffer(header.mSize);
memcpy(accessUnit->data(), buffer->data() + offset, header.mSize);
@@ -353,7 +359,10 @@ ARTPAssembler::AssemblyStatus AMPEG4ElementaryAssembler::addPacket(
mPackets.push_back(accessUnit);
}
- CHECK_EQ(offset, buffer->size());
+ if (offset != buffer->size()) {
+ ALOGW("potentially malformed packet (offset %d, size %d)",
+ offset, buffer->size());
+ }
}
queue->erase(queue->begin());
@@ -365,7 +374,7 @@ ARTPAssembler::AssemblyStatus AMPEG4ElementaryAssembler::addPacket(
void AMPEG4ElementaryAssembler::submitAccessUnit() {
CHECK(!mPackets.empty());
- ALOGV("Access unit complete (%d nal units)", mPackets.size());
+ ALOGV("Access unit complete (%zu nal units)", mPackets.size());
sp<ABuffer> accessUnit;
@@ -400,6 +409,7 @@ ARTPAssembler::AssemblyStatus AMPEG4ElementaryAssembler::assembleMore(
const sp<ARTPSource> &source) {
AssemblyStatus status = addPacket(source);
if (status == MALFORMED_PACKET) {
+ ALOGI("access unit is damaged");
mAccessUnitDamaged = true;
}
return status;
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index 462c384..09f52bc 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -23,7 +23,7 @@
#include "ARawAudioAssembler.h"
#include "ASessionDescription.h"
-#include "avc_utils.h"
+#include "include/avc_utils.h"
#include <ctype.h>
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index af369b5..372fbe9 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -563,7 +563,7 @@ status_t ARTPConnection::parseRTCP(StreamInfo *s, const sp<ABuffer> &buffer) {
default:
{
- ALOGW("Unknown RTCP packet type %u of size %d",
+ ALOGW("Unknown RTCP packet type %u of size %zu",
(unsigned)data[1], headerLength);
break;
}
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index 0d07043..793d116 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -114,7 +114,7 @@ bool ARTPWriter::reachedEOS() {
return (mFlags & kFlagEOS) != 0;
}
-status_t ARTPWriter::start(MetaData *params) {
+status_t ARTPWriter::start(MetaData * /* params */) {
Mutex::Autolock autoLock(mLock);
if (mFlags & kFlagStarted) {
return INVALID_OPERATION;
@@ -277,7 +277,7 @@ void ARTPWriter::onRead(const sp<AMessage> &msg) {
}
if (mediaBuf->range_length() > 0) {
- ALOGV("read buffer of size %d", mediaBuf->range_length());
+ ALOGV("read buffer of size %zu", mediaBuf->range_length());
if (mMode == H264) {
StripStartcode(mediaBuf);
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index efde7a9..f25539c 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -33,7 +33,7 @@
#include <openssl/md5.h>
#include <sys/socket.h>
-#include "HTTPBase.h"
+#include "include/HTTPBase.h"
namespace android {
@@ -239,7 +239,7 @@ void ARTSPConnection::onConnect(const sp<AMessage> &msg) {
// right here, since we currently have no way of asking the user
// for this information.
- ALOGE("Malformed rtsp url %s", url.c_str());
+ ALOGE("Malformed rtsp url %s", uriDebugString(url).c_str());
reply->setInt32("result", ERROR_MALFORMED);
reply->post();
diff --git a/media/libstagefright/rtsp/ARawAudioAssembler.cpp b/media/libstagefright/rtsp/ARawAudioAssembler.cpp
index 0da5dd2..167f7a4 100644
--- a/media/libstagefright/rtsp/ARawAudioAssembler.cpp
+++ b/media/libstagefright/rtsp/ARawAudioAssembler.cpp
@@ -34,7 +34,9 @@
namespace android {
ARawAudioAssembler::ARawAudioAssembler(
- const sp<AMessage> &notify, const char *desc, const AString &params)
+ const sp<AMessage> &notify,
+ const char * /* desc */,
+ const AString & /* params */)
: mNotifyMsg(notify),
mNextExpectedSeqNoValid(false),
mNextExpectedSeqNo(0) {
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index a9b3330..98498e9 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -319,6 +319,11 @@ bool ASessionDescription::parseNTPRange(
s = end + 1; // skip the dash.
+ if (*s == '\0') {
+ *npt2 = FLT_MAX; // open ended.
+ return true;
+ }
+
if (!strncmp("now", s, 3)) {
return false; // no absolute end time available
}
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index e77c69c..d60dc2f 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -20,7 +20,7 @@ LOCAL_SRC_FILES:= \
SDPLoader.cpp \
LOCAL_C_INCLUDES:= \
- $(TOP)/frameworks/av/media/libstagefright/include \
+ $(TOP)/frameworks/av/media/libstagefright \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/openssl/include
@@ -30,6 +30,10 @@ ifeq ($(TARGET_ARCH),arm)
LOCAL_CFLAGS += -Wno-psabi
endif
+LOCAL_CFLAGS += -Werror
+
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
include $(BUILD_STATIC_LIBRARY)
################################################################################
@@ -55,4 +59,6 @@ LOCAL_MODULE_TAGS := optional
LOCAL_MODULE:= rtp_test
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
# include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index cd77aa0..423a420 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -19,7 +19,11 @@
#define MY_HANDLER_H_
//#define LOG_NDEBUG 0
+
+#ifndef LOG_TAG
#define LOG_TAG "MyHandler"
+#endif
+
#include <utils/Log.h>
#include "APacketSource.h"
@@ -42,6 +46,12 @@
#include "HTTPBase.h"
+#if LOG_NDEBUG
+#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
+#else
+#define UNUSED_UNLESS_VERBOSE(x)
+#endif
+
// If no access units are received within 5 secs, assume that the rtp
// stream has ended and signal end of stream.
static int64_t kAccessUnitTimeoutUs = 10000000ll;
@@ -149,7 +159,7 @@ struct MyHandler : public AHandler {
mSessionURL.append(StringPrintf("%u", port));
mSessionURL.append(path);
- ALOGI("rewritten session url: '%s'", mSessionURL.c_str());
+ ALOGV("rewritten session url: '%s'", mSessionURL.c_str());
}
mSessionHost = host;
@@ -178,7 +188,7 @@ struct MyHandler : public AHandler {
mConn->connect(mOriginalSessionURL.c_str(), reply);
}
- AString getControlURL(sp<ASessionDescription> desc) {
+ AString getControlURL() {
AString sessionLevelControlURL;
if (mSessionDesc->findAttribute(
0,
@@ -244,7 +254,9 @@ struct MyHandler : public AHandler {
static void addSDES(int s, const sp<ABuffer> &buffer) {
struct sockaddr_in addr;
socklen_t addrSize = sizeof(addr);
- CHECK_EQ(0, getsockname(s, (sockaddr *)&addr, &addrSize));
+ if (getsockname(s, (sockaddr *)&addr, &addrSize) != 0) {
+ inet_aton("0.0.0.0", &(addr.sin_addr));
+ }
uint8_t *data = buffer->data() + buffer->size();
data[0] = 0x80 | 1;
@@ -478,21 +490,32 @@ struct MyHandler : public AHandler {
sp<ARTSPResponse> response =
static_cast<ARTSPResponse *>(obj.get());
- if (response->mStatusCode == 302) {
+ if (response->mStatusCode == 301 || response->mStatusCode == 302) {
ssize_t i = response->mHeaders.indexOfKey("location");
CHECK_GE(i, 0);
- mSessionURL = response->mHeaders.valueAt(i);
-
- AString request;
- request = "DESCRIBE ";
- request.append(mSessionURL);
- request.append(" RTSP/1.0\r\n");
- request.append("Accept: application/sdp\r\n");
- request.append("\r\n");
+ mOriginalSessionURL = response->mHeaders.valueAt(i);
+ mSessionURL = mOriginalSessionURL;
+
+ // Strip any authentication info from the session url, we don't
+ // want to transmit user/pass in cleartext.
+ AString host, path, user, pass;
+ unsigned port;
+ if (ARTSPConnection::ParseURL(
+ mSessionURL.c_str(), &host, &port, &path, &user, &pass)
+ && user.size() > 0) {
+ mSessionURL.clear();
+ mSessionURL.append("rtsp://");
+ mSessionURL.append(host);
+ mSessionURL.append(":");
+ mSessionURL.append(StringPrintf("%u", port));
+ mSessionURL.append(path);
+
+ ALOGI("rewritten session url: '%s'", mSessionURL.c_str());
+ }
- sp<AMessage> reply = new AMessage('desc', id());
- mConn->sendRequest(request.c_str(), reply);
+ sp<AMessage> reply = new AMessage('conn', id());
+ mConn->connect(mOriginalSessionURL.c_str(), reply);
break;
}
@@ -545,7 +568,7 @@ struct MyHandler : public AHandler {
mBaseURL = tmp;
}
- mControlURL = getControlURL(mSessionDesc);
+ mControlURL = getControlURL();
if (mSessionDesc->countTracks() < 2) {
// There's no actual tracks in this session.
@@ -591,7 +614,7 @@ struct MyHandler : public AHandler {
mSeekable = !isLiveStream(mSessionDesc);
- mControlURL = getControlURL(mSessionDesc);
+ mControlURL = getControlURL();
if (mSessionDesc->countTracks() < 2) {
// There's no actual tracks in this session.
@@ -1805,6 +1828,8 @@ private:
bool addMediaTimestamp(
int32_t trackIndex, const TrackInfo *track,
const sp<ABuffer> &accessUnit) {
+ UNUSED_UNLESS_VERBOSE(trackIndex);
+
uint32_t rtpTime;
CHECK(accessUnit->meta()->findInt32(
"rtp-time", (int32_t *)&rtpTime));
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index ed3fa7e..424badf 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -18,34 +18,30 @@
#define LOG_TAG "SDPLoader"
#include <utils/Log.h>
-#include "SDPLoader.h"
+#include "include/SDPLoader.h"
#include "ASessionDescription.h"
-#include "HTTPBase.h"
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/MediaHTTP.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/Utils.h>
#define DEFAULT_SDP_SIZE 100000
namespace android {
-SDPLoader::SDPLoader(const sp<AMessage> &notify, uint32_t flags, bool uidValid, uid_t uid)
+SDPLoader::SDPLoader(
+ const sp<AMessage> &notify,
+ uint32_t flags,
+ const sp<IMediaHTTPService> &httpService)
: mNotify(notify),
mFlags(flags),
- mUIDValid(uidValid),
- mUID(uid),
mNetLooper(new ALooper),
mCancelled(false),
- mHTTPDataSource(
- HTTPBase::Create(
- (mFlags & kFlagIncognito)
- ? HTTPBase::kFlagIncognito
- : 0)) {
- if (mUIDValid) {
- mHTTPDataSource->setUID(mUID);
- }
-
+ mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())) {
mNetLooper->setName("sdp net");
mNetLooper->start(false /* runOnCallingThread */,
false /* canCallJava */,
@@ -94,11 +90,7 @@ void SDPLoader::onLoad(const sp<AMessage> &msg) {
KeyedVector<String8, String8> *headers = NULL;
msg->findPointer("headers", (void **)&headers);
- if (!(mFlags & kFlagIncognito)) {
- ALOGI("onLoad '%s'", url.c_str());
- } else {
- ALOGI("onLoad <URL suppressed>");
- }
+ ALOGV("onLoad %s", uriDebugString(url, mFlags & kFlagIncognito).c_str());
if (!mCancelled) {
err = mHTTPDataSource->connect(url.c_str(), headers);
@@ -130,7 +122,7 @@ void SDPLoader::onLoad(const sp<AMessage> &msg) {
ssize_t readSize = mHTTPDataSource->readAt(0, buffer->data(), sdpSize);
if (readSize < 0) {
- ALOGE("Failed to read SDP, error code = %ld", readSize);
+ ALOGE("Failed to read SDP, error code = %zu", readSize);
err = UNKNOWN_ERROR;
} else {
desc = new ASessionDescription;
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index 06ce16b..99b480ad 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -9,7 +9,7 @@ LOCAL_MODULE := SurfaceMediaSource_test
LOCAL_MODULE_TAGS := tests
LOCAL_SRC_FILES := \
- SurfaceMediaSource_test.cpp \
+ SurfaceMediaSource_test.cpp \
DummyRecorder.cpp \
LOCAL_SHARED_LIBRARIES := \
@@ -33,18 +33,55 @@ LOCAL_STATIC_LIBRARIES := \
libgtest_main \
LOCAL_C_INCLUDES := \
- bionic \
- bionic/libstdc++/include \
- external/gtest/include \
- external/stlport/stlport \
+ bionic \
+ bionic/libstdc++/include \
+ external/gtest/include \
+ external/stlport/stlport \
frameworks/av/media/libstagefright \
frameworks/av/media/libstagefright/include \
$(TOP)/frameworks/native/include/media/openmax \
+LOCAL_32_BIT_ONLY := true
+
include $(BUILD_EXECUTABLE)
endif
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := Utils_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+ Utils_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ liblog \
+ libmedia \
+ libstagefright \
+ libstagefright_foundation \
+ libstagefright_omx \
+ libstlport \
+
+LOCAL_STATIC_LIBRARIES := \
+ libgtest \
+ libgtest_main \
+
+LOCAL_C_INCLUDES := \
+ bionic \
+ bionic/libstdc++/include \
+ external/gtest/include \
+ external/stlport/stlport \
+ frameworks/av/include \
+ frameworks/av/media/libstagefright \
+ frameworks/av/media/libstagefright/include \
+ $(TOP)/frameworks/native/include/media/openmax \
+
+include $(BUILD_EXECUTABLE)
+
# Include subdirectory makefiles
# ============================================================
diff --git a/media/libstagefright/tests/DummyRecorder.cpp b/media/libstagefright/tests/DummyRecorder.cpp
index ac37b28..8f17088 100644
--- a/media/libstagefright/tests/DummyRecorder.cpp
+++ b/media/libstagefright/tests/DummyRecorder.cpp
@@ -61,7 +61,7 @@ status_t DummyRecorder::stop() {
mSource->stop();
void *dummy;
pthread_join(mThread, &dummy);
- status_t err = (status_t) dummy;
+ status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
ALOGV("Ending the reading thread");
return err;
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index 49ffcd6..fd889f9 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -35,7 +35,6 @@
#include <gui/SurfaceComposerClient.h>
#include <binder/ProcessState.h>
-#include <ui/FramebufferNativeWindow.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaBufferGroup.h>
@@ -110,7 +109,7 @@ protected:
} else {
ALOGV("No actual display. Choosing EGLSurface based on SurfaceMediaSource");
sp<IGraphicBufferProducer> sms = (new SurfaceMediaSource(
- getSurfaceWidth(), getSurfaceHeight()))->getBufferQueue();
+ getSurfaceWidth(), getSurfaceHeight()))->getProducer();
sp<Surface> stc = new Surface(sms);
sp<ANativeWindow> window = stc;
@@ -361,9 +360,7 @@ protected:
virtual void SetUp() {
android::ProcessState::self()->startThreadPool();
mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
-
- // Manual cast is required to avoid constructor ambiguity
- mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+ mSTC = new Surface(mSMS->getProducer());
mANW = mSTC;
}
@@ -398,7 +395,7 @@ protected:
ALOGV("SMS-GLTest::SetUp()");
android::ProcessState::self()->startThreadPool();
mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
- mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+ mSTC = new Surface(mSMS->getProducer());
mANW = mSTC;
// Doing the setup related to the GL Side
@@ -527,7 +524,8 @@ void SurfaceMediaSourceTest::oneBufferPass(int width, int height ) {
}
// Dequeuing and queuing the buffer without really filling it in.
-void SurfaceMediaSourceTest::oneBufferPassNoFill(int width, int height ) {
+void SurfaceMediaSourceTest::oneBufferPassNoFill(
+ int /* width */, int /* height */) {
ANativeWindowBuffer* anb;
ASSERT_EQ(NO_ERROR, native_window_dequeue_buffer_and_wait(mANW.get(), &anb));
ASSERT_TRUE(anb != NULL);
@@ -746,9 +744,8 @@ TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuYV12BufferNpotWriteMediaS
CHECK(fd >= 0);
sp<MediaRecorder> mr = SurfaceMediaSourceGLTest::setUpMediaRecorder(fd,
- VIDEO_SOURCE_GRALLOC_BUFFER,
- OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth,
- mYuvTexHeight, 30);
+ VIDEO_SOURCE_SURFACE, OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264,
+ mYuvTexWidth, mYuvTexHeight, 30);
// get the reference to the surfacemediasource living in
// mediaserver that is created by stagefrightrecorder
sp<IGraphicBufferProducer> iST = mr->querySurfaceMediaSourceFromMediaServer();
@@ -783,7 +780,7 @@ TEST_F(SurfaceMediaSourceGLTest, ChooseAndroidRecordableEGLConfigDummyWriter) {
ALOGV("Verify creating a surface w/ right config + dummy writer*********");
mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
- mSTC = new Surface(static_cast<sp<IGraphicBufferProducer> >( mSMS->getBufferQueue()));
+ mSTC = new Surface(mSMS->getProducer());
mANW = mSTC;
DummyRecorder writer(mSMS);
@@ -880,7 +877,7 @@ TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaSameImageEachBufNpotWrite) {
}
CHECK(fd >= 0);
- sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_GRALLOC_BUFFER,
+ sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_SURFACE,
OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
// get the reference to the surfacemediasource living in
@@ -923,7 +920,7 @@ TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaDiffImageEachBufNpotWrite) {
}
CHECK(fd >= 0);
- sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_GRALLOC_BUFFER,
+ sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_SURFACE,
OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
// get the reference to the surfacemediasource living in
diff --git a/media/libstagefright/tests/Utils_test.cpp b/media/libstagefright/tests/Utils_test.cpp
new file mode 100644
index 0000000..f2825dd
--- /dev/null
+++ b/media/libstagefright/tests/Utils_test.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Utils_test"
+
+#include <gtest/gtest.h>
+#include <utils/String8.h>
+#include <utils/Errors.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+class UtilsTest : public ::testing::Test {
+};
+
+TEST_F(UtilsTest, TestFourCC) {
+ ASSERT_EQ(FOURCC('s', 't', 'm' , 'u'), 'stmu');
+}
+
+TEST_F(UtilsTest, TestMathTemplates) {
+ ASSERT_EQ(divRound(-10, -4), 3);
+ ASSERT_EQ(divRound(-11, -4), 3);
+ ASSERT_EQ(divRound(-12, -4), 3);
+ ASSERT_EQ(divRound(-13, -4), 3);
+ ASSERT_EQ(divRound(-14, -4), 4);
+
+ ASSERT_EQ(divRound(10, -4), -3);
+ ASSERT_EQ(divRound(11, -4), -3);
+ ASSERT_EQ(divRound(12, -4), -3);
+ ASSERT_EQ(divRound(13, -4), -3);
+ ASSERT_EQ(divRound(14, -4), -4);
+
+ ASSERT_EQ(divRound(-10, 4), -3);
+ ASSERT_EQ(divRound(-11, 4), -3);
+ ASSERT_EQ(divRound(-12, 4), -3);
+ ASSERT_EQ(divRound(-13, 4), -3);
+ ASSERT_EQ(divRound(-14, 4), -4);
+
+ ASSERT_EQ(divRound(10, 4), 3);
+ ASSERT_EQ(divRound(11, 4), 3);
+ ASSERT_EQ(divRound(12, 4), 3);
+ ASSERT_EQ(divRound(13, 4), 3);
+ ASSERT_EQ(divRound(14, 4), 4);
+
+ ASSERT_EQ(divUp(-11, -4), 3);
+ ASSERT_EQ(divUp(-12, -4), 3);
+ ASSERT_EQ(divUp(-13, -4), 4);
+
+ ASSERT_EQ(divUp(11, -4), -2);
+ ASSERT_EQ(divUp(12, -4), -3);
+ ASSERT_EQ(divUp(13, -4), -3);
+
+ ASSERT_EQ(divUp(-11, 4), -2);
+ ASSERT_EQ(divUp(-12, 4), -3);
+ ASSERT_EQ(divUp(-13, 4), -3);
+
+ ASSERT_EQ(divUp(11, 4), 3);
+ ASSERT_EQ(divUp(12, 4), 3);
+ ASSERT_EQ(divUp(13, 4), 4);
+
+ ASSERT_EQ(abs(5L), 5L);
+ ASSERT_EQ(abs(-25), 25);
+
+ ASSERT_EQ(min(5.6f, 6.0f), 5.6f);
+ ASSERT_EQ(min(6.0f, 5.6f), 5.6f);
+ ASSERT_EQ(min(-4.3, 8.6), -4.3);
+ ASSERT_EQ(min(8.6, -4.3), -4.3);
+
+ ASSERT_EQ(max(5.6f, 6.0f), 6.0f);
+ ASSERT_EQ(max(6.0f, 5.6f), 6.0f);
+ ASSERT_EQ(max(-4.3, 8.6), 8.6);
+ ASSERT_EQ(max(8.6, -4.3), 8.6);
+
+ ASSERT_EQ(periodicError(124, 100), 24);
+ ASSERT_EQ(periodicError(288, 100), 12);
+ ASSERT_EQ(periodicError(-345, 100), 45);
+ ASSERT_EQ(periodicError(-493, 100), 7);
+ ASSERT_EQ(periodicError(-550, 100), 50);
+ ASSERT_EQ(periodicError(-600, 100), 0);
+}
+
+} // namespace android
diff --git a/media/libstagefright/timedtext/Android.mk b/media/libstagefright/timedtext/Android.mk
index f099bbd..6a8b9fc 100644
--- a/media/libstagefright/timedtext/Android.mk
+++ b/media/libstagefright/timedtext/Android.mk
@@ -9,7 +9,8 @@ LOCAL_SRC_FILES:= \
TimedTextSRTSource.cpp \
TimedTextPlayer.cpp
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror
+
LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/av/include/media/stagefright/timedtext \
$(TOP)/frameworks/av/media/libstagefright
diff --git a/media/libstagefright/timedtext/TimedTextDriver.cpp b/media/libstagefright/timedtext/TimedTextDriver.cpp
index 12fd7f4..71aa21e 100644
--- a/media/libstagefright/timedtext/TimedTextDriver.cpp
+++ b/media/libstagefright/timedtext/TimedTextDriver.cpp
@@ -20,6 +20,7 @@
#include <binder/IPCThreadState.h>
+#include <media/IMediaHTTPService.h>
#include <media/mediaplayer.h>
#include <media/MediaPlayerInterface.h>
#include <media/stagefright/DataSource.h>
@@ -40,9 +41,11 @@
namespace android {
TimedTextDriver::TimedTextDriver(
- const wp<MediaPlayerBase> &listener)
+ const wp<MediaPlayerBase> &listener,
+ const sp<IMediaHTTPService> &httpService)
: mLooper(new ALooper),
mListener(listener),
+ mHTTPService(httpService),
mState(UNINITIALIZED),
mCurrentTrackIndex(UINT_MAX) {
mLooper->setName("TimedTextDriver");
@@ -207,7 +210,7 @@ status_t TimedTextDriver::addOutOfBandTextSource(
}
sp<DataSource> dataSource =
- DataSource::CreateFromURI(uri);
+ DataSource::CreateFromURI(mHTTPService, uri);
return createOutOfBandTextSource(trackIndex, mimeType, dataSource);
}
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.cpp b/media/libstagefright/timedtext/TimedTextPlayer.cpp
index 9fb0afe..a070487 100644
--- a/media/libstagefright/timedtext/TimedTextPlayer.cpp
+++ b/media/libstagefright/timedtext/TimedTextPlayer.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "TimedTextPlayer"
#include <utils/Log.h>
+#include <inttypes.h>
#include <limits.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -271,7 +272,7 @@ int64_t TimedTextPlayer::delayUsFromCurrentTime(int64_t fireTimeUs) {
sp<MediaPlayerBase> listener = mListener.promote();
if (listener == NULL) {
// TODO: it may be better to return kInvalidTimeUs
- ALOGE("%s: Listener is NULL. (fireTimeUs = %lld)",
+ ALOGE("%s: Listener is NULL. (fireTimeUs = %" PRId64" )",
__FUNCTION__, fireTimeUs);
return 0;
}
diff --git a/media/libstagefright/timedtext/TimedTextSource.h b/media/libstagefright/timedtext/TimedTextSource.h
index 756cc31..8c1c1cd 100644
--- a/media/libstagefright/timedtext/TimedTextSource.h
+++ b/media/libstagefright/timedtext/TimedTextSource.h
@@ -47,7 +47,7 @@ class TimedTextSource : public RefBase {
int64_t *endTimeUs,
Parcel *parcel,
const MediaSource::ReadOptions *options = NULL) = 0;
- virtual status_t extractGlobalDescriptions(Parcel *parcel) {
+ virtual status_t extractGlobalDescriptions(Parcel * /* parcel */) {
return INVALID_OPERATION;
}
virtual sp<MetaData> getFormat();
diff --git a/media/libstagefright/timedtext/test/Android.mk b/media/libstagefright/timedtext/test/Android.mk
index a5e7ba2..9a9fde2 100644
--- a/media/libstagefright/timedtext/test/Android.mk
+++ b/media/libstagefright/timedtext/test/Android.mk
@@ -2,7 +2,6 @@ LOCAL_PATH:= $(call my-dir)
# ================================================================
# Unit tests for libstagefright_timedtext
-# See also /development/testrunner/test_defs.xml
# ================================================================
# ================================================================
@@ -18,10 +17,13 @@ LOCAL_SRC_FILES := TimedTextSRTSource_test.cpp
LOCAL_C_INCLUDES := \
$(TOP)/external/expat/lib \
- $(TOP)/frameworks/base/media/libstagefright/timedtext
+ $(TOP)/frameworks/av/media/libstagefright/timedtext
LOCAL_SHARED_LIBRARIES := \
+ libbinder \
libexpat \
- libstagefright
+ libstagefright \
+ libstagefright_foundation \
+ libutils
include $(BUILD_NATIVE_TEST)
diff --git a/media/libstagefright/webm/Android.mk b/media/libstagefright/webm/Android.mk
new file mode 100644
index 0000000..7081463
--- /dev/null
+++ b/media/libstagefright/webm/Android.mk
@@ -0,0 +1,23 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_CPPFLAGS += -D__STDINT_LIMITS \
+ -Werror
+
+LOCAL_SRC_FILES:= EbmlUtil.cpp \
+ WebmElement.cpp \
+ WebmFrame.cpp \
+ WebmFrameThread.cpp \
+ WebmWriter.cpp
+
+
+LOCAL_C_INCLUDES += $(TOP)/frameworks/av/include
+
+LOCAL_SHARED_LIBRARIES += libstagefright_foundation \
+ libstagefright \
+ libutils \
+ liblog
+
+LOCAL_MODULE:= libstagefright_webm
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/webm/EbmlUtil.cpp b/media/libstagefright/webm/EbmlUtil.cpp
new file mode 100644
index 0000000..449fec6
--- /dev/null
+++ b/media/libstagefright/webm/EbmlUtil.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+namespace {
+
+// Table for Seal's algorithm for Number of Trailing Zeros. Hacker's Delight
+// online, Figure 5-18 (http://www.hackersdelight.org/revisions.pdf)
+// The entries whose value is -1 are never referenced.
+int NTZ_TABLE[] = {
+ 32, 0, 1, 12, 2, 6, -1, 13, 3, -1, 7, -1, -1, -1, -1, 14,
+ 10, 4, -1, -1, 8, -1, -1, 25, -1, -1, -1, -1, -1, 21, 27, 15,
+ 31, 11, 5, -1, -1, -1, -1, -1, 9, -1, -1, 24, -1, -1, 20, 26,
+ 30, -1, -1, -1, -1, 23, -1, 19, 29, -1, 22, 18, 28, 17, 16, -1
+};
+
+int numberOfTrailingZeros32(int32_t i) {
+ uint32_t u = (i & -i) * 0x0450FBAF;
+ return NTZ_TABLE[(u) >> 26];
+}
+
+uint64_t highestOneBit(uint64_t n) {
+ n |= (n >> 1);
+ n |= (n >> 2);
+ n |= (n >> 4);
+ n |= (n >> 8);
+ n |= (n >> 16);
+ n |= (n >> 32);
+ return n - (n >> 1);
+}
+
+uint64_t _powerOf2(uint64_t u) {
+ uint64_t powerOf2 = highestOneBit(u);
+ return powerOf2 ? powerOf2 : 1;
+}
+
+// Based on Long.numberOfTrailingZeros in Long.java
+int numberOfTrailingZeros(uint64_t u) {
+ int32_t low = u;
+ return low !=0 ? numberOfTrailingZeros32(low)
+ : 32 + numberOfTrailingZeros32((int32_t) (u >> 32));
+}
+}
+
+namespace webm {
+
+// Encode the id and/or size of an EBML element into bytes by setting a leading length descriptor bit:
+//
+// 1xxxxxxx - 1-byte values
+// 01xxxxxx xxxxxxxx -
+// 001xxxxx xxxxxxxx xxxxxxxx -
+// 0001xxxx xxxxxxxx xxxxxxxx xxxxxxxx - ...
+// 00001xxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx -
+// 000001xx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx -
+// 0000001x xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx -
+// 00000001 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - 8-byte values
+//
+// This function uses the least number of bytes possible.
+uint64_t encodeUnsigned(uint64_t u) {
+ uint64_t powerOf2 = _powerOf2(u);
+ if (u + 1 == powerOf2 << 1)
+ powerOf2 <<= 1;
+ int shiftWidth = (7 + numberOfTrailingZeros(powerOf2)) / 7 * 7;
+ long lengthDescriptor = 1 << shiftWidth;
+ return lengthDescriptor | u;
+}
+
+// Like above but pads the input value with leading zeros up to the specified width. The length
+// descriptor is calculated based on width.
+uint64_t encodeUnsigned(uint64_t u, int width) {
+ int shiftWidth = 7 * width;
+ uint64_t lengthDescriptor = 1;
+ lengthDescriptor <<= shiftWidth;
+ return lengthDescriptor | u;
+}
+
+// Calculate the length of an EBML coded id or size from its length descriptor.
+int sizeOf(uint64_t u) {
+ uint64_t powerOf2 = _powerOf2(u);
+ int unsignedLength = numberOfTrailingZeros(powerOf2) / 8 + 1;
+ return unsignedLength;
+}
+
+// Serialize an EBML coded id or size in big-endian order.
+int serializeCodedUnsigned(uint64_t u, uint8_t* bary) {
+ int unsignedLength = sizeOf(u);
+ for (int i = unsignedLength - 1; i >= 0; i--) {
+ bary[i] = u & 0xff;
+ u >>= 8;
+ }
+ return unsignedLength;
+}
+
+}
diff --git a/media/libstagefright/webm/EbmlUtil.h b/media/libstagefright/webm/EbmlUtil.h
new file mode 100644
index 0000000..eb9c37c
--- /dev/null
+++ b/media/libstagefright/webm/EbmlUtil.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EBMLUTIL_H_
+#define EBMLUTIL_H_
+
+#include <stdint.h>
+
+namespace webm {
+
+// Encode the id and/or size of an EBML element into bytes by setting a leading length descriptor bit:
+//
+// 1xxxxxxx - 1-byte values
+// 01xxxxxx xxxxxxxx -
+// 001xxxxx xxxxxxxx xxxxxxxx -
+// 0001xxxx xxxxxxxx xxxxxxxx xxxxxxxx - ...
+// 00001xxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx -
+// 000001xx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx -
+// 0000001x xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx -
+// 00000001 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - 8-byte values
+//
+// This function uses the least number of bytes possible.
+uint64_t encodeUnsigned(uint64_t u);
+
+// Like above but pads the input value with leading zeros up to the specified width. The length
+// descriptor is calculated based on width.
+uint64_t encodeUnsigned(uint64_t u, int width);
+
+// Serialize an EBML coded id or size in big-endian order.
+int serializeCodedUnsigned(uint64_t u, uint8_t* bary);
+
+// Calculate the length of an EBML coded id or size from its length descriptor.
+int sizeOf(uint64_t u);
+
+}
+
+#endif /* EBMLUTIL_H_ */
diff --git a/media/libstagefright/webm/LinkedBlockingQueue.h b/media/libstagefright/webm/LinkedBlockingQueue.h
new file mode 100644
index 0000000..0b6a9a1
--- /dev/null
+++ b/media/libstagefright/webm/LinkedBlockingQueue.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LINKEDBLOCKINGQUEUE_H_
+#define LINKEDBLOCKINGQUEUE_H_
+
+#include <utils/List.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+
+namespace android {
+
+template<typename T>
+class LinkedBlockingQueue {
+ List<T> mList;
+ Mutex mLock;
+ Condition mContentAvailableCondition;
+
+ T front(bool remove) {
+ Mutex::Autolock autolock(mLock);
+ while (mList.empty()) {
+ mContentAvailableCondition.wait(mLock);
+ }
+ T e = *(mList.begin());
+ if (remove) {
+ mList.erase(mList.begin());
+ }
+ return e;
+ }
+
+ DISALLOW_EVIL_CONSTRUCTORS(LinkedBlockingQueue);
+
+public:
+ LinkedBlockingQueue() {
+ }
+
+ ~LinkedBlockingQueue() {
+ }
+
+ bool empty() {
+ Mutex::Autolock autolock(mLock);
+ return mList.empty();
+ }
+
+ void clear() {
+ Mutex::Autolock autolock(mLock);
+ mList.clear();
+ }
+
+ T peek() {
+ return front(false);
+ }
+
+ T take() {
+ return front(true);
+ }
+
+ void push(T e) {
+ Mutex::Autolock autolock(mLock);
+ mList.push_back(e);
+ mContentAvailableCondition.signal();
+ }
+};
+
+} /* namespace android */
+#endif /* LINKEDBLOCKINGQUEUE_H_ */
diff --git a/media/libstagefright/webm/WebmConstants.h b/media/libstagefright/webm/WebmConstants.h
new file mode 100644
index 0000000..c53f458
--- /dev/null
+++ b/media/libstagefright/webm/WebmConstants.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMCONSTANTS_H_
+#define WEBMCONSTANTS_H_
+
+#include <stdint.h>
+
+namespace webm {
+
+const int kMinEbmlVoidSize = 2;
+const int64_t kMaxMetaSeekSize = 64;
+const int64_t kMkvUnknownLength = 0x01ffffffffffffffl;
+
+// EBML element id's from http://matroska.org/technical/specs/index.html
+enum Mkv {
+ kMkvEbml = 0x1A45DFA3,
+ kMkvEbmlVersion = 0x4286,
+ kMkvEbmlReadVersion = 0x42F7,
+ kMkvEbmlMaxIdlength = 0x42F2,
+ kMkvEbmlMaxSizeLength = 0x42F3,
+ kMkvDocType = 0x4282,
+ kMkvDocTypeVersion = 0x4287,
+ kMkvDocTypeReadVersion = 0x4285,
+ kMkvVoid = 0xEC,
+ kMkvSignatureSlot = 0x1B538667,
+ kMkvSignatureAlgo = 0x7E8A,
+ kMkvSignatureHash = 0x7E9A,
+ kMkvSignaturePublicKey = 0x7EA5,
+ kMkvSignature = 0x7EB5,
+ kMkvSignatureElements = 0x7E5B,
+ kMkvSignatureElementList = 0x7E7B,
+ kMkvSignedElement = 0x6532,
+ kMkvSegment = 0x18538067,
+ kMkvSeekHead = 0x114D9B74,
+ kMkvSeek = 0x4DBB,
+ kMkvSeekId = 0x53AB,
+ kMkvSeekPosition = 0x53AC,
+ kMkvInfo = 0x1549A966,
+ kMkvTimecodeScale = 0x2AD7B1,
+ kMkvSegmentDuration = 0x4489,
+ kMkvDateUtc = 0x4461,
+ kMkvMuxingApp = 0x4D80,
+ kMkvWritingApp = 0x5741,
+ kMkvCluster = 0x1F43B675,
+ kMkvTimecode = 0xE7,
+ kMkvPrevSize = 0xAB,
+ kMkvBlockGroup = 0xA0,
+ kMkvBlock = 0xA1,
+ kMkvBlockAdditions = 0x75A1,
+ kMkvBlockMore = 0xA6,
+ kMkvBlockAddId = 0xEE,
+ kMkvBlockAdditional = 0xA5,
+ kMkvBlockDuration = 0x9B,
+ kMkvReferenceBlock = 0xFB,
+ kMkvLaceNumber = 0xCC,
+ kMkvSimpleBlock = 0xA3,
+ kMkvTracks = 0x1654AE6B,
+ kMkvTrackEntry = 0xAE,
+ kMkvTrackNumber = 0xD7,
+ kMkvTrackUid = 0x73C5,
+ kMkvTrackType = 0x83,
+ kMkvFlagEnabled = 0xB9,
+ kMkvFlagDefault = 0x88,
+ kMkvFlagForced = 0x55AA,
+ kMkvFlagLacing = 0x9C,
+ kMkvDefaultDuration = 0x23E383,
+ kMkvMaxBlockAdditionId = 0x55EE,
+ kMkvName = 0x536E,
+ kMkvLanguage = 0x22B59C,
+ kMkvCodecId = 0x86,
+ kMkvCodecPrivate = 0x63A2,
+ kMkvCodecName = 0x258688,
+ kMkvVideo = 0xE0,
+ kMkvFlagInterlaced = 0x9A,
+ kMkvStereoMode = 0x53B8,
+ kMkvAlphaMode = 0x53C0,
+ kMkvPixelWidth = 0xB0,
+ kMkvPixelHeight = 0xBA,
+ kMkvPixelCropBottom = 0x54AA,
+ kMkvPixelCropTop = 0x54BB,
+ kMkvPixelCropLeft = 0x54CC,
+ kMkvPixelCropRight = 0x54DD,
+ kMkvDisplayWidth = 0x54B0,
+ kMkvDisplayHeight = 0x54BA,
+ kMkvDisplayUnit = 0x54B2,
+ kMkvAspectRatioType = 0x54B3,
+ kMkvFrameRate = 0x2383E3,
+ kMkvAudio = 0xE1,
+ kMkvSamplingFrequency = 0xB5,
+ kMkvOutputSamplingFrequency = 0x78B5,
+ kMkvChannels = 0x9F,
+ kMkvBitDepth = 0x6264,
+ kMkvCues = 0x1C53BB6B,
+ kMkvCuePoint = 0xBB,
+ kMkvCueTime = 0xB3,
+ kMkvCueTrackPositions = 0xB7,
+ kMkvCueTrack = 0xF7,
+ kMkvCueClusterPosition = 0xF1,
+ kMkvCueBlockNumber = 0x5378
+};
+
+enum TrackTypes {
+ kInvalidType = -1,
+ kVideoType = 0x1,
+ kAudioType = 0x2,
+ kComplexType = 0x3,
+ kLogoType = 0x10,
+ kSubtitleType = 0x11,
+ kButtonsType = 0x12,
+ kControlType = 0x20
+};
+
+enum TrackNum {
+ kVideoTrackNum = 0x1,
+ kAudioTrackNum = 0x2
+};
+}
+
+#endif /* WEBMCONSTANTS_H_ */
diff --git a/media/libstagefright/webm/WebmElement.cpp b/media/libstagefright/webm/WebmElement.cpp
new file mode 100644
index 0000000..a008cab
--- /dev/null
+++ b/media/libstagefright/webm/WebmElement.cpp
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "WebmElement"
+
+#include "EbmlUtil.h"
+#include "WebmElement.h"
+#include "WebmConstants.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <utils/Log.h>
+
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+using namespace android;
+using namespace webm;
+
+namespace {
+
+int64_t voidSize(int64_t totalSize) {
+ if (totalSize < 2) {
+ return -1;
+ }
+ if (totalSize < 9) {
+ return totalSize - 2;
+ }
+ return totalSize - 9;
+}
+
+uint64_t childrenSum(const List<sp<WebmElement> >& children) {
+ uint64_t total = 0;
+ for (List<sp<WebmElement> >::const_iterator it = children.begin();
+ it != children.end(); ++it) {
+ total += (*it)->totalSize();
+ }
+ return total;
+}
+
+void populateCommonTrackEntries(
+ int num,
+ uint64_t uid,
+ bool lacing,
+ const char *lang,
+ const char *codec,
+ TrackTypes type,
+ List<sp<WebmElement> > &ls) {
+ ls.push_back(new WebmUnsigned(kMkvTrackNumber, num));
+ ls.push_back(new WebmUnsigned(kMkvTrackUid, uid));
+ ls.push_back(new WebmUnsigned(kMkvFlagLacing, lacing));
+ ls.push_back(new WebmString(kMkvLanguage, lang));
+ ls.push_back(new WebmString(kMkvCodecId, codec));
+ ls.push_back(new WebmUnsigned(kMkvTrackType, type));
+}
+}
+
+namespace android {
+
+WebmElement::WebmElement(uint64_t id, uint64_t size)
+ : mId(id), mSize(size) {
+}
+
+WebmElement::~WebmElement() {
+}
+
+int WebmElement::serializePayloadSize(uint8_t *buf) {
+ return serializeCodedUnsigned(encodeUnsigned(mSize), buf);
+}
+
+uint64_t WebmElement::serializeInto(uint8_t *buf) {
+ uint8_t *cur = buf;
+ int head = serializeCodedUnsigned(mId, cur);
+ cur += head;
+ int neck = serializePayloadSize(cur);
+ cur += neck;
+ serializePayload(cur);
+ cur += mSize;
+ return cur - buf;
+}
+
+uint64_t WebmElement::totalSize() {
+ uint8_t buf[8];
+ //............... + sizeOf(encodeUnsigned(size))
+ return sizeOf(mId) + serializePayloadSize(buf) + mSize;
+}
+
+uint8_t *WebmElement::serialize(uint64_t& size) {
+ size = totalSize();
+ uint8_t *buf = new uint8_t[size];
+ serializeInto(buf);
+ return buf;
+}
+
+int WebmElement::write(int fd, uint64_t& size) {
+ uint8_t buf[8];
+ size = totalSize();
+ off64_t off = ::lseek64(fd, (size - 1), SEEK_CUR) - (size - 1);
+ ::write(fd, buf, 1); // extend file
+
+ off64_t curOff = off + size;
+ off64_t alignedOff = off & ~(::sysconf(_SC_PAGE_SIZE) - 1);
+ off64_t mapSize = curOff - alignedOff;
+ off64_t pageOff = off - alignedOff;
+ void *dst = ::mmap64(NULL, mapSize, PROT_WRITE, MAP_SHARED, fd, alignedOff);
+ if (dst == MAP_FAILED) {
+ ALOGE("mmap64 failed; errno = %d", errno);
+ ALOGE("fd %d; flags: %o", fd, ::fcntl(fd, F_GETFL, 0));
+ return errno;
+ } else {
+ serializeInto((uint8_t*) dst + pageOff);
+ ::msync(dst, mapSize, MS_SYNC);
+ return ::munmap(dst, mapSize);
+ }
+}
+
+//=================================================================================================
+
+WebmUnsigned::WebmUnsigned(uint64_t id, uint64_t value)
+ : WebmElement(id, sizeOf(value)), mValue(value) {
+}
+
+void WebmUnsigned::serializePayload(uint8_t *buf) {
+ serializeCodedUnsigned(mValue, buf);
+}
+
+//=================================================================================================
+
+WebmFloat::WebmFloat(uint64_t id, double value)
+ : WebmElement(id, sizeof(double)), mValue(value) {
+}
+
+WebmFloat::WebmFloat(uint64_t id, float value)
+ : WebmElement(id, sizeof(float)), mValue(value) {
+}
+
+void WebmFloat::serializePayload(uint8_t *buf) {
+ uint64_t data;
+ if (mSize == sizeof(float)) {
+ float f = mValue;
+ data = *reinterpret_cast<const uint32_t*>(&f);
+ } else {
+ data = *reinterpret_cast<const uint64_t*>(&mValue);
+ }
+ for (int i = mSize - 1; i >= 0; --i) {
+ buf[i] = data & 0xff;
+ data >>= 8;
+ }
+}
+
+//=================================================================================================
+
+WebmBinary::WebmBinary(uint64_t id, const sp<ABuffer> &ref)
+ : WebmElement(id, ref->size()), mRef(ref) {
+}
+
+void WebmBinary::serializePayload(uint8_t *buf) {
+ memcpy(buf, mRef->data(), mRef->size());
+}
+
+//=================================================================================================
+
+WebmString::WebmString(uint64_t id, const char *str)
+ : WebmElement(id, strlen(str)), mStr(str) {
+}
+
+void WebmString::serializePayload(uint8_t *buf) {
+ memcpy(buf, mStr, strlen(mStr));
+}
+
+//=================================================================================================
+
+WebmSimpleBlock::WebmSimpleBlock(
+ int trackNum,
+ int16_t relTimecode,
+ bool key,
+ const sp<ABuffer>& orig)
+ // ............................ trackNum*1 + timecode*2 + flags*1
+ // ^^^
+ // Only the least significant byte of trackNum is encoded
+ : WebmElement(kMkvSimpleBlock, orig->size() + 4),
+ mTrackNum(trackNum),
+ mRelTimecode(relTimecode),
+ mKey(key),
+ mRef(orig) {
+}
+
+void WebmSimpleBlock::serializePayload(uint8_t *buf) {
+ serializeCodedUnsigned(encodeUnsigned(mTrackNum), buf);
+ buf[1] = (mRelTimecode & 0xff00) >> 8;
+ buf[2] = mRelTimecode & 0xff;
+ buf[3] = mKey ? 0x80 : 0;
+ memcpy(buf + 4, mRef->data(), mSize - 4);
+}
+
+//=================================================================================================
+
+EbmlVoid::EbmlVoid(uint64_t totalSize)
+ : WebmElement(kMkvVoid, voidSize(totalSize)),
+ mSizeWidth(totalSize - sizeOf(kMkvVoid) - voidSize(totalSize)) {
+ CHECK_GE(voidSize(totalSize), 0);
+}
+
+int EbmlVoid::serializePayloadSize(uint8_t *buf) {
+ return serializeCodedUnsigned(encodeUnsigned(mSize, mSizeWidth), buf);
+}
+
+void EbmlVoid::serializePayload(uint8_t *buf) {
+ ::memset(buf, 0, mSize);
+ return;
+}
+
+//=================================================================================================
+
+WebmMaster::WebmMaster(uint64_t id, const List<sp<WebmElement> >& children)
+ : WebmElement(id, childrenSum(children)), mChildren(children) {
+}
+
+WebmMaster::WebmMaster(uint64_t id)
+ : WebmElement(id, 0) {
+}
+
+int WebmMaster::serializePayloadSize(uint8_t *buf) {
+ if (mSize == 0){
+ return serializeCodedUnsigned(kMkvUnknownLength, buf);
+ }
+ return WebmElement::serializePayloadSize(buf);
+}
+
+void WebmMaster::serializePayload(uint8_t *buf) {
+ uint64_t off = 0;
+ for (List<sp<WebmElement> >::const_iterator it = mChildren.begin(); it != mChildren.end();
+ ++it) {
+ sp<WebmElement> child = (*it);
+ child->serializeInto(buf + off);
+ off += child->totalSize();
+ }
+}
+
+//=================================================================================================
+
+sp<WebmElement> WebmElement::CuePointEntry(uint64_t time, int track, uint64_t off) {
+ List<sp<WebmElement> > cuePointEntryFields;
+ cuePointEntryFields.push_back(new WebmUnsigned(kMkvCueTrack, track));
+ cuePointEntryFields.push_back(new WebmUnsigned(kMkvCueClusterPosition, off));
+ WebmElement *cueTrackPositions = new WebmMaster(kMkvCueTrackPositions, cuePointEntryFields);
+
+ cuePointEntryFields.clear();
+ cuePointEntryFields.push_back(new WebmUnsigned(kMkvCueTime, time));
+ cuePointEntryFields.push_back(cueTrackPositions);
+ return new WebmMaster(kMkvCuePoint, cuePointEntryFields);
+}
+
+sp<WebmElement> WebmElement::SeekEntry(uint64_t id, uint64_t off) {
+ List<sp<WebmElement> > seekEntryFields;
+ seekEntryFields.push_back(new WebmUnsigned(kMkvSeekId, id));
+ seekEntryFields.push_back(new WebmUnsigned(kMkvSeekPosition, off));
+ return new WebmMaster(kMkvSeek, seekEntryFields);
+}
+
+sp<WebmElement> WebmElement::EbmlHeader(
+ int ver,
+ int readVer,
+ int maxIdLen,
+ int maxSizeLen,
+ int docVer,
+ int docReadVer) {
+ List<sp<WebmElement> > headerFields;
+ headerFields.push_back(new WebmUnsigned(kMkvEbmlVersion, ver));
+ headerFields.push_back(new WebmUnsigned(kMkvEbmlReadVersion, readVer));
+ headerFields.push_back(new WebmUnsigned(kMkvEbmlMaxIdlength, maxIdLen));
+ headerFields.push_back(new WebmUnsigned(kMkvEbmlMaxSizeLength, maxSizeLen));
+ headerFields.push_back(new WebmString(kMkvDocType, "webm"));
+ headerFields.push_back(new WebmUnsigned(kMkvDocTypeVersion, docVer));
+ headerFields.push_back(new WebmUnsigned(kMkvDocTypeReadVersion, docReadVer));
+ return new WebmMaster(kMkvEbml, headerFields);
+}
+
+sp<WebmElement> WebmElement::SegmentInfo(uint64_t scale, double dur) {
+ List<sp<WebmElement> > segmentInfo;
+ // place duration first; easier to patch
+ segmentInfo.push_back(new WebmFloat(kMkvSegmentDuration, dur));
+ segmentInfo.push_back(new WebmUnsigned(kMkvTimecodeScale, scale));
+ segmentInfo.push_back(new WebmString(kMkvMuxingApp, "android"));
+ segmentInfo.push_back(new WebmString(kMkvWritingApp, "android"));
+ return new WebmMaster(kMkvInfo, segmentInfo);
+}
+
+sp<WebmElement> WebmElement::AudioTrackEntry(
+ int chans,
+ double rate,
+ const sp<ABuffer> &buf,
+ int bps,
+ uint64_t uid,
+ bool lacing,
+ const char *lang) {
+ if (uid == 0) {
+ uid = kAudioTrackNum;
+ }
+
+ List<sp<WebmElement> > trackEntryFields;
+ populateCommonTrackEntries(
+ kAudioTrackNum,
+ uid,
+ lacing,
+ lang,
+ "A_VORBIS",
+ kAudioType,
+ trackEntryFields);
+
+ List<sp<WebmElement> > audioInfo;
+ audioInfo.push_back(new WebmUnsigned(kMkvChannels, chans));
+ audioInfo.push_back(new WebmFloat(kMkvSamplingFrequency, rate));
+ if (bps) {
+ WebmElement *bitDepth = new WebmUnsigned(kMkvBitDepth, bps);
+ audioInfo.push_back(bitDepth);
+ }
+
+ trackEntryFields.push_back(new WebmMaster(kMkvAudio, audioInfo));
+ trackEntryFields.push_back(new WebmBinary(kMkvCodecPrivate, buf));
+ return new WebmMaster(kMkvTrackEntry, trackEntryFields);
+}
+
+sp<WebmElement> WebmElement::VideoTrackEntry(
+ uint64_t width,
+ uint64_t height,
+ uint64_t uid,
+ bool lacing,
+ const char *lang) {
+ if (uid == 0) {
+ uid = kVideoTrackNum;
+ }
+
+ List<sp<WebmElement> > trackEntryFields;
+ populateCommonTrackEntries(
+ kVideoTrackNum,
+ uid,
+ lacing,
+ lang,
+ "V_VP8",
+ kVideoType,
+ trackEntryFields);
+
+ List<sp<WebmElement> > videoInfo;
+ videoInfo.push_back(new WebmUnsigned(kMkvPixelWidth, width));
+ videoInfo.push_back(new WebmUnsigned(kMkvPixelHeight, height));
+
+ trackEntryFields.push_back(new WebmMaster(kMkvVideo, videoInfo));
+ return new WebmMaster(kMkvTrackEntry, trackEntryFields);
+}
+} /* namespace android */
diff --git a/media/libstagefright/webm/WebmElement.h b/media/libstagefright/webm/WebmElement.h
new file mode 100644
index 0000000..f19933e
--- /dev/null
+++ b/media/libstagefright/webm/WebmElement.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMELEMENT_H_
+#define WEBMELEMENT_H_
+
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/List.h>
+
+namespace android {
+
+struct WebmElement : public LightRefBase<WebmElement> {
+ const uint64_t mId, mSize;
+
+ WebmElement(uint64_t id, uint64_t size);
+ virtual ~WebmElement();
+
+ virtual int serializePayloadSize(uint8_t *buf);
+ virtual void serializePayload(uint8_t *buf)=0;
+ uint64_t totalSize();
+ uint64_t serializeInto(uint8_t *buf);
+ uint8_t *serialize(uint64_t& size);
+ int write(int fd, uint64_t& size);
+
+ static sp<WebmElement> EbmlHeader(
+ int ver = 1,
+ int readVer = 1,
+ int maxIdLen = 4,
+ int maxSizeLen = 8,
+ int docVer = 2,
+ int docReadVer = 2);
+
+ static sp<WebmElement> SegmentInfo(uint64_t scale = 1000000, double dur = 0);
+
+ static sp<WebmElement> AudioTrackEntry(
+ int chans,
+ double rate,
+ const sp<ABuffer> &buf,
+ int bps = 0,
+ uint64_t uid = 0,
+ bool lacing = false,
+ const char *lang = "und");
+
+ static sp<WebmElement> VideoTrackEntry(
+ uint64_t width,
+ uint64_t height,
+ uint64_t uid = 0,
+ bool lacing = false,
+ const char *lang = "und");
+
+ static sp<WebmElement> SeekEntry(uint64_t id, uint64_t off);
+ static sp<WebmElement> CuePointEntry(uint64_t time, int track, uint64_t off);
+ static sp<WebmElement> SimpleBlock(
+ int trackNum,
+ int16_t timecode,
+ bool key,
+ const uint8_t *data,
+ uint64_t dataSize);
+};
+
+struct WebmUnsigned : public WebmElement {
+ WebmUnsigned(uint64_t id, uint64_t value);
+ const uint64_t mValue;
+ void serializePayload(uint8_t *buf);
+};
+
+struct WebmFloat : public WebmElement {
+ const double mValue;
+ WebmFloat(uint64_t id, float value);
+ WebmFloat(uint64_t id, double value);
+ void serializePayload(uint8_t *buf);
+};
+
+struct WebmBinary : public WebmElement {
+ const sp<ABuffer> mRef;
+ WebmBinary(uint64_t id, const sp<ABuffer> &ref);
+ void serializePayload(uint8_t *buf);
+};
+
+struct WebmString : public WebmElement {
+ const char *const mStr;
+ WebmString(uint64_t id, const char *str);
+ void serializePayload(uint8_t *buf);
+};
+
+struct WebmSimpleBlock : public WebmElement {
+ const int mTrackNum;
+ const int16_t mRelTimecode;
+ const bool mKey;
+ const sp<ABuffer> mRef;
+
+ WebmSimpleBlock(int trackNum, int16_t timecode, bool key, const sp<ABuffer>& orig);
+ void serializePayload(uint8_t *buf);
+};
+
+struct EbmlVoid : public WebmElement {
+ const uint64_t mSizeWidth;
+ EbmlVoid(uint64_t totalSize);
+ int serializePayloadSize(uint8_t *buf);
+ void serializePayload(uint8_t *buf);
+};
+
+struct WebmMaster : public WebmElement {
+ const List<sp<WebmElement> > mChildren;
+ WebmMaster(uint64_t id);
+ WebmMaster(uint64_t id, const List<sp<WebmElement> > &children);
+ int serializePayloadSize(uint8_t *buf);
+ void serializePayload(uint8_t *buf);
+};
+
+} /* namespace android */
+#endif /* WEBMELEMENT_H_ */
diff --git a/media/libstagefright/webm/WebmFrame.cpp b/media/libstagefright/webm/WebmFrame.cpp
new file mode 100644
index 0000000..e5134ed
--- /dev/null
+++ b/media/libstagefright/webm/WebmFrame.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WebmFrame"
+
+#include "WebmFrame.h"
+#include "WebmConstants.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <unistd.h>
+
+using namespace android;
+using namespace webm;
+
+namespace {
+sp<ABuffer> toABuffer(MediaBuffer *mbuf) {
+ sp<ABuffer> abuf = new ABuffer(mbuf->range_length());
+ memcpy(abuf->data(), (uint8_t*) mbuf->data() + mbuf->range_offset(), mbuf->range_length());
+ return abuf;
+}
+}
+
+namespace android {
+
+const sp<WebmFrame> WebmFrame::EOS = new WebmFrame();
+
+WebmFrame::WebmFrame()
+ : mType(kInvalidType),
+ mKey(false),
+ mAbsTimecode(UINT64_MAX),
+ mData(new ABuffer(0)),
+ mEos(true) {
+}
+
+WebmFrame::WebmFrame(int type, bool key, uint64_t absTimecode, MediaBuffer *mbuf)
+ : mType(type),
+ mKey(key),
+ mAbsTimecode(absTimecode),
+ mData(toABuffer(mbuf)),
+ mEos(false) {
+}
+
+sp<WebmElement> WebmFrame::SimpleBlock(uint64_t baseTimecode) const {
+ return new WebmSimpleBlock(
+ mType == kVideoType ? kVideoTrackNum : kAudioTrackNum,
+ mAbsTimecode - baseTimecode,
+ mKey,
+ mData);
+}
+
+bool WebmFrame::operator<(const WebmFrame &other) const {
+ if (this->mEos) {
+ return false;
+ }
+ if (other.mEos) {
+ return true;
+ }
+ if (this->mAbsTimecode == other.mAbsTimecode) {
+ if (this->mType == kAudioType && other.mType == kVideoType) {
+ return true;
+ }
+ if (this->mType == kVideoType && other.mType == kAudioType) {
+ return false;
+ }
+ return false;
+ }
+ return this->mAbsTimecode < other.mAbsTimecode;
+}
+} /* namespace android */
diff --git a/media/libstagefright/webm/WebmFrame.h b/media/libstagefright/webm/WebmFrame.h
new file mode 100644
index 0000000..4f0b055
--- /dev/null
+++ b/media/libstagefright/webm/WebmFrame.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMFRAME_H_
+#define WEBMFRAME_H_
+
+#include "WebmElement.h"
+
+namespace android {
+
+struct WebmFrame : LightRefBase<WebmFrame> {
+public:
+ const int mType;
+ const bool mKey;
+ const uint64_t mAbsTimecode;
+ const sp<ABuffer> mData;
+ const bool mEos;
+
+ WebmFrame();
+ WebmFrame(int type, bool key, uint64_t absTimecode, MediaBuffer *buf);
+ ~WebmFrame() {}
+
+ sp<WebmElement> SimpleBlock(uint64_t baseTimecode) const;
+
+ bool operator<(const WebmFrame &other) const;
+
+ static const sp<WebmFrame> EOS;
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(WebmFrame);
+};
+
+} /* namespace android */
+#endif /* WEBMFRAME_H_ */
diff --git a/media/libstagefright/webm/WebmFrameThread.cpp b/media/libstagefright/webm/WebmFrameThread.cpp
new file mode 100644
index 0000000..a4b8a42
--- /dev/null
+++ b/media/libstagefright/webm/WebmFrameThread.cpp
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WebmFrameThread"
+
+#include "WebmConstants.h"
+#include "WebmFrameThread.h"
+
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <utils/Log.h>
+#include <inttypes.h>
+
+using namespace webm;
+
+namespace android {
+
+void *WebmFrameThread::wrap(void *arg) {
+ WebmFrameThread *worker = reinterpret_cast<WebmFrameThread*>(arg);
+ worker->run();
+ return NULL;
+}
+
+status_t WebmFrameThread::start() {
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+ pthread_create(&mThread, &attr, WebmFrameThread::wrap, this);
+ pthread_attr_destroy(&attr);
+ return OK;
+}
+
+status_t WebmFrameThread::stop() {
+ void *status;
+ pthread_join(mThread, &status);
+ return (status_t)(intptr_t)status;
+}
+
+//=================================================================================================
+
// A frame source is described by its track type (audio/video) and the
// queue into which it pushes frames for the sink thread to consume.
WebmFrameSourceThread::WebmFrameSourceThread(
    int type,
    LinkedBlockingQueue<const sp<WebmFrame> >& sink)
    : mType(type), mSink(sink) {
}
+
+//=================================================================================================
+
// Builds a sink thread draining the given source threads' frame queues.
// NOTE: fd and off are captured BY REFERENCE (see the member declarations)
// so the owning WebmWriter can fill in the real values after construction.
// The thread starts in the "done" state until start() is called.
WebmFrameSinkThread::WebmFrameSinkThread(
    const int& fd,
    const uint64_t& off,
    sp<WebmFrameSourceThread> videoThread,
    sp<WebmFrameSourceThread> audioThread,
    List<sp<WebmElement> >& cues)
    : mFd(fd),
      mSegmentDataStart(off),
      mVideoFrames(videoThread->mSink),
      mAudioFrames(audioThread->mSink),
      mCues(cues),
      mDone(true) {
}

// Same as above, but takes the two frame queues directly.
WebmFrameSinkThread::WebmFrameSinkThread(
    const int& fd,
    const uint64_t& off,
    LinkedBlockingQueue<const sp<WebmFrame> >& videoSource,
    LinkedBlockingQueue<const sp<WebmFrame> >& audioSource,
    List<sp<WebmElement> >& cues)
    : mFd(fd),
      mSegmentDataStart(off),
      mVideoFrames(videoSource),
      mAudioFrames(audioSource),
      mCues(cues),
      mDone(true) {
}
+
+// Initializes a webm cluster with its starting timecode.
+//
+// frames:
+// sequence of input audio/video frames received from the source.
+//
+// clusterTimecodeL:
+// the starting timecode of the cluster; this is the timecode of the first
+// frame since frames are ordered by timestamp.
+//
+// children:
+// list to hold child elements in a webm cluster (start timecode and
+// simple blocks).
+//
+// static
+void WebmFrameSinkThread::initCluster(
+ List<const sp<WebmFrame> >& frames,
+ uint64_t& clusterTimecodeL,
+ List<sp<WebmElement> >& children) {
+ CHECK(!frames.empty() && children.empty());
+
+ const sp<WebmFrame> f = *(frames.begin());
+ clusterTimecodeL = f->mAbsTimecode;
+ WebmUnsigned *clusterTimecode = new WebmUnsigned(kMkvTimecode, clusterTimecodeL);
+ children.clear();
+ children.push_back(clusterTimecode);
+}
+
// Serializes the accumulated cluster children (timecode + simple blocks)
// as one Cluster element at the current file position, then resets the
// child list for the next cluster.
void WebmFrameSinkThread::writeCluster(List<sp<WebmElement> >& children) {
    // children must contain at least one simpleblock and its timecode
    CHECK_GE(children.size(), 2);

    uint64_t size;
    sp<WebmElement> cluster = new WebmMaster(kMkvCluster, children);
    cluster->write(mFd, size);
    children.clear();
}
+
+// Write out (possibly multiple) webm cluster(s) from frames split on video key frames.
+//
+// last:
+// current flush is triggered by EOS instead of a second outstanding video key frame.
void WebmFrameSinkThread::flushFrames(List<const sp<WebmFrame> >& frames, bool last) {
    if (frames.empty()) {
        return;
    }

    uint64_t clusterTimecodeL;
    List<sp<WebmElement> > children;
    initCluster(frames, clusterTimecodeL, children);

    // The cue for this flush points at the latest video key frame seen
    // (falling back to the cluster start), at the current file offset.
    uint64_t cueTime = clusterTimecodeL;
    off_t fpos = ::lseek(mFd, 0, SEEK_CUR);
    size_t n = frames.size();
    if (!last) {
        // If we are not flushing the last sequence of outstanding frames, flushFrames
        // must have been called right after we have pushed a second outstanding video key
        // frame (the last frame), which belongs to the next cluster; also hold back on
        // flushing the second to last frame before we check its type. A audio frame
        // should precede the aforementioned video key frame in the next sequence, a video
        // frame should be the last frame in the current (to-be-flushed) sequence.
        CHECK_GE(n, 2);
        n -= 2;
    }

    for (size_t i = 0; i < n; i++) {
        const sp<WebmFrame> f = *(frames.begin());
        if (f->mType == kVideoType && f->mKey) {
            cueTime = f->mAbsTimecode;
        }

        // A SimpleBlock's relative timecode is a signed 16-bit value;
        // start a new cluster before the offset would overflow.
        if (f->mAbsTimecode - clusterTimecodeL > INT16_MAX) {
            writeCluster(children);
            initCluster(frames, clusterTimecodeL, children);
        }

        frames.erase(frames.begin());
        children.push_back(f->SimpleBlock(clusterTimecodeL));
    }

    // equivalent to last==false
    if (!frames.empty()) {
        // decide whether to write out the second to last frame.
        const sp<WebmFrame> secondLastFrame = *(frames.begin());
        if (secondLastFrame->mType == kVideoType) {
            frames.erase(frames.begin());
            children.push_back(secondLastFrame->SimpleBlock(clusterTimecodeL));
        }
    }

    writeCluster(children);
    sp<WebmElement> cuePoint = WebmElement::CuePointEntry(cueTime, 1, fpos - mSegmentDataStart);
    mCues.push_back(cuePoint);
}
+
// Marks the sink active before spawning the worker thread.
status_t WebmFrameSinkThread::start() {
    mDone = false;
    return WebmFrameThread::start();
}

// Requests shutdown and unblocks run() by pushing EOS sentinels into
// both queues, then joins the worker thread.
status_t WebmFrameSinkThread::stop() {
    mDone = true;
    mVideoFrames.push(WebmFrame::EOS);
    mAudioFrames.push(WebmFrame::EOS);
    return WebmFrameThread::stop();
}
+
// Merges the audio and video queues in timestamp order and flushes the
// interleaved frames to disk as clusters, splitting on video key frames.
void WebmFrameSinkThread::run() {
    int numVideoKeyFrames = 0;
    List<const sp<WebmFrame> > outstandingFrames;
    while (!mDone) {
        // Peek (without consuming) the head of each queue.
        // NOTE(review): assumes peek() blocks until a frame (or EOS) is
        // available — confirm against LinkedBlockingQueue.
        ALOGV("wait v frame");
        const sp<WebmFrame> videoFrame = mVideoFrames.peek();
        ALOGV("v frame: %p", videoFrame.get());

        ALOGV("wait a frame");
        const sp<WebmFrame> audioFrame = mAudioFrames.peek();
        ALOGV("a frame: %p", audioFrame.get());

        if (videoFrame->mEos && audioFrame->mEos) {
            break;
        }

        // Consume whichever frame sorts earlier (WebmFrame::operator<
        // orders by absolute timecode).
        if (*audioFrame < *videoFrame) {
            ALOGV("take a frame");
            mAudioFrames.take();
            outstandingFrames.push_back(audioFrame);
        } else {
            ALOGV("take v frame");
            mVideoFrames.take();
            outstandingFrames.push_back(videoFrame);
            if (videoFrame->mKey)
                numVideoKeyFrames++;
        }

        // A second video key frame marks the start of the next cluster:
        // flush everything collected before it.
        if (numVideoKeyFrames == 2) {
            flushFrames(outstandingFrames, /* last = */ false);
            numVideoKeyFrames--;
        }
    }
    ALOGV("flushing last cluster (size %zu)", outstandingFrames.size());
    flushFrames(outstandingFrames, /* last = */ true);
    mDone = true;
}
+
+//=================================================================================================
+
+static const int64_t kInitialDelayTimeUs = 700000LL;
+
+void WebmFrameMediaSourceThread::clearFlags() {
+ mDone = false;
+ mPaused = false;
+ mResumed = false;
+ mStarted = false;
+ mReachedEOS = false;
+}
+
// Wraps a MediaSource as a frame producer. For real-time multi-track
// recordings the start time is pushed back by an offset to help align
// the audio/video start (see the MPEG4Writer note below).
WebmFrameMediaSourceThread::WebmFrameMediaSourceThread(
    const sp<MediaSource>& source,
    int type,
    LinkedBlockingQueue<const sp<WebmFrame> >& sink,
    uint64_t timeCodeScale,
    int64_t startTimeRealUs,
    int32_t startTimeOffsetMs,
    int numTracks,
    bool realTimeRecording)
    : WebmFrameSourceThread(type, sink),
      mSource(source),
      mTimeCodeScale(timeCodeScale),
      mTrackDurationUs(0) {
    clearFlags();
    mStartTimeUs = startTimeRealUs;
    if (realTimeRecording && numTracks > 1) {
        /*
         * Copied from MPEG4Writer
         *
         * This extra delay of accepting incoming audio/video signals
         * helps to align a/v start time at the beginning of a recording
         * session, and it also helps eliminate the "recording" sound for
         * camcorder applications.
         *
         * If client does not set the start time offset, we fall back to
         * use the default initial delay value.
         */
        int64_t startTimeOffsetUs = startTimeOffsetMs * 1000LL;
        if (startTimeOffsetUs < 0) { // Start time offset was not set
            startTimeOffsetUs = kInitialDelayTimeUs;
        }
        mStartTimeUs += startTimeOffsetUs;
        ALOGI("Start time offset: %" PRId64 " us", startTimeOffsetUs);
    }
}
+
+status_t WebmFrameMediaSourceThread::start() {
+ sp<MetaData> meta = new MetaData;
+ meta->setInt64(kKeyTime, mStartTimeUs);
+ status_t err = mSource->start(meta.get());
+ if (err != OK) {
+ mDone = true;
+ mReachedEOS = true;
+ return err;
+ } else {
+ mStarted = true;
+ return WebmFrameThread::start();
+ }
+}
+
+status_t WebmFrameMediaSourceThread::resume() {
+ if (!mDone && mPaused) {
+ mPaused = false;
+ mResumed = true;
+ }
+ return OK;
+}
+
+status_t WebmFrameMediaSourceThread::pause() {
+ if (mStarted) {
+ mPaused = true;
+ }
+ return OK;
+}
+
// Stops the underlying MediaSource (which unblocks a pending read) and
// joins the worker thread. No-op if never started.
status_t WebmFrameMediaSourceThread::stop() {
    if (mStarted) {
        mStarted = false;
        mDone = true;
        mSource->stop();
        return WebmFrameThread::stop();
    }
    return OK;
}
+
// Pulls buffers from the MediaSource until EOS or stop(), normalizes
// their timestamps (re-based to the first sample, minus time spent
// paused), and pushes them into the sink queue as WebmFrames.
void WebmFrameMediaSourceThread::run() {
    int32_t count = 0;
    int64_t timestampUs = 0xdeadbeef;
    int64_t lastTimestampUs = 0; // Previous sample time stamp
    int64_t lastDurationUs = 0; // Previous sample duration
    int64_t previousPausedDurationUs = 0;

    // NOTE(review): this clobbers the mStartTimeUs computed in the
    // constructor; timestamps are re-based to this stream's own first
    // sample instead. Confirm this is intended for a/v alignment.
    const uint64_t kUninitialized = 0xffffffffffffffffL;
    mStartTimeUs = kUninitialized;

    status_t err = OK;
    MediaBuffer *buffer;
    while (!mDone && (err = mSource->read(&buffer, NULL)) == OK) {
        // Skip empty buffers.
        if (buffer->range_length() == 0) {
            buffer->release();
            buffer = NULL;
            continue;
        }

        sp<MetaData> md = buffer->meta_data();
        CHECK(md->findInt64(kKeyTime, &timestampUs));
        if (mStartTimeUs == kUninitialized) {
            mStartTimeUs = timestampUs;
        }
        timestampUs -= mStartTimeUs;

        // While paused, drop frames but keep tracking timestamps so the
        // paused duration can be computed on resume.
        if (mPaused && !mResumed) {
            lastDurationUs = timestampUs - lastTimestampUs;
            lastTimestampUs = timestampUs;
            buffer->release();
            buffer = NULL;
            continue;
        }
        ++count;

        // adjust time-stamps after pause/resume
        if (mResumed) {
            int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
            CHECK_GE(durExcludingEarlierPausesUs, 0ll);
            int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
            CHECK_GE(pausedDurationUs, lastDurationUs);
            previousPausedDurationUs += pausedDurationUs - lastDurationUs;
            mResumed = false;
        }
        timestampUs -= previousPausedDurationUs;
        CHECK_GE(timestampUs, 0ll);

        // If the sync-frame key is absent, isSync keeps its false default.
        int32_t isSync = false;
        md->findInt32(kKeyIsSyncFrame, &isSync);
        // Convert microseconds to timecode units (mTimeCodeScale is in ns).
        const sp<WebmFrame> f = new WebmFrame(
            mType,
            isSync,
            timestampUs * 1000 / mTimeCodeScale,
            buffer);
        mSink.push(f);

        ALOGV(
            "%s %s frame at %" PRId64 " size %zu\n",
            mType == kVideoType ? "video" : "audio",
            isSync ? "I" : "P",
            timestampUs * 1000 / mTimeCodeScale,
            buffer->range_length());

        buffer->release();
        buffer = NULL;

        if (timestampUs > mTrackDurationUs) {
            mTrackDurationUs = timestampUs;
        }
        lastDurationUs = timestampUs - lastTimestampUs;
        lastTimestampUs = timestampUs;
    }

    // Account for the duration of the final sample, then signal EOS.
    mTrackDurationUs += lastDurationUs;
    mSink.push(WebmFrame::EOS);
}
+}
diff --git a/media/libstagefright/webm/WebmFrameThread.h b/media/libstagefright/webm/WebmFrameThread.h
new file mode 100644
index 0000000..d65d9b7
--- /dev/null
+++ b/media/libstagefright/webm/WebmFrameThread.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMFRAMETHREAD_H_
+#define WEBMFRAMETHREAD_H_
+
+#include "WebmFrame.h"
+#include "LinkedBlockingQueue.h"
+
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaSource.h>
+
+#include <utils/List.h>
+#include <utils/Errors.h>
+
+#include <pthread.h>
+
+namespace android {
+
// Base class for the writer's worker threads: a joinable pthread whose
// body is the subclass's run() implementation.
class WebmFrameThread : public LightRefBase<WebmFrameThread> {
public:
    virtual void run() = 0;
    virtual bool running() { return false; }
    // Spawns the thread (executes run() via wrap()).
    virtual status_t start();
    virtual status_t pause() { return OK; }
    virtual status_t resume() { return OK; }
    // Joins the thread and returns its exit status.
    virtual status_t stop();
    // NOTE(review): during destruction the virtual stop() call resolves
    // to the base implementation; subclasses that override stop() must
    // already be stopped before their destructor runs.
    virtual ~WebmFrameThread() { stop(); }
    // pthread entry trampoline; arg is the WebmFrameThread instance.
    static void *wrap(void *arg);

protected:
    WebmFrameThread()
        : mThread(0) {
    }

private:
    pthread_t mThread;
    DISALLOW_EVIL_CONSTRUCTORS(WebmFrameThread);
};
+
+//=================================================================================================
+
class WebmFrameSourceThread;
// Consumes the audio and video frame queues, interleaves the frames by
// timestamp and writes them to the output file as webm clusters,
// collecting cue points along the way.
class WebmFrameSinkThread : public WebmFrameThread {
public:
    WebmFrameSinkThread(
        const int& fd,
        const uint64_t& off,
        sp<WebmFrameSourceThread> videoThread,
        sp<WebmFrameSourceThread> audioThread,
        List<sp<WebmElement> >& cues);

    WebmFrameSinkThread(
        const int& fd,
        const uint64_t& off,
        LinkedBlockingQueue<const sp<WebmFrame> >& videoSource,
        LinkedBlockingQueue<const sp<WebmFrame> >& audioSource,
        List<sp<WebmElement> >& cues);

    void run();
    bool running() {
        return !mDone;
    }
    status_t start();
    status_t stop();

private:
    // Held by reference: the owning WebmWriter updates these (e.g. the
    // segment data start offset) after this thread is constructed.
    const int& mFd;
    const uint64_t& mSegmentDataStart;
    LinkedBlockingQueue<const sp<WebmFrame> >& mVideoFrames;
    LinkedBlockingQueue<const sp<WebmFrame> >& mAudioFrames;
    List<sp<WebmElement> >& mCues;

    volatile bool mDone;

    // Starts a cluster: records its base timecode and seeds the children.
    static void initCluster(
        List<const sp<WebmFrame> >& frames,
        uint64_t& clusterTimecodeL,
        List<sp<WebmElement> >& children);
    // Writes the accumulated cluster children to the file.
    void writeCluster(List<sp<WebmElement> >& children);
    // Writes out cluster(s) from frames, split on video key frames.
    void flushFrames(List<const sp<WebmFrame> >& frames, bool last);
};
+
+//=================================================================================================
+
// Abstract producer: pushes frames of one track type into the queue
// drained by the sink thread.
class WebmFrameSourceThread : public WebmFrameThread {
public:
    WebmFrameSourceThread(int type, LinkedBlockingQueue<const sp<WebmFrame> >& sink);
    virtual int64_t getDurationUs() = 0;
protected:
    const int mType;  // kAudioType or kVideoType
    LinkedBlockingQueue<const sp<WebmFrame> >& mSink;

    friend class WebmFrameSinkThread;
};
+
+//=================================================================================================
+
// Placeholder source for a missing track: immediately signals EOS so the
// sink thread does not block forever on an empty queue.
class WebmFrameEmptySourceThread : public WebmFrameSourceThread {
public:
    WebmFrameEmptySourceThread(int type, LinkedBlockingQueue<const sp<WebmFrame> >& sink)
        : WebmFrameSourceThread(type, sink) {
    }
    void run() { mSink.push(WebmFrame::EOS); }
    int64_t getDurationUs() { return 0; }
};
+
+//=================================================================================================
+
// Produces frames by pulling from a MediaSource (encoder output),
// normalizing timestamps and handling pause/resume accounting.
class WebmFrameMediaSourceThread: public WebmFrameSourceThread {
public:
    WebmFrameMediaSourceThread(
        const sp<MediaSource>& source,
        int type,
        LinkedBlockingQueue<const sp<WebmFrame> >& sink,
        uint64_t timeCodeScale,
        int64_t startTimeRealUs,
        int32_t startTimeOffsetMs,
        int numPeers,
        bool realTimeRecording);

    void run();
    status_t start();
    status_t resume();
    status_t pause();
    status_t stop();
    int64_t getDurationUs() {
        return mTrackDurationUs;
    }

private:
    const sp<MediaSource> mSource;
    const uint64_t mTimeCodeScale;  // nanoseconds per timecode unit
    uint64_t mStartTimeUs;

    // Lifecycle flags: written by the control thread, read by run().
    volatile bool mDone;
    volatile bool mPaused;
    volatile bool mResumed;
    volatile bool mStarted;
    volatile bool mReachedEOS;
    int64_t mTrackDurationUs;

    void clearFlags();
};
+} /* namespace android */
+
+#endif /* WEBMFRAMETHREAD_H_ */
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
new file mode 100644
index 0000000..03cf92a
--- /dev/null
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "WebmWriter"
+
+#include "EbmlUtil.h"
+#include "WebmWriter.h"
+
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <utils/Errors.h>
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <inttypes.h>
+
+using namespace webm;
+
namespace {
// Number of bytes needed to Xiph-lace-encode a payload of |size| bytes:
// one 0xff byte per full 255, plus one terminating byte for the rest.
size_t XiphLaceCodeLen(size_t size) {
    return size / 0xff + 1;
}

// Writes the Xiph lacing code for |size| into |buf|; returns the number
// of bytes written.
size_t XiphLaceEnc(uint8_t *buf, size_t size) {
    size_t n = 0;
    while (size >= 0xff) {
        buf[n++] = 0xff;
        size -= 0xff;
    }
    buf[n++] = size;
    return n;
}
}
+
+namespace android {
+
+static const int64_t kMinStreamableFileSizeInBytes = 5 * 1024 * 1024;
+
// Constructs a writer on a duplicated fd; mInitCheck reflects whether the
// dup succeeded. The sink thread receives mFd and mSegmentDataStart by
// reference, so it observes the values start() fills in later.
WebmWriter::WebmWriter(int fd)
    : mFd(dup(fd)),
      mInitCheck(mFd < 0 ? NO_INIT : OK),
      mTimeCodeScale(1000000),  // default: 1 ms per timecode unit
      mStartTimestampUs(0),
      mStartTimeOffsetMs(0),
      mSegmentOffset(0),
      mSegmentDataStart(0),
      mInfoOffset(0),
      mInfoSize(0),
      mTracksOffset(0),
      mCuesOffset(0),
      mPaused(false),
      mStarted(false),
      mIsFileSizeLimitExplicitlyRequested(false),
      mIsRealTimeRecording(false),
      mStreamableFile(true),
      mEstimatedCuesSize(0) {
    mStreams[kAudioIndex] = WebmStream(kAudioType, "Audio", &WebmWriter::audioTrack);
    mStreams[kVideoIndex] = WebmStream(kVideoType, "Video", &WebmWriter::videoTrack);
    mSinkThread = new WebmFrameSinkThread(
        mFd,
        mSegmentDataStart,
        mStreams[kVideoIndex].mSink,
        mStreams[kAudioIndex].mSink,
        mCuePoints);
}
+
// Constructs a writer that creates/truncates the given path itself;
// mInitCheck stays NO_INIT if the open fails.
WebmWriter::WebmWriter(const char *filename)
    : mInitCheck(NO_INIT),
      mTimeCodeScale(1000000),  // default: 1 ms per timecode unit
      mStartTimestampUs(0),
      mStartTimeOffsetMs(0),
      mSegmentOffset(0),
      mSegmentDataStart(0),
      mInfoOffset(0),
      mInfoSize(0),
      mTracksOffset(0),
      mCuesOffset(0),
      mPaused(false),
      mStarted(false),
      mIsFileSizeLimitExplicitlyRequested(false),
      mIsRealTimeRecording(false),
      mStreamableFile(true),
      mEstimatedCuesSize(0) {
    mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
    if (mFd >= 0) {
        ALOGV("fd %d; flags: %o", mFd, fcntl(mFd, F_GETFL, 0));
        mInitCheck = OK;
    }
    mStreams[kAudioIndex] = WebmStream(kAudioType, "Audio", &WebmWriter::audioTrack);
    mStreams[kVideoIndex] = WebmStream(kVideoType, "Video", &WebmWriter::videoTrack);
    mSinkThread = new WebmFrameSinkThread(
        mFd,
        mSegmentDataStart,
        mStreams[kVideoIndex].mSink,
        mStreams[kAudioIndex].mSink,
        mCuePoints);
}
+
+// static
+sp<WebmElement> WebmWriter::videoTrack(const sp<MetaData>& md) {
+ int32_t width, height;
+ CHECK(md->findInt32(kKeyWidth, &width));
+ CHECK(md->findInt32(kKeyHeight, &height));
+ return WebmElement::VideoTrackEntry(width, height);
+}
+
+// static
// Builds the Vorbis audio TrackEntry. CodecPrivate is the three Vorbis
// headers joined with Xiph lacing: a packet-count byte (2), the laced
// sizes of the first two headers (the third is implied), then the three
// headers back to back. The comment header is a minimal hard-coded
// "android" vendor string; identification and setup headers come from
// the source's metadata.
sp<WebmElement> WebmWriter::audioTrack(const sp<MetaData>& md) {
    int32_t nChannels, samplerate;
    uint32_t type;
    const void *headerData1;
    const char headerData2[] = { 3, 'v', 'o', 'r', 'b', 'i', 's', 7, 0, 0, 0,
        'a', 'n', 'd', 'r', 'o', 'i', 'd', 0, 0, 0, 0, 1 };
    const void *headerData3;
    size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;

    CHECK(md->findInt32(kKeyChannelCount, &nChannels));
    CHECK(md->findInt32(kKeySampleRate, &samplerate));
    CHECK(md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1));
    CHECK(md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3));

    // 1 byte for the packet count + lacing codes for headers 1 and 2
    // (header 3's size is implied by the element size) + the payloads.
    size_t codecPrivateSize = 1;
    codecPrivateSize += XiphLaceCodeLen(headerSize1);
    codecPrivateSize += XiphLaceCodeLen(headerSize2);
    codecPrivateSize += headerSize1 + headerSize2 + headerSize3;

    off_t off = 0;
    sp<ABuffer> codecPrivateBuf = new ABuffer(codecPrivateSize);
    uint8_t *codecPrivateData = codecPrivateBuf->data();
    codecPrivateData[off++] = 2;  // number of laced packets minus one

    off += XiphLaceEnc(codecPrivateData + off, headerSize1);
    off += XiphLaceEnc(codecPrivateData + off, headerSize2);

    memcpy(codecPrivateData + off, headerData1, headerSize1);
    off += headerSize1;
    memcpy(codecPrivateData + off, headerData2, headerSize2);
    off += headerSize2;
    memcpy(codecPrivateData + off, headerData3, headerSize3);

    sp<WebmElement> entry = WebmElement::AudioTrackEntry(
        nChannels,
        samplerate,
        codecPrivateBuf);
    return entry;
}
+
+size_t WebmWriter::numTracks() {
+ Mutex::Autolock autolock(mLock);
+
+ size_t numTracks = 0;
+ for (size_t i = 0; i < kMaxStreams; ++i) {
+ if (mStreams[i].mTrackEntry != NULL) {
+ numTracks++;
+ }
+ }
+
+ return numTracks;
+}
+
// Estimates (and doubles, via `factor`) the number of bytes to reserve
// for the Cues element, from the configured size/duration limits and the
// expected bit rate. Ported from MPEG4Writer's estimateMoovBoxSize.
uint64_t WebmWriter::estimateCuesSize(int32_t bitRate) {
    // This implementation is based on estimateMoovBoxSize in MPEG4Writer.
    //
    // Statistical analysis shows that metadata usually accounts
    // for a small portion of the total file size, usually < 0.6%.

    // The default MIN_MOOV_BOX_SIZE is set to 0.6% x 1MB / 2,
    // where 1MB is the common file size limit for MMS application.
    // The default MAX _MOOV_BOX_SIZE value is based on about 3
    // minute video recording with a bit rate about 3 Mbps, because
    // statistics also show that most of the video captured are going
    // to be less than 3 minutes.

    // If the estimation is wrong, we will pay the price of wasting
    // some reserved space. This should not happen so often statistically.
    static const int32_t factor = 2;
    static const int64_t MIN_CUES_SIZE = 3 * 1024;  // 3 KB
    static const int64_t MAX_CUES_SIZE = (180 * 3000000 * 6LL / 8000);
    int64_t size = MIN_CUES_SIZE;

    // Max file size limit is set
    if (mMaxFileSizeLimitBytes != 0 && mIsFileSizeLimitExplicitlyRequested) {
        size = mMaxFileSizeLimitBytes * 6 / 1000;
    }

    // Max file duration limit is set
    if (mMaxFileDurationLimitUs != 0) {
        if (bitRate > 0) {
            int64_t size2 = ((mMaxFileDurationLimitUs * bitRate * 6) / 1000 / 8000000);
            if (mMaxFileSizeLimitBytes != 0 && mIsFileSizeLimitExplicitlyRequested) {
                // When both file size and duration limits are set,
                // we use the smaller limit of the two.
                if (size > size2) {
                    size = size2;
                }
            } else {
                // Only max file duration limit is set
                size = size2;
            }
        }
    }

    // Clamp into [MIN_CUES_SIZE, MAX_CUES_SIZE].
    if (size < MIN_CUES_SIZE) {
        size = MIN_CUES_SIZE;
    }

    // Any long duration recording will be probably end up with
    // non-streamable webm file.
    if (size > MAX_CUES_SIZE) {
        size = MAX_CUES_SIZE;
    }

    ALOGV("limits: %" PRId64 "/%" PRId64 " bytes/us,"
        " bit rate: %d bps and the estimated cues size %" PRId64 " bytes",
        mMaxFileSizeLimitBytes, mMaxFileDurationLimitUs, bitRate, size);
    return factor * size;
}
+
// Creates the frame-producer thread for stream |idx|: a media-source
// thread when a source was added, otherwise a dummy thread that just
// signals EOS so the sink does not block on the missing track.
// Idempotent: does nothing if the thread already exists.
void WebmWriter::initStream(size_t idx) {
    if (mStreams[idx].mThread != NULL) {
        return;
    }
    if (mStreams[idx].mSource == NULL) {
        ALOGV("adding dummy source ... ");
        mStreams[idx].mThread = new WebmFrameEmptySourceThread(
            mStreams[idx].mType, mStreams[idx].mSink);
    } else {
        ALOGV("adding source %p", mStreams[idx].mSource.get());
        mStreams[idx].mThread = new WebmFrameMediaSourceThread(
            mStreams[idx].mSource,
            mStreams[idx].mType,
            mStreams[idx].mSink,
            mTimeCodeScale,
            mStartTimestampUs,
            mStartTimeOffsetMs,
            numTracks(),
            mIsRealTimeRecording);
    }
}
+
+void WebmWriter::release() {
+ close(mFd);
+ mFd = -1;
+ mInitCheck = NO_INIT;
+ mStarted = false;
+}
+
// Finalizes the recording: stops all threads, writes the Cues, patches
// the segment size and duration fields, writes the real SeekHead over
// the reserved void, and closes the file. Reached from stop() and the
// destructor.
status_t WebmWriter::reset() {
    if (mInitCheck != OK) {
        return OK;
    } else {
        if (!mStarted) {
            release();
            return OK;
        }
    }

    // Stop the source threads, tracking the longest/shortest track
    // durations and the first error encountered.
    status_t err = OK;
    int64_t maxDurationUs = 0;
    int64_t minDurationUs = 0x7fffffffffffffffLL;
    for (int i = 0; i < kMaxStreams; ++i) {
        if (mStreams[i].mThread == NULL) {
            continue;
        }

        status_t status = mStreams[i].mThread->stop();
        if (err == OK && status != OK) {
            err = status;
        }

        int64_t durationUs = mStreams[i].mThread->getDurationUs();
        if (durationUs > maxDurationUs) {
            maxDurationUs = durationUs;
        }
        if (durationUs < minDurationUs) {
            minDurationUs = durationUs;
        }
    }

    if (numTracks() > 1) {
        ALOGD("Duration from tracks range is [%" PRId64 ", %" PRId64 "] us", minDurationUs, maxDurationUs);
    }

    mSinkThread->stop();

    // Do not write out movie header on error.
    if (err != OK) {
        release();
        return err;
    }

    // Write the cues into the space reserved by start(); if they no
    // longer fit (or would leave a gap too small for an EBML Void
    // element), append them at the current end of the file instead.
    sp<WebmElement> cues = new WebmMaster(kMkvCues, mCuePoints);
    uint64_t cuesSize = cues->totalSize();
    // TRICKY Even when the cues do fit in the space we reserved, if they do not fit
    // perfectly, we still need to check if there is enough "extra space" to write an
    // EBML void element.
    if (cuesSize != mEstimatedCuesSize && cuesSize > mEstimatedCuesSize - kMinEbmlVoidSize) {
        mCuesOffset = ::lseek(mFd, 0, SEEK_CUR);
        cues->write(mFd, cuesSize);
    } else {
        uint64_t spaceSize;
        ::lseek(mFd, mCuesOffset, SEEK_SET);
        cues->write(mFd, cuesSize);
        sp<WebmElement> space = new EbmlVoid(mEstimatedCuesSize - cuesSize);
        space->write(mFd, spaceSize);
    }

    mCuePoints.clear();
    mStreams[kVideoIndex].mSink.clear();
    mStreams[kAudioIndex].mSink.clear();

    // Patch the segment's size field now that the total size is known.
    uint8_t bary[sizeof(uint64_t)];
    uint64_t totalSize = ::lseek(mFd, 0, SEEK_END);
    uint64_t segmentSize = totalSize - mSegmentDataStart;
    ::lseek(mFd, mSegmentOffset + sizeOf(kMkvSegment), SEEK_SET);
    uint64_t segmentSizeCoded = encodeUnsigned(segmentSize, sizeOf(kMkvUnknownLength));
    serializeCodedUnsigned(segmentSizeCoded, bary);
    ::write(mFd, bary, sizeOf(kMkvUnknownLength));

    // Overwrite the Duration payload inside the SegmentInfo element.
    uint64_t size;
    uint64_t durationOffset = mInfoOffset + sizeOf(kMkvInfo) + sizeOf(mInfoSize)
        + sizeOf(kMkvSegmentDuration) + sizeOf(sizeof(double));
    sp<WebmElement> duration = new WebmFloat(
        kMkvSegmentDuration,
        (double) (maxDurationUs * 1000 / mTimeCodeScale));
    duration->serializePayload(bary);
    ::lseek(mFd, durationOffset, SEEK_SET);
    ::write(mFd, bary, sizeof(double));

    // Replace the reserved void at the start of the segment with the
    // real SeekHead, padding the remainder with another void element.
    List<sp<WebmElement> > seekEntries;
    seekEntries.push_back(WebmElement::SeekEntry(kMkvInfo, mInfoOffset - mSegmentDataStart));
    seekEntries.push_back(WebmElement::SeekEntry(kMkvTracks, mTracksOffset - mSegmentDataStart));
    seekEntries.push_back(WebmElement::SeekEntry(kMkvCues, mCuesOffset - mSegmentDataStart));
    sp<WebmElement> seekHead = new WebmMaster(kMkvSeekHead, seekEntries);

    uint64_t metaSeekSize;
    ::lseek(mFd, mSegmentDataStart, SEEK_SET);
    seekHead->write(mFd, metaSeekSize);

    uint64_t spaceSize;
    sp<WebmElement> space = new EbmlVoid(kMaxMetaSeekSize - metaSeekSize);
    space->write(mFd, spaceSize);

    release();
    return err;
}
+
// Registers an encoded source track. At most one VP8 video track and one
// Vorbis audio track are supported, and only before start().
status_t WebmWriter::addSource(const sp<MediaSource> &source) {
    Mutex::Autolock l(mLock);
    if (mStarted) {
        ALOGE("Attempt to add source AFTER recording is started");
        return UNKNOWN_ERROR;
    }

    // At most 2 tracks can be supported.
    if (mStreams[kVideoIndex].mTrackEntry != NULL
        && mStreams[kAudioIndex].mTrackEntry != NULL) {
        ALOGE("Too many tracks (2) to add");
        return ERROR_UNSUPPORTED;
    }

    CHECK(source != NULL);

    // A track of type other than video or audio is not supported.
    const char *mime;
    source->getFormat()->findCString(kKeyMIMEType, &mime);
    const char *vp8 = MEDIA_MIMETYPE_VIDEO_VP8;
    const char *vorbis = MEDIA_MIMETYPE_AUDIO_VORBIS;

    // Prefix match on the MIME type selects the stream slot.
    size_t streamIndex;
    if (!strncasecmp(mime, vp8, strlen(vp8))) {
        streamIndex = kVideoIndex;
    } else if (!strncasecmp(mime, vorbis, strlen(vorbis))) {
        streamIndex = kAudioIndex;
    } else {
        ALOGE("Track (%s) other than %s or %s is not supported", mime, vp8, vorbis);
        return ERROR_UNSUPPORTED;
    }

    // No more than one video or one audio track is supported.
    if (mStreams[streamIndex].mTrackEntry != NULL) {
        ALOGE("%s track already exists", mStreams[streamIndex].mName);
        return ERROR_UNSUPPORTED;
    }

    // This is the first track of either audio or video.
    // Go ahead to add the track.
    mStreams[streamIndex].mSource = source;
    mStreams[streamIndex].mTrackEntry = mStreams[streamIndex].mMakeTrack(source->getFormat());

    return OK;
}
+
+status_t WebmWriter::start(MetaData *params) {
+ if (mInitCheck != OK) {
+ return UNKNOWN_ERROR;
+ }
+
+ if (mStreams[kVideoIndex].mTrackEntry == NULL
+ && mStreams[kAudioIndex].mTrackEntry == NULL) {
+ ALOGE("No source added");
+ return INVALID_OPERATION;
+ }
+
+ if (mMaxFileSizeLimitBytes != 0) {
+ mIsFileSizeLimitExplicitlyRequested = true;
+ }
+
+ if (params) {
+ int32_t isRealTimeRecording;
+ params->findInt32(kKeyRealTimeRecording, &isRealTimeRecording);
+ mIsRealTimeRecording = isRealTimeRecording;
+ }
+
+ if (mStarted) {
+ if (mPaused) {
+ mPaused = false;
+ mStreams[kAudioIndex].mThread->resume();
+ mStreams[kVideoIndex].mThread->resume();
+ }
+ return OK;
+ }
+
+ if (params) {
+ int32_t tcsl;
+ if (params->findInt32(kKeyTimeScale, &tcsl)) {
+ mTimeCodeScale = tcsl;
+ }
+ }
+ CHECK_GT(mTimeCodeScale, 0);
+ ALOGV("movie time scale: %" PRIu64, mTimeCodeScale);
+
+ /*
+ * When the requested file size limit is small, the priority
+ * is to meet the file size limit requirement, rather than
+ * to make the file streamable. mStreamableFile does not tell
+ * whether the actual recorded file is streamable or not.
+ */
+ mStreamableFile = (!mMaxFileSizeLimitBytes)
+ || (mMaxFileSizeLimitBytes >= kMinStreamableFileSizeInBytes);
+
+ /*
+ * Write various metadata.
+ */
+ sp<WebmElement> ebml, segment, info, seekHead, tracks, cues;
+ ebml = WebmElement::EbmlHeader();
+ segment = new WebmMaster(kMkvSegment);
+ seekHead = new EbmlVoid(kMaxMetaSeekSize);
+ info = WebmElement::SegmentInfo(mTimeCodeScale, 0);
+
+ List<sp<WebmElement> > children;
+ for (size_t i = 0; i < kMaxStreams; ++i) {
+ if (mStreams[i].mTrackEntry != NULL) {
+ children.push_back(mStreams[i].mTrackEntry);
+ }
+ }
+ tracks = new WebmMaster(kMkvTracks, children);
+
+ if (!mStreamableFile) {
+ cues = NULL;
+ } else {
+ int32_t bitRate = -1;
+ if (params) {
+ params->findInt32(kKeyBitRate, &bitRate);
+ }
+ mEstimatedCuesSize = estimateCuesSize(bitRate);
+ CHECK_GE(mEstimatedCuesSize, 8);
+ cues = new EbmlVoid(mEstimatedCuesSize);
+ }
+
+ sp<WebmElement> elems[] = { ebml, segment, seekHead, info, tracks, cues };
+ size_t nElems = sizeof(elems) / sizeof(elems[0]);
+ uint64_t offsets[nElems];
+ uint64_t sizes[nElems];
+ for (uint32_t i = 0; i < nElems; i++) {
+ WebmElement *e = elems[i].get();
+ if (!e) {
+ continue;
+ }
+
+ uint64_t size;
+ offsets[i] = ::lseek(mFd, 0, SEEK_CUR);
+ sizes[i] = e->mSize;
+ e->write(mFd, size);
+ }
+
+ mSegmentOffset = offsets[1];
+ mSegmentDataStart = offsets[2];
+ mInfoOffset = offsets[3];
+ mInfoSize = sizes[3];
+ mTracksOffset = offsets[4];
+ mCuesOffset = offsets[5];
+
+ // start threads
+ if (params) {
+ params->findInt64(kKeyTime, &mStartTimestampUs);
+ }
+
+ initStream(kAudioIndex);
+ initStream(kVideoIndex);
+
+ mStreams[kAudioIndex].mThread->start();
+ mStreams[kVideoIndex].mThread->start();
+ mSinkThread->start();
+
+ mStarted = true;
+ return OK;
+}
+
+status_t WebmWriter::pause() {
+ if (mInitCheck != OK) {
+ return OK;
+ }
+ mPaused = true;
+ status_t err = OK;
+ for (int i = 0; i < kMaxStreams; ++i) {
+ if (mStreams[i].mThread == NULL) {
+ continue;
+ }
+ status_t status = mStreams[i].mThread->pause();
+ if (status != OK) {
+ err = status;
+ }
+ }
+ return err;
+}
+
// MediaWriter interface: stopping the writer finalizes the file (reset()
// writes cues, seek entries and durations before closing).
status_t WebmWriter::stop() {
    return reset();
}

// EOS is reached once the sink thread has flushed its final cluster and
// left its run loop.
bool WebmWriter::reachedEOS() {
    return !mSinkThread->running();
}
+} /* namespace android */
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/WebmWriter.h
new file mode 100644
index 0000000..36b6965
--- /dev/null
+++ b/media/libstagefright/webm/WebmWriter.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WEBMWRITER_H_
+#define WEBMWRITER_H_
+
+#include "WebmConstants.h"
+#include "WebmFrameThread.h"
+#include "LinkedBlockingQueue.h"
+
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaWriter.h>
+
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+
+#include <stdint.h>
+
+using namespace webm;
+
+namespace android {
+
+// MediaWriter implementation that muxes up to one audio and one video
+// track into a WebM container. Frames are pulled by per-stream source
+// threads, queued through per-stream sinks, and written by a single sink
+// thread (mSinkThread).
+class WebmWriter : public MediaWriter {
+public:
+ WebmWriter(int fd);
+ WebmWriter(const char *filename);
+ // Destructor funnels through reset() so an un-stopped writer is still
+ // finalized and its resources released.
+ ~WebmWriter() { reset(); }
+
+
+ virtual status_t addSource(const sp<MediaSource> &source);
+ virtual status_t start(MetaData *param = NULL);
+ virtual status_t stop();
+ virtual status_t pause();
+ virtual bool reachedEOS();
+
+ virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
+ virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
+
+private:
+ int mFd; // output file descriptor
+ status_t mInitCheck; // result of construction-time setup
+
+ uint64_t mTimeCodeScale;
+ int64_t mStartTimestampUs; // taken from kKeyTime in start() params, if present
+ int32_t mStartTimeOffsetMs;
+
+ // Byte offsets/sizes of top-level EBML elements, recorded while the
+ // header is written in start(); used later to patch the file in place.
+ uint64_t mSegmentOffset;
+ uint64_t mSegmentDataStart;
+ uint64_t mInfoOffset;
+ uint64_t mInfoSize;
+ uint64_t mTracksOffset;
+ uint64_t mCuesOffset;
+
+ bool mPaused;
+ bool mStarted;
+ bool mIsFileSizeLimitExplicitlyRequested;
+ bool mIsRealTimeRecording;
+ // When true, start() reserves an EbmlVoid placeholder of
+ // mEstimatedCuesSize bytes (estimated from the bit rate) so Cues can be
+ // written near the head of the file.
+ bool mStreamableFile;
+ uint64_t mEstimatedCuesSize;
+
+ Mutex mLock;
+ List<sp<WebmElement> > mCuePoints;
+
+ // Fixed stream slots: audio in slot 0, video in slot 1.
+ enum {
+ kAudioIndex = 0,
+ kVideoIndex = 1,
+ kMaxStreams = 2,
+ };
+
+ // Per-track state: the media source being pulled, its generated track
+ // entry element, the thread pulling frames, and the queue those frames
+ // are handed off through.
+ struct WebmStream {
+ int mType;
+ const char *mName;
+ // Factory that builds this stream's TrackEntry element from the
+ // source's format metadata (videoTrack()/audioTrack() below).
+ sp<WebmElement> (*mMakeTrack)(const sp<MetaData>&);
+
+ sp<MediaSource> mSource;
+ sp<WebmElement> mTrackEntry;
+ sp<WebmFrameSourceThread> mThread;
+ LinkedBlockingQueue<const sp<WebmFrame> > mSink;
+
+ // Default-constructed slots are marked invalid until configured.
+ WebmStream()
+ : mType(kInvalidType),
+ mName("Invalid"),
+ mMakeTrack(NULL) {
+ }
+
+ WebmStream(int type, const char *name, sp<WebmElement> (*makeTrack)(const sp<MetaData>&))
+ : mType(type),
+ mName(name),
+ mMakeTrack(makeTrack) {
+ }
+
+ // NOTE(review): assignment copies only the static descriptor fields
+ // (type/name/factory), not the source, track entry, thread, or sink —
+ // presumably intentional for (re)labelling slots; confirm at call sites.
+ WebmStream &operator=(const WebmStream &other) {
+ mType = other.mType;
+ mName = other.mName;
+ mMakeTrack = other.mMakeTrack;
+ return *this;
+ }
+ };
+ WebmStream mStreams[kMaxStreams];
+
+ // Single writer thread draining both stream sinks into the file.
+ sp<WebmFrameSinkThread> mSinkThread;
+
+ size_t numTracks();
+ // Estimates how many bytes to reserve for the Cues element given the
+ // stream bit rate (used only when mStreamableFile is true).
+ uint64_t estimateCuesSize(int32_t bitRate);
+ void initStream(size_t idx);
+ void release();
+ status_t reset();
+
+ // TrackEntry factories installed as WebmStream::mMakeTrack.
+ static sp<WebmElement> videoTrack(const sp<MetaData>& md);
+ static sp<WebmElement> audioTrack(const sp<MetaData>& md);
+
+ DISALLOW_EVIL_CONSTRUCTORS(WebmWriter);
+};
+
+} /* namespace android */
+#endif /* WEBMWRITER_H_ */
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.cpp b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
index 1887b8b..e88a3bd 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.cpp
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
@@ -685,9 +685,8 @@ status_t RTPSender::onRTCPData(const sp<ABuffer> &buffer) {
return OK;
}
-status_t RTPSender::parseReceiverReport(const uint8_t *data, size_t size) {
- // hexdump(data, size);
-
+status_t RTPSender::parseReceiverReport(
+ const uint8_t *data, size_t /* size */) {
float fractionLost = data[12] / 256.0f;
ALOGI("lost %.2f %% of packets during report interval.",
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 753b3ec..2834a66 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -74,19 +74,6 @@ Converter::Converter(
}
}
-static void ReleaseMediaBufferReference(const sp<ABuffer> &accessUnit) {
- void *mbuf;
- if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
- && mbuf != NULL) {
- ALOGV("releasing mbuf %p", mbuf);
-
- accessUnit->meta()->setPointer("mediaBuffer", NULL);
-
- static_cast<MediaBuffer *>(mbuf)->release();
- mbuf = NULL;
- }
-}
-
void Converter::releaseEncoder() {
if (mEncoder == NULL) {
return;
@@ -95,18 +82,7 @@ void Converter::releaseEncoder() {
mEncoder->release();
mEncoder.clear();
- while (!mInputBufferQueue.empty()) {
- sp<ABuffer> accessUnit = *mInputBufferQueue.begin();
- mInputBufferQueue.erase(mInputBufferQueue.begin());
-
- ReleaseMediaBufferReference(accessUnit);
- }
-
- for (size_t i = 0; i < mEncoderInputBuffers.size(); ++i) {
- sp<ABuffer> accessUnit = mEncoderInputBuffers.itemAt(i);
- ReleaseMediaBufferReference(accessUnit);
- }
-
+ mInputBufferQueue.clear();
mEncoderInputBuffers.clear();
mEncoderOutputBuffers.clear();
}
@@ -328,7 +304,7 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) {
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
- ReleaseMediaBufferReference(accessUnit);
+ accessUnit->setMediaBufferBase(NULL);
}
break;
}
@@ -351,15 +327,16 @@ void Converter::onMessageReceived(const sp<AMessage> &msg) {
ALOGI("dropping frame.");
}
- ReleaseMediaBufferReference(accessUnit);
+ accessUnit->setMediaBufferBase(NULL);
break;
}
#if 0
- void *mbuf;
- if (accessUnit->meta()->findPointer("mediaBuffer", &mbuf)
- && mbuf != NULL) {
+ MediaBuffer *mbuf =
+ (MediaBuffer *)(accessUnit->getMediaBufferBase());
+ if (mbuf != NULL) {
ALOGI("queueing mbuf %p", mbuf);
+ mbuf->release();
}
#endif
@@ -647,13 +624,13 @@ status_t Converter::feedEncoderInputBuffers() {
buffer->data(),
buffer->size());
- void *mediaBuffer;
- if (buffer->meta()->findPointer("mediaBuffer", &mediaBuffer)
- && mediaBuffer != NULL) {
- mEncoderInputBuffers.itemAt(bufferIndex)->meta()
- ->setPointer("mediaBuffer", mediaBuffer);
+ MediaBuffer *mediaBuffer =
+ (MediaBuffer *)(buffer->getMediaBufferBase());
+ if (mediaBuffer != NULL) {
+ mEncoderInputBuffers.itemAt(bufferIndex)->setMediaBufferBase(
+ mediaBuffer);
- buffer->meta()->setPointer("mediaBuffer", NULL);
+ buffer->setMediaBufferBase(NULL);
}
} else {
flags = MediaCodec::BUFFER_FLAG_EOS;
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.cpp b/media/libstagefright/wifi-display/source/MediaPuller.cpp
index 7e8891d..86b918f 100644
--- a/media/libstagefright/wifi-display/source/MediaPuller.cpp
+++ b/media/libstagefright/wifi-display/source/MediaPuller.cpp
@@ -179,7 +179,7 @@ void MediaPuller::onMessageReceived(const sp<AMessage> &msg) {
} else {
// video encoder will release MediaBuffer when done
// with underlying data.
- accessUnit->meta()->setPointer("mediaBuffer", mbuf);
+ accessUnit->setMediaBufferBase(mbuf);
}
sp<AMessage> notify = mNotify->dup();
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 286ea13..2cb4786 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -29,6 +29,7 @@
#include <binder/IServiceManager.h>
#include <cutils/properties.h>
#include <media/IHDCP.h>
+#include <media/IMediaHTTPService.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -749,7 +750,8 @@ status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer(
mExtractor = new NuMediaExtractor;
- status_t err = mExtractor->setDataSource(mMediaPath.c_str());
+ status_t err = mExtractor->setDataSource(
+ NULL /* httpService */, mMediaPath.c_str());
if (err != OK) {
return err;
@@ -1053,7 +1055,7 @@ status_t WifiDisplaySource::PlaybackSession::addVideoSource(
err = source->setMaxAcquiredBufferCount(numInputBuffers);
CHECK_EQ(err, (status_t)OK);
- mBufferQueue = source->getBufferQueue();
+ mProducer = source->getProducer();
return OK;
}
@@ -1077,7 +1079,7 @@ status_t WifiDisplaySource::PlaybackSession::addAudioSource(bool usePCMAudio) {
}
sp<IGraphicBufferProducer> WifiDisplaySource::PlaybackSession::getSurfaceTexture() {
- return mBufferQueue;
+ return mProducer;
}
void WifiDisplaySource::PlaybackSession::requestIDRFrame() {
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index 5c8ee94..2824143 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -25,7 +25,6 @@
namespace android {
struct ABuffer;
-struct BufferQueue;
struct IHDCP;
struct IGraphicBufferProducer;
struct MediaPuller;
@@ -111,7 +110,7 @@ private:
int64_t mLastLifesignUs;
- sp<BufferQueue> mBufferQueue;
+ sp<IGraphicBufferProducer> mProducer;
KeyedVector<size_t, sp<Track> > mTracks;
ssize_t mVideoTrackIndex;
diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.cpp b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
index cc8dee3..59d7e6e 100644
--- a/media/libstagefright/wifi-display/source/RepeaterSource.cpp
+++ b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
@@ -79,6 +79,8 @@ status_t RepeaterSource::stop() {
ALOGV("stopping");
+ status_t err = mSource->stop();
+
if (mLooper != NULL) {
mLooper->stop();
mLooper.clear();
@@ -92,7 +94,6 @@ status_t RepeaterSource::stop() {
mBuffer = NULL;
}
- status_t err = mSource->stop();
ALOGV("stopped");
diff --git a/media/libstagefright/wifi-display/source/TSPacketizer.cpp b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
index eeb3700..50d317a 100644
--- a/media/libstagefright/wifi-display/source/TSPacketizer.cpp
+++ b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
@@ -565,7 +565,7 @@ status_t TSPacketizer::packetize(
}
}
- // size_t numPaddingBytes = sizeAvailableForPayload - numBytesOfPayload;
+ size_t numPaddingBytes = sizeAvailableForPayload - numBytesOfPayload;
ALOGV("packet 1 contains %zd padding bytes and %zd bytes of payload",
numPaddingBytes, numBytesOfPayload);
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 05e4018..da405e2 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -746,7 +746,7 @@ status_t WifiDisplaySource::sendM16(int32_t sessionID) {
}
status_t WifiDisplaySource::onReceiveM1Response(
- int32_t sessionID, const sp<ParsedMessage> &msg) {
+ int32_t /* sessionID */, const sp<ParsedMessage> &msg) {
int32_t statusCode;
if (!msg->getStatusCode(&statusCode)) {
return ERROR_MALFORMED;
@@ -991,7 +991,7 @@ status_t WifiDisplaySource::onReceiveM4Response(
}
status_t WifiDisplaySource::onReceiveM5Response(
- int32_t sessionID, const sp<ParsedMessage> &msg) {
+ int32_t /* sessionID */, const sp<ParsedMessage> &msg) {
int32_t statusCode;
if (!msg->getStatusCode(&statusCode)) {
return ERROR_MALFORMED;
@@ -1005,7 +1005,7 @@ status_t WifiDisplaySource::onReceiveM5Response(
}
status_t WifiDisplaySource::onReceiveM16Response(
- int32_t sessionID, const sp<ParsedMessage> &msg) {
+ int32_t sessionID, const sp<ParsedMessage> & /* msg */) {
// If only the response was required to include a "Session:" header...
CHECK_EQ(sessionID, mClientSessionID);
@@ -1680,7 +1680,7 @@ WifiDisplaySource::HDCPObserver::HDCPObserver(
}
void WifiDisplaySource::HDCPObserver::notify(
- int msg, int ext1, int ext2, const Parcel *obj) {
+ int msg, int ext1, int ext2, const Parcel * /* obj */) {
sp<AMessage> notify = mNotify->dup();
notify->setInt32("msg", msg);
notify->setInt32("ext1", ext1);
diff --git a/media/libstagefright/yuv/Android.mk b/media/libstagefright/yuv/Android.mk
index b3f7b1b..bb86dfc 100644
--- a/media/libstagefright/yuv/Android.mk
+++ b/media/libstagefright/yuv/Android.mk
@@ -12,5 +12,7 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_MODULE:= libstagefright_yuv
+LOCAL_CFLAGS += -Werror
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/yuv/YUVImage.cpp b/media/libstagefright/yuv/YUVImage.cpp
index 7b9000b..bb3e2fd 100644
--- a/media/libstagefright/yuv/YUVImage.cpp
+++ b/media/libstagefright/yuv/YUVImage.cpp
@@ -226,8 +226,8 @@ void YUVImage::fastCopyRectangle420Planar(
&ySrcOffsetIncrement, &uSrcOffsetIncrement, &vSrcOffsetIncrement);
int32_t yDestOffsetIncrement;
- int32_t uDestOffsetIncrement;
- int32_t vDestOffsetIncrement;
+ int32_t uDestOffsetIncrement = 0;
+ int32_t vDestOffsetIncrement = 0;
destImage.getOffsetIncrementsPerDataRow(
&yDestOffsetIncrement, &uDestOffsetIncrement, &vDestOffsetIncrement);
@@ -309,7 +309,7 @@ void YUVImage::fastCopyRectangle420SemiPlanar(
int32_t yDestOffsetIncrement;
int32_t uDestOffsetIncrement;
- int32_t vDestOffsetIncrement;
+ int32_t vDestOffsetIncrement = 0;
destImage.getOffsetIncrementsPerDataRow(
&yDestOffsetIncrement, &uDestOffsetIncrement, &vDestOffsetIncrement);
@@ -393,9 +393,9 @@ bool YUVImage::writeToPPM(const char *filename) const {
fprintf(fp, "255\n");
for (int32_t y = 0; y < mHeight; ++y) {
for (int32_t x = 0; x < mWidth; ++x) {
- uint8_t yValue;
- uint8_t uValue;
- uint8_t vValue;
+ uint8_t yValue = 0u;
+ uint8_t uValue = 0u;
+ uint8_t vValue = 0u;
getPixelValue(x, y, &yValue, &uValue, & vValue);
uint8_t rValue;