summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xlibvideoeditor/Android.mk1
-rwxr-xr-xlibvideoeditor/lvpp/Android.mk104
-rwxr-xr-xlibvideoeditor/lvpp/DummyAudioSource.cpp163
-rwxr-xr-xlibvideoeditor/lvpp/DummyAudioSource.h73
-rwxr-xr-xlibvideoeditor/lvpp/DummyVideoSource.cpp171
-rwxr-xr-xlibvideoeditor/lvpp/DummyVideoSource.h74
-rwxr-xr-xlibvideoeditor/lvpp/I420ColorConverter.cpp55
-rwxr-xr-xlibvideoeditor/lvpp/I420ColorConverter.h35
-rwxr-xr-xlibvideoeditor/lvpp/NativeWindowRenderer.cpp623
-rwxr-xr-xlibvideoeditor/lvpp/NativeWindowRenderer.h182
-rwxr-xr-xlibvideoeditor/lvpp/PreviewPlayer.cpp2082
-rwxr-xr-xlibvideoeditor/lvpp/PreviewPlayer.h298
-rwxr-xr-xlibvideoeditor/lvpp/PreviewRenderer.cpp141
-rwxr-xr-xlibvideoeditor/lvpp/PreviewRenderer.h67
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorAudioPlayer.cpp896
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorAudioPlayer.h141
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorBGAudioProcessing.cpp293
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorBGAudioProcessing.h110
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorPlayer.cpp577
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorPlayer.h160
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorPreviewController.cpp1467
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorPreviewController.h157
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorSRC.cpp335
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorSRC.h87
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorTools.cpp3883
-rwxr-xr-xlibvideoeditor/lvpp/VideoEditorTools.h153
-rwxr-xr-xlibvideoeditor/osal/Android.mk1
-rwxr-xr-xlibvideoeditor/osal/inc/LVOSA_FileReader_optim.h85
-rwxr-xr-xlibvideoeditor/osal/inc/LV_Macros.h107
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_CharStar.h78
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Clock.h48
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_CoreID.h199
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Debug.h266
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Error.h174
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Export.h61
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_FileCommon.h118
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_FileCommon_priv.h106
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_FileReader.h131
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_FileReader_priv.h38
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_FileWriter.h135
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_FileWriter_priv.h37
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Memory.h57
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Mutex.h56
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_OptionID.h72
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Semaphore.h54
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Thread.h127
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Thread_priv.h63
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Time.h48
-rwxr-xr-xlibvideoeditor/osal/inc/M4OSA_Types.h104
-rwxr-xr-xlibvideoeditor/osal/src/Android.mk67
-rwxr-xr-xlibvideoeditor/osal/src/LVOSA_FileReader_optim.c1052
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_CharStar.c506
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_Clock.c101
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_FileCommon.c667
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_FileReader.c549
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_FileWriter.c574
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_Mutex.c275
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_Random.c90
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_Semaphore.c263
-rwxr-xr-xlibvideoeditor/osal/src/M4OSA_Thread.c797
-rwxr-xr-xlibvideoeditor/osal/src/M4PSW_DebugTrace.c84
-rwxr-xr-xlibvideoeditor/osal/src/M4PSW_MemoryInterface.c71
-rwxr-xr-xlibvideoeditor/osal/src/M4PSW_Trace.c98
-rwxr-xr-xlibvideoeditor/vss/3gpwriter/Android.mk1
-rwxr-xr-xlibvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h329
-rwxr-xr-xlibvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h112
-rwxr-xr-xlibvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h245
-rwxr-xr-xlibvideoeditor/vss/3gpwriter/src/Android.mk54
-rwxr-xr-xlibvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c914
-rwxr-xr-xlibvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c312
-rwxr-xr-xlibvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c5370
-rwxr-xr-xlibvideoeditor/vss/Android.mk1
-rwxr-xr-xlibvideoeditor/vss/common/inc/From2iToMono_16.h30
-rwxr-xr-xlibvideoeditor/vss/common/inc/LVM_Types.h180
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4AD_Common.h302
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4AD_Null.h54
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4AIR_API.h184
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4AMRR_CoreReader.h251
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4Common_types.h235
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4DA_Types.h203
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4DECODER_Common.h389
-rw-r--r--libvideoeditor/vss/common/inc/M4DECODER_Null.h57
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h254
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4ENCODER_common.h471
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4OSA_CoreID.h200
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4PCMR_CoreReader.h132
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4READER_3gpCom.h193
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4READER_Amr.h61
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4READER_Common.h717
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4READER_Pcm.h59
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4SYS_AccessUnit.h90
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4SYS_Stream.h185
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h48
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h50
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4VD_Tools.h47
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4VFL_transition.h102
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4VIFI_Clip.h35
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4VIFI_Defines.h154
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h785
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4VPP_API.h151
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4WRITER_common.h261
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4_BitStreamParser.h134
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4_Common.h163
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4_Utils.h49
-rwxr-xr-xlibvideoeditor/vss/common/inc/M4_VideoEditingCommon.h336
-rwxr-xr-xlibvideoeditor/vss/common/inc/MonoTo2I_16.h30
-rwxr-xr-xlibvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h26
-rwxr-xr-xlibvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h132
-rwxr-xr-xlibvideoeditor/vss/common/inc/SSRC.h296
-rwxr-xr-xlibvideoeditor/vss/common/inc/VideoEditorResampler.h45
-rwxr-xr-xlibvideoeditor/vss/common/inc/marker.h78
-rwxr-xr-xlibvideoeditor/vss/inc/M4EXIFC_CommonAPI.h118
-rwxr-xr-xlibvideoeditor/vss/inc/M4PTO3GPP_API.h256
-rwxr-xr-xlibvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h104
-rwxr-xr-xlibvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h223
-rwxr-xr-xlibvideoeditor/vss/inc/M4VSS3GPP_API.h819
-rwxr-xr-xlibvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h278
-rwxr-xr-xlibvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h302
-rwxr-xr-xlibvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h156
-rwxr-xr-xlibvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h651
-rwxr-xr-xlibvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h781
-rwxr-xr-xlibvideoeditor/vss/inc/M4xVSS_API.h590
-rwxr-xr-xlibvideoeditor/vss/inc/M4xVSS_Internal.h587
-rwxr-xr-xlibvideoeditor/vss/mcs/Android.mk1
-rwxr-xr-xlibvideoeditor/vss/mcs/inc/M4MCS_API.h575
-rwxr-xr-xlibvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h123
-rwxr-xr-xlibvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h75
-rwxr-xr-xlibvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h344
-rwxr-xr-xlibvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h606
-rwxr-xr-xlibvideoeditor/vss/mcs/src/Android.mk58
-rwxr-xr-xlibvideoeditor/vss/mcs/src/M4MCS_API.c10949
-rwxr-xr-xlibvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c183
-rwxr-xr-xlibvideoeditor/vss/mcs/src/M4MCS_Codecs.c917
-rwxr-xr-xlibvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c467
-rwxr-xr-xlibvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c455
-rwxr-xr-xlibvideoeditor/vss/src/Android.mk88
-rwxr-xr-xlibvideoeditor/vss/src/M4AD_Null.c256
-rwxr-xr-xlibvideoeditor/vss/src/M4AIR_API.c968
-rwxr-xr-xlibvideoeditor/vss/src/M4AMRR_CoreReader.c909
-rwxr-xr-xlibvideoeditor/vss/src/M4ChannelConverter.c54
-rwxr-xr-xlibvideoeditor/vss/src/M4DECODER_Null.c436
-rwxr-xr-xlibvideoeditor/vss/src/M4PCMR_CoreReader.c716
-rwxr-xr-xlibvideoeditor/vss/src/M4PTO3GPP_API.c1928
-rwxr-xr-xlibvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c139
-rwxr-xr-xlibvideoeditor/vss/src/M4READER_Amr.c790
-rwxr-xr-xlibvideoeditor/vss/src/M4READER_Pcm.c720
-rwxr-xr-xlibvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c698
-rwxr-xr-xlibvideoeditor/vss/src/M4VD_Tools.c99
-rwxr-xr-xlibvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c219
-rwxr-xr-xlibvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c4139
-rwxr-xr-xlibvideoeditor/vss/src/M4VSS3GPP_Clip.c2100
-rwxr-xr-xlibvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c1032
-rwxr-xr-xlibvideoeditor/vss/src/M4VSS3GPP_Codecs.c1037
-rwxr-xr-xlibvideoeditor/vss/src/M4VSS3GPP_Edit.c3475
-rwxr-xr-xlibvideoeditor/vss/src/M4VSS3GPP_EditAudio.c2013
-rwxr-xr-xlibvideoeditor/vss/src/M4VSS3GPP_EditVideo.c3922
-rwxr-xr-xlibvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c469
-rwxr-xr-xlibvideoeditor/vss/src/M4xVSS_API.c6367
-rwxr-xr-xlibvideoeditor/vss/src/M4xVSS_internal.c4889
-rwxr-xr-xlibvideoeditor/vss/src/VideoEditorResampler.cpp172
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/Android.mk1
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h33
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h39
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h41
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h132
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorMain.h77
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h32
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h102
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h46
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h120
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h36
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/Android.mk72
-rw-r--r--libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.cpp179
-rw-r--r--libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.h90
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp2008
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp991
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp755
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c265
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp803
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp433
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp1764
-rwxr-xr-xlibvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp1304
-rwxr-xr-xlibvideoeditor/vss/video_filters/Android.mk5
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/Android.mk57
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VFL_transition.c510
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c197
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VIFI_Clip.c280
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c201
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c153
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c255
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c278
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c422
-rwxr-xr-xlibvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c423
193 files changed, 103551 insertions, 0 deletions
diff --git a/libvideoeditor/Android.mk b/libvideoeditor/Android.mk
new file mode 100755
index 0000000..5053e7d
--- /dev/null
+++ b/libvideoeditor/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/lvpp/Android.mk b/libvideoeditor/lvpp/Android.mk
new file mode 100755
index 0000000..4c354ec
--- /dev/null
+++ b/libvideoeditor/lvpp/Android.mk
@@ -0,0 +1,104 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvideoeditorplayer
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditorplayer
+
+LOCAL_SRC_FILES:= \
+ VideoEditorTools.cpp \
+ VideoEditorPlayer.cpp \
+ PreviewPlayer.cpp \
+ VideoEditorAudioPlayer.cpp \
+ VideoEditorPreviewController.cpp \
+ VideoEditorSRC.cpp \
+ DummyAudioSource.cpp \
+ DummyVideoSource.cpp \
+ VideoEditorBGAudioProcessing.cpp \
+ PreviewRenderer.cpp \
+ I420ColorConverter.cpp \
+ NativeWindowRenderer.cpp
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_STATIC_LIBRARIES := \
+ libvideoeditor_osal \
+ libstagefright_color_conversion
+
+
+
+LOCAL_SHARED_LIBRARIES := \
+ libaudioutils \
+ libbinder \
+ libutils \
+ libcutils \
+ libmedia \
+ libmedia_native \
+ libdrmframework \
+ libstagefright \
+ libstagefright_omx \
+ libstagefright_foundation \
+ libgui \
+ libaudioflinger \
+ libui \
+ libEGL \
+ libGLESv2
+
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/system/media/audio_utils/include \
+ $(TOP)/frameworks/base/media/libmediaplayerservice \
+ $(TOP)/frameworks/base/media/libstagefright \
+ $(TOP)/frameworks/base/media/libstagefright/include \
+ $(TOP)/frameworks/base/media/libstagefright/rtsp \
+ $(call include-path-for, corecg graphics) \
+ $(TOP)/frameworks/av/libvideoeditor/osal/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
+ $(TOP)/frameworks/av/libvideoeditor/lvpp \
+ $(TOP)/frameworks/base/services/audioflinger \
+ $(TOP)/frameworks/native/include/media/editor \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/native/services/audioflinger
+
+
+LOCAL_SHARED_LIBRARIES += libdl
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+ -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar \
+ -DM4_ENABLE_RENDERINGMODE \
+ -DUSE_STAGEFRIGHT_CODECS \
+ -DUSE_STAGEFRIGHT_AUDIODEC \
+ -DUSE_STAGEFRIGHT_VIDEODEC \
+ -DUSE_STAGEFRIGHT_AUDIOENC \
+ -DUSE_STAGEFRIGHT_VIDEOENC \
+ -DUSE_STAGEFRIGHT_READERS \
+ -DUSE_STAGEFRIGHT_3GPP_READER
+
+include $(BUILD_SHARED_LIBRARY)
+
+#include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/libvideoeditor/lvpp/DummyAudioSource.cpp b/libvideoeditor/lvpp/DummyAudioSource.cpp
new file mode 100755
index 0000000..dbcab68
--- /dev/null
+++ b/libvideoeditor/lvpp/DummyAudioSource.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "DummyAudioSource"
+#include <utils/Log.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MetaData.h>
+#include "DummyAudioSource.h"
+
+
+namespace android {
+
+//static
+sp<DummyAudioSource> DummyAudioSource::Create(
+ int32_t samplingRate, int32_t channelCount,
+ int64_t frameDurationUs, int64_t audioDurationUs) {
+
+ ALOGV("Create ");
+ return new DummyAudioSource(samplingRate,
+ channelCount,
+ frameDurationUs,
+ audioDurationUs);
+
+}
+
+DummyAudioSource::DummyAudioSource(
+ int32_t samplingRate, int32_t channelCount,
+ int64_t frameDurationUs, int64_t audioDurationUs)
+ : mSamplingRate(samplingRate),
+ mChannelCount(channelCount),
+ mFrameDurationUs(frameDurationUs),
+ mNumberOfSamplePerFrame(0),
+ mAudioDurationUs(audioDurationUs),
+ mTimeStampUs(0),
+ mBufferGroup(NULL) {
+
+ mNumberOfSamplePerFrame = (int32_t)
+ ((1L * mSamplingRate * mFrameDurationUs)/1000000);
+ mNumberOfSamplePerFrame = mNumberOfSamplePerFrame * mChannelCount;
+
+ ALOGV("Constructor: E");
+ ALOGV("samplingRate = %d", samplingRate);
+ ALOGV("channelCount = %d", channelCount);
+ ALOGV("frameDurationUs = %lld", frameDurationUs);
+ ALOGV("audioDurationUs = %lld", audioDurationUs);
+ ALOGV("mNumberOfSamplePerFrame = %d", mNumberOfSamplePerFrame);
+ ALOGV("Constructor: X");
+}
+
+DummyAudioSource::~DummyAudioSource() {
+ /* Do nothing here? */
+ ALOGV("~DummyAudioSource");
+}
+
+void DummyAudioSource::setDuration(int64_t audioDurationUs) {
+ ALOGV("setDuration: %lld us added to %lld us",
+ audioDurationUs, mAudioDurationUs);
+
+ Mutex::Autolock autoLock(mLock);
+ mAudioDurationUs += audioDurationUs;
+}
+
+status_t DummyAudioSource::start(MetaData *params) {
+ ALOGV("start: E");
+ status_t err = OK;
+
+ mTimeStampUs = 0;
+
+ mBufferGroup = new MediaBufferGroup;
+ mBufferGroup->add_buffer(
+ new MediaBuffer(mNumberOfSamplePerFrame * sizeof(int16_t)));
+
+ ALOGV("start: X");
+
+ return err;
+}
+
+status_t DummyAudioSource::stop() {
+ ALOGV("stop");
+
+ delete mBufferGroup;
+ mBufferGroup = NULL;
+
+ return OK;
+}
+
+
+sp<MetaData> DummyAudioSource::getFormat() {
+ ALOGV("getFormat");
+
+ sp<MetaData> meta = new MetaData;
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+ meta->setInt32(kKeyChannelCount, mChannelCount);
+ meta->setInt32(kKeySampleRate, mSamplingRate);
+ meta->setInt64(kKeyDuration, mFrameDurationUs);
+ meta->setCString(kKeyDecoderComponent, "DummyAudioSource");
+
+ return meta;
+}
+
+status_t DummyAudioSource::read(
+ MediaBuffer **out, const MediaSource::ReadOptions *options) {
+
+ ALOGV("read: E");
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ CHECK(seekTimeUs >= 0);
+ mTimeStampUs = seekTimeUs;
+ }
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mTimeStampUs >= mAudioDurationUs) {
+ ALOGI("read: EOS reached %lld > %lld",
+ mTimeStampUs, mAudioDurationUs);
+
+ *out = NULL;
+ return ERROR_END_OF_STREAM;
+ }
+ }
+
+ MediaBuffer *buffer;
+ status_t err = mBufferGroup->acquire_buffer(&buffer);
+ if (err != OK) {
+ ALOGE("Failed to acquire buffer from mBufferGroup: %d", err);
+ return err;
+ }
+
+ memset((uint8_t *) buffer->data() + buffer->range_offset(),
+ 0, mNumberOfSamplePerFrame << 1);
+ buffer->set_range(buffer->range_offset(), (mNumberOfSamplePerFrame << 1));
+ buffer->meta_data()->setInt64(kKeyTime, mTimeStampUs);
+
+ ALOGV("read: offset = %d, size = %d, mTimeStampUs = %lld",
+ buffer->range_offset(), buffer->size(), mTimeStampUs);
+
+ mTimeStampUs = mTimeStampUs + mFrameDurationUs;
+ *out = buffer;
+
+ return OK;
+}
+
+}// namespace android
diff --git a/libvideoeditor/lvpp/DummyAudioSource.h b/libvideoeditor/lvpp/DummyAudioSource.h
new file mode 100755
index 0000000..5f25a8c
--- /dev/null
+++ b/libvideoeditor/lvpp/DummyAudioSource.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DUMMY_AUDIOSOURCE_H_
+#define DUMMY_AUDIOSOURCE_H_
+
+#include <media/stagefright/MediaSource.h>
+
+
+namespace android {
+
+class MetaData;
+struct MediaBufferGroup;
+
+struct DummyAudioSource : public MediaSource {
+
+public:
+ static sp<DummyAudioSource> Create(
+ int32_t samplingRate, int32_t channelCount,
+ int64_t frameDurationUs, int64_t audioDurationUs);
+
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual sp<MetaData> getFormat();
+
+ virtual status_t read(
+ MediaBuffer **buffer,
+ const MediaSource::ReadOptions *options = NULL);
+
+ void setDuration(int64_t audioDurationUs);
+
+protected:
+ virtual ~DummyAudioSource();
+
+private:
+ int32_t mSamplingRate;
+ int32_t mChannelCount;
+ int64_t mFrameDurationUs;
+ int32_t mNumberOfSamplePerFrame;
+ int64_t mAudioDurationUs;
+ int64_t mTimeStampUs;
+ Mutex mLock;
+
+ MediaBufferGroup *mBufferGroup;
+
+ DummyAudioSource(
+ int32_t samplingRate, int32_t channelCount,
+ int64_t frameDurationUs, int64_t audioDurationUs);
+
+ // Don't call me
+ DummyAudioSource(const DummyAudioSource &);
+ DummyAudioSource &operator=(const DummyAudioSource &);
+
+};
+
+}//namespace android
+
+
+#endif //DUMMY_AUDIOSOURCE_H_
+
diff --git a/libvideoeditor/lvpp/DummyVideoSource.cpp b/libvideoeditor/lvpp/DummyVideoSource.cpp
new file mode 100755
index 0000000..b06f937
--- /dev/null
+++ b/libvideoeditor/lvpp/DummyVideoSource.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DummyVideoSource"
+#include <stdlib.h>
+#include <utils/Log.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MetaData.h>
+#include "VideoEditorTools.h"
+#include "DummyVideoSource.h"
+
+
+namespace android {
+
+sp<DummyVideoSource> DummyVideoSource::Create(
+ uint32_t width, uint32_t height,
+ uint64_t clipDuration, const char *imageUri) {
+
+ ALOGV("Create");
+ return new DummyVideoSource(
+ width, height, clipDuration, imageUri);
+
+}
+
+
+DummyVideoSource::DummyVideoSource(
+ uint32_t width, uint32_t height,
+ uint64_t clipDuration, const char *imageUri) {
+
+ ALOGV("Constructor: E");
+
+ mFrameWidth = width;
+ mFrameHeight = height;
+ mImageClipDuration = clipDuration;
+ mUri = imageUri;
+ mImageBuffer = NULL;
+
+ ALOGV("%s", mUri);
+ ALOGV("Constructor: X");
+}
+
+
+DummyVideoSource::~DummyVideoSource() {
+ /* Do nothing here? */
+ ALOGV("~DummyVideoSource");
+}
+
+
+
+status_t DummyVideoSource::start(MetaData *params) {
+ ALOGV("start: E");
+
+ // Get the frame buffer from the rgb file, mUri,
+ // and store its content into a MediaBuffer
+ status_t err = LvGetImageThumbNail(
+ (const char *)mUri,
+ mFrameHeight, mFrameWidth,
+ (M4OSA_Void **) &mImageBuffer);
+ if (err != OK) {
+ ALOGE("LvGetImageThumbNail failed: %d", err);
+ return err;
+ }
+
+ mIsFirstImageFrame = true;
+ mImageSeekTime = 0;
+ mImagePlayStartTime = 0;
+ mFrameTimeUs = 0;
+
+ ALOGV("start: X");
+ return OK;
+}
+
+
+status_t DummyVideoSource::stop() {
+ ALOGV("stop");
+ status_t err = OK;
+
+ if (mImageBuffer != NULL) {
+ free(mImageBuffer);
+ mImageBuffer = NULL;
+ }
+
+ return err;
+}
+
+
+sp<MetaData> DummyVideoSource::getFormat() {
+ ALOGV("getFormat");
+
+ sp<MetaData> meta = new MetaData;
+ meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatYUV420Planar);
+ meta->setInt32(kKeyWidth, mFrameWidth);
+ meta->setInt32(kKeyHeight, mFrameHeight);
+ meta->setInt64(kKeyDuration, mImageClipDuration);
+ meta->setCString(kKeyDecoderComponent, "DummyVideoSource");
+
+ return meta;
+}
+
+status_t DummyVideoSource::read(
+ MediaBuffer **out,
+ const MediaSource::ReadOptions *options) {
+
+ ALOGV("read: E");
+
+ const int32_t kTimeScale = 1000; /* time scale in ms */
+ bool seeking = false;
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode seekMode;
+ if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
+ seeking = true;
+ mImageSeekTime = seekTimeUs;
+ M4OSA_clockGetTime(&mImagePlayStartTime, kTimeScale);
+ }
+
+ if ((mImageSeekTime == mImageClipDuration) ||
+ (mFrameTimeUs == (int64_t)mImageClipDuration)) {
+ ALOGV("read: EOS reached");
+ *out = NULL;
+ return ERROR_END_OF_STREAM;
+ }
+
+ status_t err = OK;
+ MediaBuffer *buffer = new MediaBuffer(
+ mImageBuffer, (mFrameWidth * mFrameHeight * 1.5));
+
+ // Set timestamp of buffer
+ if (mIsFirstImageFrame) {
+ M4OSA_clockGetTime(&mImagePlayStartTime, kTimeScale);
+ mFrameTimeUs = (mImageSeekTime + 1);
+ ALOGV("read: jpg 1st frame timeUs = %lld, begin cut time = %ld",
+ mFrameTimeUs, mImageSeekTime);
+
+ mIsFirstImageFrame = false;
+ } else {
+ M4OSA_Time currentTimeMs;
+ M4OSA_clockGetTime(&currentTimeMs, kTimeScale);
+
+ mFrameTimeUs = mImageSeekTime +
+ (currentTimeMs - mImagePlayStartTime) * 1000LL;
+
+ ALOGV("read: jpg frame timeUs = %lld", mFrameTimeUs);
+ }
+
+ buffer->meta_data()->setInt64(kKeyTime, mFrameTimeUs);
+ buffer->set_range(buffer->range_offset(),
+ mFrameWidth * mFrameHeight * 1.5);
+
+ *out = buffer;
+ return err;
+}
+
+}// namespace android
diff --git a/libvideoeditor/lvpp/DummyVideoSource.h b/libvideoeditor/lvpp/DummyVideoSource.h
new file mode 100755
index 0000000..16514f2
--- /dev/null
+++ b/libvideoeditor/lvpp/DummyVideoSource.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DUMMY_VIDEOSOURCE_H_
+#define DUMMY_VIDEOSOURCE_H_
+
+#include <media/stagefright/MediaSource.h>
+#include "M4OSA_Clock.h"
+#include "M4OSA_Time.h"
+#include "M4OSA_Types.h"
+
+namespace android {
+
+class MediaBuffer;
+class MetaData;
+
+struct DummyVideoSource : public MediaSource {
+
+public:
+ static sp<DummyVideoSource> Create(
+ uint32_t width, uint32_t height,
+ uint64_t clipDuration, const char *imageUri);
+
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual sp<MetaData> getFormat();
+
+ virtual status_t read(
+ MediaBuffer **buffer,
+ const MediaSource::ReadOptions *options = NULL);
+
+protected:
+ virtual ~DummyVideoSource();
+
+private:
+ uint32_t mFrameWidth;
+ uint32_t mFrameHeight;
+ uint64_t mImageClipDuration;
+ const char *mUri;
+ int64_t mFrameTimeUs;
+ bool mIsFirstImageFrame;
+ void *mImageBuffer;
+ M4OSA_Time mImagePlayStartTime;
+ uint32_t mImageSeekTime;
+
+ DummyVideoSource(
+ uint32_t width, uint32_t height,
+ uint64_t clipDuration, const char *imageUri);
+
+ // Don't call me
+ DummyVideoSource(const DummyVideoSource &);
+ DummyVideoSource &operator=(const DummyVideoSource &);
+
+};
+
+
+}//namespace android
+
+
+#endif //DUMMY_VIDEOSOURCE_H_
+
diff --git a/libvideoeditor/lvpp/I420ColorConverter.cpp b/libvideoeditor/lvpp/I420ColorConverter.cpp
new file mode 100755
index 0000000..321d3fe
--- /dev/null
+++ b/libvideoeditor/lvpp/I420ColorConverter.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <I420ColorConverter.h>
+#include <cutils/log.h>
+#include <dlfcn.h>
+
+I420ColorConverter::I420ColorConverter() {
+ // Open the shared library
+ mHandle = dlopen("libI420colorconvert.so", RTLD_NOW);
+
+ if (mHandle == NULL) {
+ ALOGW("I420ColorConverter: cannot load libI420colorconvert.so");
+ return;
+ }
+
+ // Find the entry point
+ void (*getI420ColorConverter)(I420ColorConverter *converter) =
+ (void (*)(I420ColorConverter*)) dlsym(mHandle, "getI420ColorConverter");
+
+ if (getI420ColorConverter == NULL) {
+ ALOGW("I420ColorConverter: cannot load getI420ColorConverter");
+ dlclose(mHandle);
+ mHandle = NULL;
+ return;
+ }
+
+ // Fill the function pointers.
+ getI420ColorConverter(this);
+
+ ALOGI("I420ColorConverter: libI420colorconvert.so loaded");
+}
+
+bool I420ColorConverter::isLoaded() {
+ return mHandle != NULL;
+}
+
+I420ColorConverter::~I420ColorConverter() {
+ if (mHandle) {
+ dlclose(mHandle);
+ }
+}
diff --git a/libvideoeditor/lvpp/I420ColorConverter.h b/libvideoeditor/lvpp/I420ColorConverter.h
new file mode 100755
index 0000000..8d48e44
--- /dev/null
+++ b/libvideoeditor/lvpp/I420ColorConverter.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef I420_COLOR_CONVERTER_H
+#define I420_COLOR_CONVERTER_H
+
+#include <II420ColorConverter.h>
+
+// This is a wrapper around the I420 color converter functions in
+// II420ColorConverter, which is loaded from a shared library.
+class I420ColorConverter: public II420ColorConverter {
+public:
+ I420ColorConverter();
+ ~I420ColorConverter();
+
+ // Returns true if the converter functions are successfully loaded.
+ bool isLoaded();
+private:
+ void* mHandle;
+};
+
+#endif /* I420_COLOR_CONVERTER_H */
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.cpp b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
new file mode 100755
index 0000000..0f3ea3c
--- /dev/null
+++ b/libvideoeditor/lvpp/NativeWindowRenderer.cpp
@@ -0,0 +1,623 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "NativeWindowRenderer"
+#include "NativeWindowRenderer.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <cutils/log.h>
+#include <gui/SurfaceTexture.h>
+#include <gui/SurfaceTextureClient.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MetaData.h>
+#include "VideoEditorTools.h"
+
+#define CHECK_EGL_ERROR CHECK(EGL_SUCCESS == eglGetError())
+#define CHECK_GL_ERROR CHECK(GLenum(GL_NO_ERROR) == glGetError())
+
+//
+// Vertex and fragment programs
+//
+
+// The matrix is derived from
+// frameworks/base/media/libstagefright/colorconversion/ColorConverter.cpp
+//
+// R * 255 = 1.164 * (Y - 16) + 1.596 * (V - 128)
+// G * 255 = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128)
+// B * 255 = 1.164 * (Y - 16) + 2.018 * (U - 128)
+//
+// Here we assume YUV are in the range of [0,255], RGB are in the range of
+// [0, 1]
+#define RGB2YUV_MATRIX \
+"const mat4 rgb2yuv = mat4("\
+" 65.52255, -37.79398, 111.98732, 0.00000,"\
+" 128.62729, -74.19334, -93.81088, 0.00000,"\
+" 24.92233, 111.98732, -18.17644, 0.00000,"\
+" 16.00000, 128.00000, 128.00000, 1.00000);\n"
+
+#define YUV2RGB_MATRIX \
+"const mat4 yuv2rgb = mat4("\
+" 0.00456, 0.00456, 0.00456, 0.00000,"\
+" 0.00000, -0.00153, 0.00791, 0.00000,"\
+" 0.00626, -0.00319, 0.00000, 0.00000,"\
+" -0.87416, 0.53133, -1.08599, 1.00000);\n"
+
// Vertex shader shared by all effects: passes the position through and
// transforms the texture coordinates by the SurfaceTexture matrix.
// topDown carries the untransformed y coordinate to the gradient shader.
static const char vSrcNormal[] =
    "attribute vec4 vPosition;\n"
    "attribute vec2 vTexPos;\n"
    "uniform mat4 texMatrix;\n"
    "varying vec2 texCoords;\n"
    "varying float topDown;\n"
    "void main() {\n"
    "  gl_Position = vPosition;\n"
    "  texCoords = (texMatrix * vec4(vTexPos, 0.0, 1.0)).xy;\n"
    "  topDown = vTexPos.y;\n"
    "}\n";

// Fragment shader for EFFECT_NORMAL: straight texture lookup.
static const char fSrcNormal[] =
    "#extension GL_OES_EGL_image_external : require\n"
    "precision mediump float;\n"
    "uniform samplerExternalOES texSampler;\n"
    "varying vec2 texCoords;\n"
    "void main() {\n"
    "  gl_FragColor = texture2D(texSampler, texCoords);\n"
    "}\n";

// Fragment shader for EFFECT_SEPIA: keeps luma, replaces chroma with
// fixed sepia U/V values (117, 139).
static const char fSrcSepia[] =
    "#extension GL_OES_EGL_image_external : require\n"
    "precision mediump float;\n"
    "uniform samplerExternalOES texSampler;\n"
    "varying vec2 texCoords;\n"
    RGB2YUV_MATRIX
    YUV2RGB_MATRIX
    "void main() {\n"
    "  vec4 rgb = texture2D(texSampler, texCoords);\n"
    "  vec4 yuv = rgb2yuv * rgb;\n"
    "  yuv = vec4(yuv.x, 117.0, 139.0, 1.0);\n"
    "  gl_FragColor = yuv2rgb * yuv;\n"
    "}\n";

// Fragment shader for EFFECT_NEGATIVE: inverts luma, keeps chroma.
static const char fSrcNegative[] =
    "#extension GL_OES_EGL_image_external : require\n"
    "precision mediump float;\n"
    "uniform samplerExternalOES texSampler;\n"
    "varying vec2 texCoords;\n"
    RGB2YUV_MATRIX
    YUV2RGB_MATRIX
    "void main() {\n"
    "  vec4 rgb = texture2D(texSampler, texCoords);\n"
    "  vec4 yuv = rgb2yuv * rgb;\n"
    "  yuv = vec4(255.0 - yuv.x, yuv.y, yuv.z, 1.0);\n"
    "  gl_FragColor = yuv2rgb * yuv;\n"
    "}\n";

// Fragment shader for EFFECT_GRADIENT: keeps luma, takes chroma from a
// fixed color scaled by the vertical position (topDown varying).
static const char fSrcGradient[] =
    "#extension GL_OES_EGL_image_external : require\n"
    "precision mediump float;\n"
    "uniform samplerExternalOES texSampler;\n"
    "varying vec2 texCoords;\n"
    "varying float topDown;\n"
    RGB2YUV_MATRIX
    YUV2RGB_MATRIX
    "void main() {\n"
    "  vec4 rgb = texture2D(texSampler, texCoords);\n"
    "  vec4 yuv = rgb2yuv * rgb;\n"
    "  vec4 mixin = vec4(15.0/31.0, 59.0/63.0, 31.0/31.0, 1.0);\n"
    "  vec4 yuv2 = rgb2yuv * vec4((mixin.xyz * topDown), 1);\n"
    "  yuv = vec4(yuv.x, yuv2.y, yuv2.z, 1);\n"
    "  gl_FragColor = yuv2rgb * yuv;\n"
    "}\n";
+
+namespace android {
+
// Creates the renderer targeting the given window with the given
// destination size, and spawns the dedicated GL thread that owns all
// EGL/GL state. mNextTextureId starts at an arbitrary non-zero value
// (100) so texture ids are never 0.
NativeWindowRenderer::NativeWindowRenderer(sp<ANativeWindow> nativeWindow,
        int width, int height)
    : mNativeWindow(nativeWindow)
    , mDstWidth(width)
    , mDstHeight(height)
    , mLastVideoEffect(-1)  // forces updateProgramAndHandle() on first frame
    , mNextTextureId(100)
    , mActiveInputs(0)
    , mThreadCmd(CMD_IDLE) {
    createThread(threadStart, this);
}
+
+// The functions below run in the GL thread.
+//
+// All GL-related work is done in this thread, and other threads send
+// requests to this thread using a command code. We expect most of the
+// time there will only be one thread sending in requests, so we let
+// other threads wait until the request is finished by GL thread.
+
+int NativeWindowRenderer::threadStart(void* self) {
+ ALOGD("create thread");
+ ((NativeWindowRenderer*)self)->glThread();
+ return 0;
+}
+
// Main loop of the GL thread. All GL work happens here; other threads
// post one command at a time via startRequest()/sendRequest() and block
// until it completes. Runs until CMD_QUIT is received, at which point
// EGL is torn down and the thread exits.
void NativeWindowRenderer::glThread() {
    initializeEGL();
    createPrograms();

    Mutex::Autolock autoLock(mLock);
    bool quit = false;
    while (!quit) {
        switch (mThreadCmd) {
            case CMD_IDLE:
                // No pending command; wait for a requester to set mThreadCmd.
                mCond.wait(mLock);
                continue;
            case CMD_RENDER_INPUT:
                render(mThreadRenderInput);
                break;
            case CMD_RESERVE_TEXTURE:
                // Binding the id here creates/reserves it in this GL context.
                glBindTexture(GL_TEXTURE_EXTERNAL_OES, mThreadTextureId);
                CHECK_GL_ERROR;
                break;
            case CMD_DELETE_TEXTURE:
                glDeleteTextures(1, &mThreadTextureId);
                break;
            case CMD_QUIT:
                terminateEGL();
                quit = true;
                break;
        }
        // Tell the requester that the command is finished.
        mThreadCmd = CMD_IDLE;
        mCond.broadcast();
    }
    ALOGD("quit");
}
+
// Sets up EGL on the GL thread: default display, an RGB888 window
// surface on mNativeWindow, and a GLES2 context made current on this
// thread. Failures are fatal via CHECK_EGL_ERROR.
void NativeWindowRenderer::initializeEGL() {
    mEglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    CHECK_EGL_ERROR;

    EGLint majorVersion;
    EGLint minorVersion;
    eglInitialize(mEglDisplay, &majorVersion, &minorVersion);
    CHECK_EGL_ERROR;

    EGLConfig config;
    EGLint numConfigs = -1;
    EGLint configAttribs[] = {
        EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
        EGL_RED_SIZE, 8,
        EGL_GREEN_SIZE, 8,
        EGL_BLUE_SIZE, 8,
        EGL_NONE
    };
    eglChooseConfig(mEglDisplay, configAttribs, &config, 1, &numConfigs);
    CHECK_EGL_ERROR;

    mEglSurface = eglCreateWindowSurface(mEglDisplay, config,
            mNativeWindow.get(), NULL);
    CHECK_EGL_ERROR;

    EGLint contextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
    mEglContext = eglCreateContext(mEglDisplay, config, EGL_NO_CONTEXT,
            contextAttribs);
    CHECK_EGL_ERROR;

    eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface, mEglContext);
    CHECK_EGL_ERROR;
}
+
// Tears down the EGL state created by initializeEGL(). Runs on the GL
// thread in response to CMD_QUIT.
void NativeWindowRenderer::terminateEGL() {
    // Release the current binding BEFORE destroying the context and
    // surface: per the EGL spec, objects that are still current are only
    // marked for deletion, and some implementations misbehave when a
    // current context/surface is destroyed. The original code destroyed
    // them first and unbound afterwards.
    eglMakeCurrent(mEglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
    eglDestroyContext(mEglDisplay, mEglContext);
    eglDestroySurface(mEglDisplay, mEglSurface);
    eglTerminate(mEglDisplay);
}
+
// Compiles the shared vertex shader plus one fragment shader per effect
// and links one program per effect into mProgram[]. The shader objects
// are deleted after linking; the program keeps its own reference.
void NativeWindowRenderer::createPrograms() {
    GLuint vShader;
    loadShader(GL_VERTEX_SHADER, vSrcNormal, &vShader);

    // Order must match the EFFECT_* enum used to index mProgram[].
    const char* fSrc[NUMBER_OF_EFFECTS] = {
        fSrcNormal, fSrcSepia, fSrcNegative, fSrcGradient
    };

    for (int i = 0; i < NUMBER_OF_EFFECTS; i++) {
        GLuint fShader;
        loadShader(GL_FRAGMENT_SHADER, fSrc[i], &fShader);
        createProgram(vShader, fShader, &mProgram[i]);
        glDeleteShader(fShader);
        CHECK_GL_ERROR;
    }

    glDeleteShader(vShader);
    CHECK_GL_ERROR;
}
+
// Links the two shaders into a program and stores it in *outPgm.
// On link failure the info log is dumped to the log and *outPgm is set
// to 0. NOTE(review): callers (createPrograms) do not appear to check
// for a 0 program id — confirm whether a failed link should be fatal.
void NativeWindowRenderer::createProgram(
        GLuint vertexShader, GLuint fragmentShader, GLuint* outPgm) {

    GLuint program = glCreateProgram();
    CHECK_GL_ERROR;

    glAttachShader(program, vertexShader);
    CHECK_GL_ERROR;

    glAttachShader(program, fragmentShader);
    CHECK_GL_ERROR;

    glLinkProgram(program);
    CHECK_GL_ERROR;

    GLint linkStatus = GL_FALSE;
    glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
    if (linkStatus != GL_TRUE) {
        GLint infoLen = 0;
        glGetProgramiv(program, GL_INFO_LOG_LENGTH, &infoLen);
        if (infoLen) {
            char* buf = (char*) malloc(infoLen);
            if (buf) {
                glGetProgramInfoLog(program, infoLen, NULL, buf);
                ALOGE("Program link log:\n%s\n", buf);
                free(buf);
            }
        }
        glDeleteProgram(program);
        program = 0;
    }

    *outPgm = program;
}
+
+void NativeWindowRenderer::loadShader(GLenum shaderType, const char* pSource,
+ GLuint* outShader) {
+ GLuint shader = glCreateShader(shaderType);
+ CHECK_GL_ERROR;
+
+ glShaderSource(shader, 1, &pSource, NULL);
+ CHECK_GL_ERROR;
+
+ glCompileShader(shader);
+ CHECK_GL_ERROR;
+
+ GLint compiled = 0;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
+ if (!compiled) {
+ GLint infoLen = 0;
+ glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
+ char* buf = (char*) malloc(infoLen);
+ if (buf) {
+ glGetShaderInfoLog(shader, infoLen, NULL, buf);
+ ALOGE("Shader compile log:\n%s\n", buf);
+ free(buf);
+ }
+ glDeleteShader(shader);
+ shader = 0;
+ }
+ *outShader = shader;
+}
+
// All RenderInputs must have been destroyed before the renderer itself;
// asks the GL thread to tear down EGL and exit, and waits for it.
NativeWindowRenderer::~NativeWindowRenderer() {
    CHECK(mActiveInputs == 0);
    startRequest(CMD_QUIT);
    sendRequest();
}
+
// Renders one frame from the given input to the destination window.
// Runs on the GL thread in response to CMD_RENDER_INPUT. The frame is
// first queued into the input's SurfaceTexture (either as a decoder-
// provided buffer or by copying an external buffer), then latched with
// updateTexImage() and drawn with the effect's shader program.
void NativeWindowRenderer::render(RenderInput* input) {
    sp<SurfaceTexture> ST = input->mST;
    sp<SurfaceTextureClient> STC = input->mSTC;

    if (input->mIsExternalBuffer) {
        queueExternalBuffer(STC.get(), input->mBuffer,
                input->mWidth, input->mHeight);
    } else {
        queueInternalBuffer(STC.get(), input->mBuffer);
    }

    // Latch the queued buffer as the content of the external texture.
    ST->updateTexImage();
    glClearColor(0, 0, 0, 0);
    glClear(GL_COLOR_BUFFER_BIT);

    calculatePositionCoordinates(input->mRenderingMode,
            input->mWidth, input->mHeight);

    // Full quad in texture space; the actual transform is applied via
    // texMatrix from the SurfaceTexture.
    const GLfloat textureCoordinates[] = {
        0.0f, 1.0f,
        0.0f, 0.0f,
        1.0f, 0.0f,
        1.0f, 1.0f,
    };

    updateProgramAndHandle(input->mVideoEffect);

    glVertexAttribPointer(mPositionHandle, 2, GL_FLOAT, GL_FALSE, 0,
            mPositionCoordinates);
    CHECK_GL_ERROR;

    glEnableVertexAttribArray(mPositionHandle);
    CHECK_GL_ERROR;

    glVertexAttribPointer(mTexPosHandle, 2, GL_FLOAT, GL_FALSE, 0,
            textureCoordinates);
    CHECK_GL_ERROR;

    glEnableVertexAttribArray(mTexPosHandle);
    CHECK_GL_ERROR;

    GLfloat texMatrix[16];
    ST->getTransformMatrix(texMatrix);
    glUniformMatrix4fv(mTexMatrixHandle, 1, GL_FALSE, texMatrix);
    CHECK_GL_ERROR;

    glBindTexture(GL_TEXTURE_EXTERNAL_OES, input->mTextureId);
    CHECK_GL_ERROR;

    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(
            GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(
            GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    CHECK_GL_ERROR;

    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
    CHECK_GL_ERROR;

    eglSwapBuffers(mEglDisplay, mEglSurface);
}
+
// Queues a decoder-owned graphic buffer into the SurfaceTexture's
// window, stamped with the buffer's presentation time (converted from
// microseconds to nanoseconds). On success the buffer's metadata is
// marked kKeyRendered so the owner knows the buffer was consumed; on
// queueBuffer failure the flag is deliberately left unset.
void NativeWindowRenderer::queueInternalBuffer(ANativeWindow *anw,
    MediaBuffer* buffer) {
    int64_t timeUs;
    CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
    native_window_set_buffers_timestamp(anw, timeUs * 1000);
    status_t err = anw->queueBuffer(anw, buffer->graphicBuffer().get());
    if (err != 0) {
        ALOGE("queueBuffer failed with error %s (%d)", strerror(-err), -err);
        return;
    }

    sp<MetaData> metaData = buffer->meta_data();
    metaData->setInt32(kKeyRendered, 1);
}
+
+void NativeWindowRenderer::queueExternalBuffer(ANativeWindow* anw,
+ MediaBuffer* buffer, int width, int height) {
+ native_window_set_buffers_geometry(anw, width, height,
+ HAL_PIXEL_FORMAT_YV12);
+ native_window_set_usage(anw, GRALLOC_USAGE_SW_WRITE_OFTEN);
+
+ ANativeWindowBuffer* anb;
+ anw->dequeueBuffer(anw, &anb);
+ CHECK(anb != NULL);
+
+ sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+ CHECK(NO_ERROR == anw->lockBuffer(anw, buf->getNativeBuffer()));
+
+ // Copy the buffer
+ uint8_t* img = NULL;
+ buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
+ copyI420Buffer(buffer, img, width, height, buf->getStride());
+ buf->unlock();
+ CHECK(NO_ERROR == anw->queueBuffer(anw, buf->getNativeBuffer()));
+}
+
// Copies an I420 frame (planes in Y, U, V order) into a YV12 buffer
// (planes in Y, V, U order). 'stride' is the destination Y stride; the
// destination chroma stride is stride/2 rounded up to a multiple of 16,
// matching the YV12 gralloc layout.
void NativeWindowRenderer::copyI420Buffer(MediaBuffer* src, uint8_t* dst,
        int srcWidth, int srcHeight, int stride) {
    int strideUV = (stride / 2 + 0xf) & ~0xf;
    uint8_t* p = (uint8_t*)src->data() + src->range_offset();
    // Y plane, row by row (src rows are tightly packed).
    for (int i = srcHeight; i > 0; i--) {
        memcpy(dst, p, srcWidth);
        dst += stride;
        p += srcWidth;
    }
    // The src is I420, the dst is YV12, so the chroma planes swap:
    // skip over the source U plane and write the source V plane first.
    // U
    p += srcWidth * srcHeight / 4;
    for (int i = srcHeight / 2; i > 0; i--) {
        memcpy(dst, p, srcWidth / 2);
        dst += strideUV;
        p += srcWidth / 2;
    }
    // V: rewind past the just-copied V plane and the skipped U plane
    // back to the start of the source U plane.
    p -= srcWidth * srcHeight / 2;
    for (int i = srcHeight / 2; i > 0; i--) {
        memcpy(dst, p, srcWidth / 2);
        dst += strideUV;
        p += srcWidth / 2;
    }
}
+
+void NativeWindowRenderer::updateProgramAndHandle(uint32_t videoEffect) {
+ if (mLastVideoEffect == videoEffect) {
+ return;
+ }
+
+ mLastVideoEffect = videoEffect;
+ int i;
+ switch (mLastVideoEffect) {
+ case VIDEO_EFFECT_NONE:
+ i = 0;
+ break;
+ case VIDEO_EFFECT_SEPIA:
+ i = 1;
+ break;
+ case VIDEO_EFFECT_NEGATIVE:
+ i = 2;
+ break;
+ case VIDEO_EFFECT_GRADIENT:
+ i = 3;
+ break;
+ default:
+ i = 0;
+ break;
+ }
+ glUseProgram(mProgram[i]);
+ CHECK_GL_ERROR;
+
+ mPositionHandle = glGetAttribLocation(mProgram[i], "vPosition");
+ mTexPosHandle = glGetAttribLocation(mProgram[i], "vTexPos");
+ mTexMatrixHandle = glGetUniformLocation(mProgram[i], "texMatrix");
+ CHECK_GL_ERROR;
+}
+
// Computes the quad vertex coordinates (in normalized device
// coordinates) for the given rendering mode and source size, writing
// them into mPositionCoordinates in fan order: top-left, bottom-left,
// bottom-right, top-right.
void NativeWindowRenderer::calculatePositionCoordinates(
        M4xVSS_MediaRendering renderingMode, int srcWidth, int srcHeight) {
    float x, y;
    switch (renderingMode) {
        case M4xVSS_kResizing:
        default:
            // Stretch to fill the whole destination.
            x = 1;
            y = 1;
            break;
        case M4xVSS_kCropping:
            // Scale so the destination is fully covered; the larger
            // dimension overflows (is cropped).
            x = float(srcWidth) / mDstWidth;
            y = float(srcHeight) / mDstHeight;
            // Make the smaller side 1
            if (x > y) {
                x /= y;
                y = 1;
            } else {
                y /= x;
                x = 1;
            }
            break;
        case M4xVSS_kBlackBorders:
            // Scale so the whole source fits; the smaller dimension is
            // letterboxed with the clear color (black).
            x = float(srcWidth) / mDstWidth;
            y = float(srcHeight) / mDstHeight;
            // Make the larger side 1
            if (x > y) {
                y /= x;
                x = 1;
            } else {
                x /= y;
                y = 1;
            }
            break;
    }

    mPositionCoordinates[0] = -x;
    mPositionCoordinates[1] = y;
    mPositionCoordinates[2] = -x;
    mPositionCoordinates[3] = -y;
    mPositionCoordinates[4] = x;
    mPositionCoordinates[5] = -y;
    mPositionCoordinates[6] = x;
    mPositionCoordinates[7] = y;
}
+
+//
+// The functions below run in other threads.
+//
+
// First half of the request protocol, called from non-GL threads:
// acquires mLock (kept held until sendRequest() returns), waits until
// the GL thread is idle, and posts the command code. The caller may set
// command arguments (mThreadRenderInput / mThreadTextureId) between
// startRequest() and sendRequest().
void NativeWindowRenderer::startRequest(int cmd) {
    mLock.lock();
    while (mThreadCmd != CMD_IDLE) {
        mCond.wait(mLock);
    }
    mThreadCmd = cmd;
}
+
// Second half of the request protocol: wakes the GL thread, waits until
// it has processed the command (mThreadCmd back to CMD_IDLE), then
// releases the lock taken in startRequest().
void NativeWindowRenderer::sendRequest() {
    mCond.broadcast();
    while (mThreadCmd != CMD_IDLE) {
        mCond.wait(mLock);
    }
    mLock.unlock();
}
+
// Allocates a new RenderInput with a fresh texture id and asks the GL
// thread to reserve (bind) that id in its context. Caller must later
// release the input with destroyRenderInput().
RenderInput* NativeWindowRenderer::createRenderInput() {
    ALOGD("new render input %d", mNextTextureId);
    RenderInput* input = new RenderInput(this, mNextTextureId);

    startRequest(CMD_RESERVE_TEXTURE);
    mThreadTextureId = mNextTextureId;
    sendRequest();

    mNextTextureId++;
    mActiveInputs++;
    return input;
}
+
// Deletes a RenderInput created by createRenderInput() and asks the GL
// thread to delete the associated texture. The texture id is captured
// before 'delete input' since the GL request needs it afterwards.
void NativeWindowRenderer::destroyRenderInput(RenderInput* input) {
    ALOGD("destroy render input %d", input->mTextureId);
    GLuint textureId = input->mTextureId;
    delete input;

    startRequest(CMD_DELETE_TEXTURE);
    mThreadTextureId = textureId;
    sendRequest();

    mActiveInputs--;
}
+
+//
+// RenderInput
+//
+
// Creates the SurfaceTexture backed by the given GL texture id, connects
// it as a media-API producer, and wraps it in a SurfaceTextureClient so
// callers can treat it as an ANativeWindow.
RenderInput::RenderInput(NativeWindowRenderer* renderer, GLuint textureId)
    : mRenderer(renderer)
    , mTextureId(textureId) {
    mST = new SurfaceTexture(mTextureId);
    uint32_t outWidth, outHeight, outTransform;
    mST->connect(NATIVE_WINDOW_API_MEDIA, &outWidth, &outHeight, &outTransform);

    mSTC = new SurfaceTextureClient(mST);
}
+
// mST and mSTC are strong pointers and release themselves; the GL
// texture itself is deleted by NativeWindowRenderer::destroyRenderInput.
RenderInput::~RenderInput() {
}
+
// Exposes the SurfaceTextureClient as a plain ANativeWindow so a codec
// can decode directly into the texture's buffers.
ANativeWindow* RenderInput::getTargetWindow() {
    return mSTC.get();
}
+
// Derives the displayed frame size from the source metadata: base
// width/height, overridden by the crop rectangle when present, then
// swapped for 90/270-degree rotation.
void RenderInput::updateVideoSize(sp<MetaData> meta) {
    CHECK(meta->findInt32(kKeyWidth, &mWidth));
    CHECK(meta->findInt32(kKeyHeight, &mHeight));

    int left, top, right, bottom;
    if (meta->findRect(kKeyCropRect, &left, &top, &right, &bottom)) {
        // Crop rect bounds are inclusive, hence the +1.
        mWidth = right - left + 1;
        mHeight = bottom - top + 1;
    }

    // If rotation degrees is 90 or 270, swap width and height
    // (mWidth and mHeight are the _rotated_ source rectangle).
    int32_t rotationDegrees;
    if (!meta->findInt32(kKeyRotation, &rotationDegrees)) {
        rotationDegrees = 0;
    }

    if (rotationDegrees == 90 || rotationDegrees == 270) {
        int tmp = mWidth;
        mWidth = mHeight;
        mHeight = tmp;
    }
}
+
// Stores the per-frame parameters on this input, then hands the actual
// drawing off to the renderer's GL thread and blocks until it is done.
// The fields set here are only valid for the duration of this call.
void RenderInput::render(MediaBuffer* buffer, uint32_t videoEffect,
        M4xVSS_MediaRendering renderingMode, bool isExternalBuffer) {
    mVideoEffect = videoEffect;
    mRenderingMode = renderingMode;
    mIsExternalBuffer = isExternalBuffer;
    mBuffer = buffer;

    mRenderer->startRequest(NativeWindowRenderer::CMD_RENDER_INPUT);
    mRenderer->mThreadRenderInput = this;
    mRenderer->sendRequest();
}
+
+} // namespace android
diff --git a/libvideoeditor/lvpp/NativeWindowRenderer.h b/libvideoeditor/lvpp/NativeWindowRenderer.h
new file mode 100755
index 0000000..8fbb4f9
--- /dev/null
+++ b/libvideoeditor/lvpp/NativeWindowRenderer.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NATIVE_WINDOW_RENDERER_H_
+#define NATIVE_WINDOW_RENDERER_H_
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+
+#include "M4xVSS_API.h"
+
+// The NativeWindowRenderer draws video frames stored in MediaBuffers to
+// an ANativeWindow. It can apply "rendering mode" and color effects to
+// the frames. "Rendering mode" is the option to do resizing, cropping,
+// or black-bordering when the source and destination aspect ratio are
+// different. Color effects include sepia, negative, and gradient.
+//
+// The input to NativeWindowRenderer is provided by the RenderInput class,
+// and there can be multiple active RenderInput at the same time. Although
+// we only expect that happens briefly when one clip is about to finish
+// and the next clip is about to start.
+//
+// We allocate a SurfaceTexture for each RenderInput and the user can use
+// the getTargetWindow() function to get the corresponding ANativeWindow
+// for that SurfaceTexture. The intention is that the user can pass that
+// ANativeWindow to OMXCodec::Create() so the codec can decode directly
+// to buffers provided by the texture.
+
+namespace android {
+
+class SurfaceTexture;
+class SurfaceTextureClient;
+class RenderInput;
+
class NativeWindowRenderer {
public:
    NativeWindowRenderer(sp<ANativeWindow> nativeWindow, int width, int height);
    ~NativeWindowRenderer();

    RenderInput* createRenderInput();
    void destroyRenderInput(RenderInput* input);

private:
    // No copy constructor and assignment
    NativeWindowRenderer(const NativeWindowRenderer &);
    NativeWindowRenderer &operator=(const NativeWindowRenderer &);

    // Initialization and finalization (run on the GL thread).
    void initializeEGL();
    void terminateEGL();
    void createPrograms();
    void createProgram(
            GLuint vertexShader, GLuint fragmentShader, GLuint* outPgm);
    void loadShader(
            GLenum shaderType, const char* pSource, GLuint* outShader);

    // These functions are executed every frame (on the GL thread).
    void render(RenderInput* input);
    void queueInternalBuffer(ANativeWindow* anw, MediaBuffer* buffer);
    void queueExternalBuffer(ANativeWindow* anw, MediaBuffer* buffer,
            int width, int height);
    void copyI420Buffer(MediaBuffer* src, uint8_t* dst,
            int srcWidth, int srcHeight, int stride);
    void updateProgramAndHandle(uint32_t videoEffect);
    void calculatePositionCoordinates(M4xVSS_MediaRendering renderingMode,
            int srcWidth, int srcHeight);

    // These variables are initialized once and don't change afterwards.
    sp<ANativeWindow> mNativeWindow;
    int mDstWidth, mDstHeight;
    EGLDisplay mEglDisplay;
    EGLSurface mEglSurface;
    EGLContext mEglContext;
    enum {
        EFFECT_NORMAL,
        EFFECT_SEPIA,
        EFFECT_NEGATIVE,
        EFFECT_GRADIENT,
        NUMBER_OF_EFFECTS
    };
    GLuint mProgram[NUMBER_OF_EFFECTS];

    // We use one shader program for each effect. mLastVideoEffect remembers
    // the program used for the last frame. When the effect used changes,
    // we change the program used and update the handles.
    uint32_t mLastVideoEffect;
    GLint mPositionHandle;
    GLint mTexPosHandle;
    GLint mTexMatrixHandle;

    // This is the vertex coordinates used for the frame texture.
    // It's calculated according to the rendering mode and the source and
    // destination aspect ratio.
    GLfloat mPositionCoordinates[8];

    // We use a different GL id for each SurfaceTexture.
    GLuint mNextTextureId;

    // Number of existing RenderInputs, just for debugging.
    int mActiveInputs;

    // The GL thread functions
    static int threadStart(void* self);
    void glThread();

    // These variables are used to communicate between the GL thread and
    // other threads. mThreadRenderInput and mThreadTextureId are the
    // arguments for the pending command; they are protected by mLock.
    Mutex mLock;
    Condition mCond;
    enum {
        CMD_IDLE,
        CMD_RENDER_INPUT,
        CMD_RESERVE_TEXTURE,
        CMD_DELETE_TEXTURE,
        CMD_QUIT,
    };
    int mThreadCmd;
    RenderInput* mThreadRenderInput;
    GLuint mThreadTextureId;

    // These functions are used to send commands to the GL thread.
    // sendRequest() also waits until the GL thread acknowledges that the
    // command is finished.
    void startRequest(int cmd);
    void sendRequest();

    friend class RenderInput;
};
+
class RenderInput {
public:
    // Returns the ANativeWindow corresponding to the SurfaceTexture.
    ANativeWindow* getTargetWindow();

    // Updates video frame size from the MediaSource's metadata. Specifically
    // we look for kKeyWidth, kKeyHeight, and (optionally) kKeyCropRect.
    void updateVideoSize(sp<MetaData> meta);

    // Renders the buffer with the given video effect and rendering mode.
    // The video effects are defined in VideoEditorTools.h
    // Set isExternalBuffer to true only when the buffer given is not
    // provided by the SurfaceTexture.
    void render(MediaBuffer *buffer, uint32_t videoEffect,
            M4xVSS_MediaRendering renderingMode, bool isExternalBuffer);
private:
    // Only NativeWindowRenderer may create/destroy RenderInputs.
    RenderInput(NativeWindowRenderer* renderer, GLuint textureId);
    ~RenderInput();
    NativeWindowRenderer* mRenderer;
    GLuint mTextureId;
    sp<SurfaceTexture> mST;
    sp<SurfaceTextureClient> mSTC;
    int mWidth, mHeight;

    // These are only valid during render() calls
    uint32_t mVideoEffect;
    M4xVSS_MediaRendering mRenderingMode;
    bool mIsExternalBuffer;
    MediaBuffer* mBuffer;

    friend class NativeWindowRenderer;
};
+
+} // namespace android
+
+#endif // NATIVE_WINDOW_RENDERER_H_
diff --git a/libvideoeditor/lvpp/PreviewPlayer.cpp b/libvideoeditor/lvpp/PreviewPlayer.cpp
new file mode 100755
index 0000000..34731d7
--- /dev/null
+++ b/libvideoeditor/lvpp/PreviewPlayer.cpp
@@ -0,0 +1,2082 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "PreviewPlayer"
+#include <utils/Log.h>
+
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <media/IMediaPlayerService.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <gui/Surface.h>
+#include <gui/ISurfaceTexture.h>
+#include <gui/SurfaceTextureClient.h>
+
+#include "VideoEditorPreviewController.h"
+#include "DummyAudioSource.h"
+#include "DummyVideoSource.h"
+#include "VideoEditorSRC.h"
+#include "PreviewPlayer.h"
+
+namespace android {
+
+
+void addBatteryData(uint32_t params) {
+ sp<IBinder> binder =
+ defaultServiceManager()->getService(String16("media.player"));
+ sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
+ CHECK(service.get() != NULL);
+
+ service->addBatteryData(params);
+}
+
// TimedEventQueue event that invokes a PreviewPlayer member function
// when fired; used for all of the player's queued callbacks
// (video/audio status, stream done, progress, overlay updates).
struct PreviewPlayerEvent : public TimedEventQueue::Event {
    PreviewPlayerEvent(
            PreviewPlayer *player,
            void (PreviewPlayer::*method)())
        : mPlayer(player),
          mMethod(method) {
    }

protected:
    virtual ~PreviewPlayerEvent() {}

    virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
        (mPlayer->*mMethod)();
    }

private:
    PreviewPlayer *mPlayer;
    void (PreviewPlayer::*mMethod)();

    // Non-copyable: holds a raw back-pointer to the player.
    PreviewPlayerEvent(const PreviewPlayerEvent &);
    PreviewPlayerEvent &operator=(const PreviewPlayerEvent &);
};
+
+PreviewPlayer::PreviewPlayer(NativeWindowRenderer* renderer)
+ : mQueueStarted(false),
+ mTimeSource(NULL),
+ mVideoRendererIsPreview(false),
+ mAudioPlayer(NULL),
+ mDisplayWidth(0),
+ mDisplayHeight(0),
+ mFlags(0),
+ mExtractorFlags(0),
+ mVideoBuffer(NULL),
+ mLastVideoTimeUs(-1),
+ mNativeWindowRenderer(renderer),
+ mCurrFramingEffectIndex(0),
+ mFrameRGBBuffer(NULL),
+ mFrameYUVBuffer(NULL) {
+
+ CHECK_EQ(mClient.connect(), (status_t)OK);
+ DataSource::RegisterDefaultSniffers();
+
+
+ mVideoRenderer = NULL;
+ mEffectsSettings = NULL;
+ mAudioPlayer = NULL;
+ mAudioMixStoryBoardTS = 0;
+ mCurrentMediaBeginCutTime = 0;
+ mCurrentMediaVolumeValue = 0;
+ mNumberEffects = 0;
+ mDecodedVideoTs = 0;
+ mDecVideoTsStoryBoard = 0;
+ mCurrentVideoEffect = VIDEO_EFFECT_NONE;
+ mProgressCbInterval = 0;
+ mNumberDecVideoFrames = 0;
+ mOverlayUpdateEventPosted = false;
+ mIsChangeSourceRequired = true;
+
+ mVideoEvent = new PreviewPlayerEvent(this, &PreviewPlayer::onVideoEvent);
+ mVideoEventPending = false;
+ mVideoLagEvent = new PreviewPlayerEvent(this, &PreviewPlayer::onVideoLagUpdate);
+ mVideoEventPending = false;
+ mCheckAudioStatusEvent = new PreviewPlayerEvent(
+ this, &PreviewPlayer::onCheckAudioStatus);
+ mAudioStatusEventPending = false;
+ mStreamDoneEvent = new PreviewPlayerEvent(
+ this, &PreviewPlayer::onStreamDone);
+ mStreamDoneEventPending = false;
+ mProgressCbEvent = new PreviewPlayerEvent(this,
+ &PreviewPlayer::onProgressCbEvent);
+
+ mOverlayUpdateEvent = new PreviewPlayerEvent(this,
+ &PreviewPlayer::onUpdateOverlayEvent);
+ mProgressCbEventPending = false;
+
+ mOverlayUpdateEventPending = false;
+ mRenderingMode = (M4xVSS_MediaRendering)MEDIA_RENDERING_INVALID;
+ mIsFiftiesEffectStarted = false;
+ reset();
+}
+
// Stops the event queue, tears down sources via reset(), destroys the
// renderer's input, and disconnects from the media player service.
PreviewPlayer::~PreviewPlayer() {

    if (mQueueStarted) {
        mQueue.stop();
    }

    reset();

    if (mVideoRenderer) {
        mNativeWindowRenderer->destroyRenderInput(mVideoRenderer);
    }

    Mutex::Autolock lock(mLock);
    clear_l();
    mClient.disconnect();
}
+
// Cancels all queued player events and clears their pending flags.
// The progress callback event is only cancelled when updateProgressCb
// is true, so progress reporting can survive a partial cancel.
// Caller must hold mLock (the _l suffix convention).
void PreviewPlayer::cancelPlayerEvents_l(bool updateProgressCb) {
    mQueue.cancelEvent(mVideoEvent->eventID());
    mVideoEventPending = false;
    mQueue.cancelEvent(mStreamDoneEvent->eventID());
    mStreamDoneEventPending = false;
    mQueue.cancelEvent(mCheckAudioStatusEvent->eventID());
    mAudioStatusEventPending = false;
    mQueue.cancelEvent(mVideoLagEvent->eventID());
    mVideoLagEventPending = false;
    if (updateProgressCb) {
        mQueue.cancelEvent(mProgressCbEvent->eventID());
        mProgressCbEventPending = false;
    }
}
+
// Public, locked entry point for setting a URI/path data source.
status_t PreviewPlayer::setDataSource(const char *path) {
    Mutex::Autolock autoLock(mLock);
    return setDataSource_l(path);
}
+
// Records the URI after resetting the player; no I/O happens here.
// Caller must hold mLock.
status_t PreviewPlayer::setDataSource_l(const char *path) {
    reset_l();

    mUri = path;

    // The actual work will be done during preparation in the call to
    // ::finishSetDataSource_l to avoid blocking the calling thread in
    // setDataSource for any significant time.
    return OK;
}
+
// Selects the first video and first audio track exposed by the
// extractor. If the file has no audio track, a silent DummyAudioSource
// covering the clip duration is substituted so playback timing still
// works. Returns UNKNOWN_ERROR when neither audio nor video is found.
// Caller must hold mLock.
status_t PreviewPlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
    bool haveAudio = false;
    bool haveVideo = false;
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        const char *mime;
        CHECK(meta->findCString(kKeyMIMEType, &mime));

        if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
            setVideoSource(extractor->getTrack(i));
            haveVideo = true;
        } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
            setAudioSource(extractor->getTrack(i));
            haveAudio = true;

            if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
                // Only do this for vorbis audio, none of the other audio
                // formats even support this ringtone specific hack and
                // retrieving the metadata on some extractors may turn out
                // to be very expensive.
                sp<MetaData> fileMeta = extractor->getMetaData();
                int32_t loop;
                if (fileMeta != NULL
                        && fileMeta->findInt32(kKeyAutoLoop, &loop)
                        && loop != 0) {
                    mFlags |= AUTO_LOOPING;
                }
            }
        }

        // Stop scanning once both track types are found.
        if (haveAudio && haveVideo) {
            break;
        }
    }

    /* Add the support for Dummy audio*/
    if( !haveAudio ){
        // 32 kHz stereo silence for the whole clip (times in usec).
        mAudioTrack = DummyAudioSource::Create(32000, 2, 20000,
                ((mPlayEndTimeMsec)*1000LL));
        if(mAudioTrack != NULL) {
            haveAudio = true;
        }
    }

    if (!haveAudio && !haveVideo) {
        return UNKNOWN_ERROR;
    }

    mExtractorFlags = extractor->flags();
    return OK;
}
+
+status_t PreviewPlayer::setDataSource_l_jpg() {
+ ALOGV("setDataSource_l_jpg");
+
+ M4OSA_ERR err = M4NO_ERROR;
+
+ mAudioSource = DummyAudioSource::Create(32000, 2, 20000,
+ ((mPlayEndTimeMsec)*1000LL));
+ if(mAudioSource != NULL) {
+ setAudioSource(mAudioSource);
+ }
+ status_t error = mAudioSource->start();
+ if (error != OK) {
+ ALOGE("Error starting dummy audio source");
+ mAudioSource.clear();
+ return err;
+ }
+
+ mDurationUs = (mPlayEndTimeMsec - mPlayBeginTimeMsec)*1000LL;
+
+ mVideoSource = DummyVideoSource::Create(mVideoWidth, mVideoHeight,
+ mDurationUs, mUri);
+
+ updateSizeToRender(mVideoSource->getFormat());
+ setVideoSource(mVideoSource);
+ status_t err1 = mVideoSource->start();
+ if (err1 != OK) {
+ mVideoSource.clear();
+ return err;
+ }
+
+ mIsVideoSourceJpg = true;
+ return OK;
+}
+
// Returns the player to its pristine state: cancels any in-flight
// prepare, stops and releases sources, clears buffers and flags.
// The shutdown order (audio first, then video) is deliberate — see the
// inline comments. Caller must hold mLock.
void PreviewPlayer::reset_l() {

    if (mFlags & PREPARING) {
        mFlags |= PREPARE_CANCELLED;
    }

    // Wait for any in-progress prepare to notice the cancellation.
    while (mFlags & PREPARING) {
        mPreparedCondition.wait(mLock);
    }

    cancelPlayerEvents_l();
    mAudioTrack.clear();
    mVideoTrack.clear();

    // Shutdown audio first, so that the respone to the reset request
    // appears to happen instantaneously as far as the user is concerned
    // If we did this later, audio would continue playing while we
    // shutdown the video-related resources and the player appear to
    // not be as responsive to a reset request.
    if (mAudioPlayer == NULL && mAudioSource != NULL) {
        // If we had an audio player, it would have effectively
        // taken possession of the audio source and stopped it when
        // _it_ is stopped. Otherwise this is still our responsibility.
        mAudioSource->stop();
    }
    mAudioSource.clear();

    mTimeSource = NULL;

    //Single audio player instance used
    //So donot delete it here
    //It is deleted from PreviewController class
    //delete mAudioPlayer;
    mAudioPlayer = NULL;

    if (mVideoBuffer) {
        mVideoBuffer->release();
        mVideoBuffer = NULL;
    }

    if (mVideoSource != NULL) {
        mVideoSource->stop();

        // The following hack is necessary to ensure that the OMX
        // component is completely released by the time we may try
        // to instantiate it again.
        wp<MediaSource> tmp = mVideoSource;
        mVideoSource.clear();
        while (tmp.promote() != NULL) {
            usleep(1000);
        }
        IPCThreadState::self()->flushCommands();
    }

    // Reset bookkeeping to "no source loaded" values.
    mDurationUs = -1;
    mFlags = 0;
    mExtractorFlags = 0;
    mVideoWidth = mVideoHeight = -1;
    mTimeSourceDeltaUs = 0;
    mVideoTimeUs = 0;

    mSeeking = NO_SEEK;
    mSeekNotificationSent = false;
    mSeekTimeUs = 0;

    mUri.setTo("");

    mCurrentVideoEffect = VIDEO_EFFECT_NONE;
    mIsVideoSourceJpg = false;
    // mFrameRGBBuffer is not owned here; only the YUV buffer is freed.
    mFrameRGBBuffer = NULL;
    if(mFrameYUVBuffer != NULL) {
        free(mFrameYUVBuffer);
        mFrameYUVBuffer = NULL;
    }
}
+
+// Public entry point for starting playback. Takes the main lock and
+// clears the per-run latches before delegating to play_l().
+status_t PreviewPlayer::play() {
+    ALOGV("play");
+    Mutex::Autolock autoLock(mLock);
+
+    // A fresh play request invalidates any cache-underrun pause and the
+    // "already notified A/V EOS" latch from the previous run.
+    mFlags &= ~(CACHE_UNDERRUN | INFORMED_AV_EOS);
+    return play_l();
+}
+
+// Start (or resume) the audio player; caller holds mLock and must have
+// cleared AUDIO_RUNNING. Missing audio source/player is not an error:
+// video-only clips just run off another time source.
+status_t PreviewPlayer::startAudioPlayer_l() {
+    ALOGV("startAudioPlayer_l");
+    CHECK(!(mFlags & AUDIO_RUNNING));
+
+    if (mAudioSource == NULL || mAudioPlayer == NULL) {
+        return OK;
+    }
+
+    if (!(mFlags & AUDIOPLAYER_STARTED)) {
+        mFlags |= AUDIOPLAYER_STARTED;
+
+        // We've already started the MediaSource in order to enable
+        // the prefetcher to read its data.
+        status_t err = mAudioPlayer->start(
+                true /* sourceAlreadyStarted */);
+
+        if (err != OK) {
+            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+            return err;
+        }
+    } else {
+        // Already started once; it is merely paused, so resume.
+        mAudioPlayer->resume();
+    }
+
+    mFlags |= AUDIO_RUNNING;
+
+    mWatchForAudioEOS = true;
+
+    return OK;
+}
+
+// Attach the shared audio player (owned by PreviewController) to this
+// PreviewPlayer and decide whether its audio source must be swapped.
+// If both the currently-playing source and the new one are dummy
+// (silent) sources AND the new clip's video is also dummy, we keep the
+// old source running to avoid an audible glitch and merely extend its
+// duration. Must be called before playback starts.
+status_t PreviewPlayer::setAudioPlayer(VideoEditorAudioPlayer *audioPlayer) {
+    ALOGV("setAudioPlayer");
+    Mutex::Autolock autoLock(mLock);
+    CHECK(!(mFlags & PLAYING));
+    mAudioPlayer = audioPlayer;
+
+    ALOGV("SetAudioPlayer");
+    // Default: assume we will rebind the player to the new source.
+    mIsChangeSourceRequired = true;
+
+    // check if the new and old source are dummy
+    sp<MediaSource> anAudioSource = mAudioPlayer->getSource();
+    if (anAudioSource == NULL) {
+        // Audio player does not have any source set.
+        ALOGV("setAudioPlayer: Audio player does not have any source set");
+        return OK;
+    }
+
+    // If new video source is not dummy, then always change source
+    // Else audio player continues using old audio source and there are
+    // frame drops to maintain AV sync
+    sp<MetaData> meta;
+    if (mVideoSource != NULL) {
+        meta = mVideoSource->getFormat();
+        const char *pVidSrcType;
+        if (meta->findCString(kKeyDecoderComponent, &pVidSrcType)) {
+            if (strcmp(pVidSrcType, "DummyVideoSource") != 0) {
+                ALOGV(" Video clip with silent audio; need to change source");
+                return OK;
+            }
+        }
+    }
+
+    const char *pSrcType1;
+    const char *pSrcType2;
+    meta = anAudioSource->getFormat();
+
+    if (meta->findCString(kKeyDecoderComponent, &pSrcType1)) {
+        if (strcmp(pSrcType1, "DummyAudioSource") == 0) {
+            meta = mAudioSource->getFormat();
+            if (meta->findCString(kKeyDecoderComponent, &pSrcType2)) {
+                if (strcmp(pSrcType2, "DummyAudioSource") == 0) {
+                    // Old and new sources are both dummy: keep the old one.
+                    mIsChangeSourceRequired = false;
+                    // Just set the new play duration for the existing source
+                    // (downcast is safe: the component name identifies the
+                    // concrete type).
+                    MediaSource *pMediaSrc = anAudioSource.get();
+                    DummyAudioSource *pDummyAudioSource = (DummyAudioSource*)pMediaSrc;
+                    //Increment the duration of audio source
+                    pDummyAudioSource->setDuration(
+                            (int64_t)((mPlayEndTimeMsec)*1000LL));
+
+                    // Stop the new audio source
+                    // since we continue using old source
+                    ALOGV("setAudioPlayer: stop new audio source");
+                    mAudioSource->stop();
+                }
+            }
+        }
+    }
+
+    return OK;
+}
+
+// Handles the stream-done event posted when a track finishes (or errors).
+// On real errors it pauses and reports MEDIA_ERROR. At a clean EOS it
+// either loops or pauses and notifies MEDIA_PLAYBACK_COMPLETE exactly
+// once (the INFORMED_AV_EOS latch guards against the double notification
+// described in the comment below).
+void PreviewPlayer::onStreamDone() {
+    ALOGV("onStreamDone");
+    // Posted whenever any stream finishes playing.
+
+    Mutex::Autolock autoLock(mLock);
+    if (!mStreamDoneEventPending) {
+        return;
+    }
+    mStreamDoneEventPending = false;
+
+    if (mStreamDoneStatus != ERROR_END_OF_STREAM) {
+        ALOGV("MEDIA_ERROR %d", mStreamDoneStatus);
+
+        notifyListener_l(
+                MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, mStreamDoneStatus);
+
+        pause_l(true /* at eos */);
+
+        mFlags |= AT_EOS;
+        return;
+    }
+
+    // Only act once both streams (that exist) have reached EOS.
+    const bool allDone =
+        (mVideoSource == NULL || (mFlags & VIDEO_AT_EOS))
+            && (mAudioSource == NULL || (mFlags & AUDIO_AT_EOS));
+
+    if (!allDone) {
+        return;
+    }
+
+    if (mFlags & (LOOPING | AUTO_LOOPING)) {
+        seekTo_l(0);
+
+        if (mVideoSource != NULL) {
+            postVideoEvent_l();
+        }
+    } else {
+        ALOGV("MEDIA_PLAYBACK_COMPLETE");
+        //pause before sending event
+        pause_l(true /* at eos */);
+
+        //This lock is used to syncronize onStreamDone() in PreviewPlayer and
+        //stopPreview() in PreviewController
+        Mutex::Autolock autoLock(mLockControl);
+        /* Make sure PreviewPlayer only notifies MEDIA_PLAYBACK_COMPLETE once for each clip!
+         * It happens twice in following scenario.
+         * To make the clips in preview storyboard are played and switched smoothly,
+         * PreviewController uses two PreviewPlayer instances and one AudioPlayer.
+         * The two PreviewPlayer use the same AudioPlayer to play the audio,
+         * and change the audio source of the AudioPlayer.
+         * If the audio source of current playing clip and next clip are dummy
+         * audio source(image or video without audio), it will not change the audio source
+         * to avoid the "audio glitch", and keep using the current audio source.
+         * When the video of current clip reached the EOS, PreviewPlayer will set EOS flag
+         * for video and audio, and it will notify MEDIA_PLAYBACK_COMPLETE.
+         * But the audio(dummy audio source) is still playing(for next clip),
+         * and when it reached the EOS, and video reached EOS,
+         * PreviewPlayer will notify MEDIA_PLAYBACK_COMPLETE again. */
+        if (!(mFlags & INFORMED_AV_EOS)) {
+            notifyListener_l(MEDIA_PLAYBACK_COMPLETE);
+            mFlags |= INFORMED_AV_EOS;
+        }
+        mFlags |= AT_EOS;
+        ALOGV("onStreamDone end");
+        return;
+    }
+}
+
+
+// Core playback start path; caller holds mLock. Prepares on demand,
+// creates or re-binds the shared audio player, primes image (JPG)
+// sources, kicks off the video event pump, and honors a pending seek.
+// Fix: the change-source failure path assigned mAudioPlayer = NULL
+// twice (copy/paste duplicate); collapsed to a single assignment. The
+// player object itself is owned by PreviewController and must not be
+// deleted here.
+status_t PreviewPlayer::play_l() {
+    ALOGV("play_l");
+
+    mFlags &= ~SEEK_PREVIEW;
+
+    if (mFlags & PLAYING) {
+        return OK;
+    }
+    mStartNextPlayer = false;
+
+    if (!(mFlags & PREPARED)) {
+        status_t err = prepare_l();
+
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    mFlags |= PLAYING;
+    mFlags |= FIRST_FRAME;
+
+    bool deferredAudioSeek = false;
+
+    if (mAudioSource != NULL) {
+        if (mAudioPlayer == NULL) {
+            if (mAudioSink != NULL) {
+                // First start on this instance: create the audio player
+                // and hand it the mixing parameters for this clip.
+                mAudioPlayer = new VideoEditorAudioPlayer(mAudioSink, this);
+                mAudioPlayer->setSource(mAudioSource);
+
+                mAudioPlayer->setAudioMixSettings(
+                        mPreviewPlayerAudioMixSettings);
+
+                mAudioPlayer->setAudioMixPCMFileHandle(
+                        mAudioMixPCMFileHandle);
+
+                mAudioPlayer->setAudioMixStoryBoardSkimTimeStamp(
+                        mAudioMixStoryBoardTS, mCurrentMediaBeginCutTime,
+                        mCurrentMediaVolumeValue);
+
+                mFlags |= AUDIOPLAYER_STARTED;
+                // We've already started the MediaSource in order to enable
+                // the prefetcher to read its data.
+                status_t err = mAudioPlayer->start(
+                        true /* sourceAlreadyStarted */);
+
+                if (err != OK) {
+                    // Not deleted here: PreviewController owns the player.
+                    mAudioPlayer = NULL;
+
+                    mFlags &= ~(PLAYING | FIRST_FRAME);
+                    return err;
+                }
+
+                mTimeSource = mAudioPlayer;
+                mFlags |= AUDIO_RUNNING;
+                deferredAudioSeek = true;
+                mWatchForAudioSeekComplete = false;
+                mWatchForAudioEOS = true;
+            }
+        } else {
+            // Re-using the shared audio player from the previous clip.
+            bool isAudioPlayerStarted = mAudioPlayer->isStarted();
+
+            if (mIsChangeSourceRequired == true) {
+                ALOGV("play_l: Change audio source required");
+
+                if (isAudioPlayerStarted == true) {
+                    mAudioPlayer->pause();
+                }
+
+                mAudioPlayer->setSource(mAudioSource);
+                mAudioPlayer->setObserver(this);
+
+                mAudioPlayer->setAudioMixSettings(
+                        mPreviewPlayerAudioMixSettings);
+
+                mAudioPlayer->setAudioMixStoryBoardSkimTimeStamp(
+                        mAudioMixStoryBoardTS, mCurrentMediaBeginCutTime,
+                        mCurrentMediaVolumeValue);
+
+                if (isAudioPlayerStarted == true) {
+                    mAudioPlayer->resume();
+                } else {
+                    status_t err = mAudioPlayer->start(true);
+                    if (err != OK) {
+                        // Not deleted here: PreviewController owns the player.
+                        mAudioPlayer = NULL;
+
+                        mFlags &= ~(PLAYING | FIRST_FRAME);
+                        return err;
+                    }
+                }
+            } else {
+                ALOGV("play_l: No Source change required");
+                mAudioPlayer->setAudioMixStoryBoardSkimTimeStamp(
+                        mAudioMixStoryBoardTS, mCurrentMediaBeginCutTime,
+                        mCurrentMediaVolumeValue);
+
+                mAudioPlayer->resume();
+            }
+
+            mFlags |= AUDIOPLAYER_STARTED;
+            mFlags |= AUDIO_RUNNING;
+            mTimeSource = mAudioPlayer;
+            deferredAudioSeek = true;
+            mWatchForAudioSeekComplete = false;
+            mWatchForAudioEOS = true;
+        }
+    }
+
+    // No audio clock available: fall back to the system clock.
+    if (mTimeSource == NULL && mAudioPlayer == NULL) {
+        mTimeSource = &mSystemTimeSource;
+    }
+
+    // Set the seek option for Image source files and read.
+    // This resets the timestamping for image play
+    if (mIsVideoSourceJpg) {
+        MediaSource::ReadOptions options;
+        MediaBuffer *aLocalBuffer;
+        options.setSeekTo(mSeekTimeUs);
+        mVideoSource->read(&aLocalBuffer, &options);
+        aLocalBuffer->release();
+    }
+
+    if (mVideoSource != NULL) {
+        // Kick off video playback
+        postVideoEvent_l();
+    }
+
+    if (deferredAudioSeek) {
+        // If there was a seek request while we were paused
+        // and we're just starting up again, honor the request now.
+        seekAudioIfNecessary_l();
+    }
+
+    if (mFlags & AT_EOS) {
+        // Legacy behaviour, if a stream finishes playing and then
+        // is started again, we play from the start...
+        seekTo_l(0);
+    }
+
+    return OK;
+}
+
+
+// Lazily create the render input on the native-window renderer.
+// Requires a surface to have been set; otherwise this is a no-op.
+status_t PreviewPlayer::initRenderer_l() {
+    if (mSurface == NULL) {
+        return OK;
+    }
+    if (mVideoRenderer == NULL) {
+        mVideoRenderer = mNativeWindowRenderer->createRenderInput();
+        if (mVideoSource != NULL) {
+            // Push the current format so the renderer sizes itself.
+            updateSizeToRender(mVideoSource->getFormat());
+        }
+    }
+    return OK;
+}
+
+
+// Public seek entry point. Seeks are honored for seekable containers
+// and for still-image (JPG) sources; otherwise silently ignored.
+status_t PreviewPlayer::seekTo(int64_t timeUs) {
+    Mutex::Autolock autoLock(mLock);
+
+    const bool seekable =
+            (mExtractorFlags & MediaExtractor::CAN_SEEK) || mIsVideoSourceJpg;
+    return seekable ? seekTo_l(timeUs) : OK;
+}
+
+
+// Report the configured clip dimensions. Fails with UNKNOWN_ERROR while
+// no clip/image has been set (width/height still at their -1 sentinel).
+status_t PreviewPlayer::getVideoDimensions(
+        int32_t *width, int32_t *height) const {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mVideoWidth >= 0 && mVideoHeight >= 0) {
+        *width = mVideoWidth;
+        *height = mVideoHeight;
+        return OK;
+    }
+    return UNKNOWN_ERROR;
+}
+
+
+// Build the audio decode chain for mAudioTrack. Raw PCM is used as-is;
+// everything else goes through OMXCodec wrapped in VideoEditorSRC for
+// sample-rate conversion. QCELP without a decoder is tolerated (returns
+// OK with no audio source) for legacy reasons.
+status_t PreviewPlayer::initAudioDecoder_l() {
+    sp<MetaData> meta = mAudioTrack->getFormat();
+    const char *mime;
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
+        // Raw PCM needs no decoder.
+        mAudioSource = mAudioTrack;
+    } else {
+        sp<MediaSource> aRawSource;
+        aRawSource = OMXCodec::Create(
+                mClient.interface(), mAudioTrack->getFormat(),
+                false, // createEncoder
+                mAudioTrack);
+
+        if(aRawSource != NULL) {
+            // Wrap with the sample-rate converter.
+            mAudioSource = new VideoEditorSRC(aRawSource);
+        }
+    }
+
+    if (mAudioSource != NULL) {
+        int64_t durationUs;
+        if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
+            setDuration_l(durationUs);
+        }
+        status_t err = mAudioSource->start();
+
+        if (err != OK) {
+            mAudioSource.clear();
+            return err;
+        }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
+        // For legacy reasons we're simply going to ignore the absence
+        // of an audio decoder for QCELP instead of aborting playback
+        // altogether.
+        return OK;
+    }
+
+    return mAudioSource != NULL ? OK : UNKNOWN_ERROR;
+}
+
+// Build the video decode chain for mVideoTrack via OMXCodec, targeting
+// the renderer's native window. `flags` selects codec preferences (e.g.
+// kHardwareCodecsOnly from the prepare path).
+status_t PreviewPlayer::initVideoDecoder_l(uint32_t flags) {
+    // Renderer must exist first: the codec renders into its window.
+    initRenderer_l();
+
+    if (mVideoRenderer == NULL) {
+        ALOGE("Cannot create renderer");
+        return UNKNOWN_ERROR;
+    }
+
+    mVideoSource = OMXCodec::Create(
+            mClient.interface(), mVideoTrack->getFormat(),
+            false,
+            mVideoTrack,
+            NULL, flags, mVideoRenderer->getTargetWindow());
+
+    if (mVideoSource != NULL) {
+        int64_t durationUs;
+        if (mVideoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
+            setDuration_l(durationUs);
+        }
+
+        updateSizeToRender(mVideoTrack->getFormat());
+
+        status_t err = mVideoSource->start();
+
+        if (err != OK) {
+            mVideoSource.clear();
+            return err;
+        }
+    }
+
+    return mVideoSource != NULL ? OK : UNKNOWN_ERROR;
+}
+
+
+// Main video pump, run on the event queue. One invocation decodes (at
+// most) one frame, synchronizes it against the audio clock (or system
+// clock when audio has ended / for image sources), applies storyboard
+// effects, renders it, and re-arms itself until the clip's end-cut time.
+void PreviewPlayer::onVideoEvent() {
+    uint32_t i=0;
+    // NOTE(review): err1 and imageFrameTimeUs appear unused in this
+    // function -- candidates for removal.
+    M4OSA_ERR err1 = M4NO_ERROR;
+    int64_t imageFrameTimeUs = 0;
+
+    Mutex::Autolock autoLock(mLock);
+    if (!mVideoEventPending) {
+        // The event has been cancelled in reset_l() but had already
+        // been scheduled for execution at that time.
+        return;
+    }
+    mVideoEventPending = false;
+
+    if (mFlags & SEEK_PREVIEW) {
+        mFlags &= ~SEEK_PREVIEW;
+        return;
+    }
+
+    TimeSource *ts_st =  &mSystemTimeSource;
+    int64_t timeStartUs = ts_st->getRealTimeUs();
+
+    if (mSeeking != NO_SEEK) {
+
+        if(mAudioSource != NULL) {
+
+            // We're going to seek the video source first, followed by
+            // the audio source.
+            // In order to avoid jumps in the DataSource offset caused by
+            // the audio codec prefetching data from the old locations
+            // while the video codec is already reading data from the new
+            // locations, we'll "pause" the audio source, causing it to
+            // stop reading input data until a subsequent seek.
+
+            if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
+                mAudioPlayer->pause();
+                mFlags &= ~AUDIO_RUNNING;
+            }
+            mAudioSource->pause();
+        }
+    }
+
+    // Decode loop: pull buffers until we have a renderable frame, an
+    // error, or EOS. Empty buffers, pre-seek frames and frames before
+    // the clip's begin-cut time are discarded.
+    if (!mVideoBuffer) {
+        MediaSource::ReadOptions options;
+        if (mSeeking != NO_SEEK) {
+            ALOGV("LV PLAYER seeking to %lld us (%.2f secs)", mSeekTimeUs,
+                    mSeekTimeUs / 1E6);
+
+            options.setSeekTo(
+                    mSeekTimeUs, MediaSource::ReadOptions::SEEK_CLOSEST);
+        }
+        for (;;) {
+            status_t err = mVideoSource->read(&mVideoBuffer, &options);
+            options.clearSeekTo();
+
+            if (err != OK) {
+                CHECK(!mVideoBuffer);
+
+                if (err == INFO_FORMAT_CHANGED) {
+                    ALOGV("LV PLAYER VideoSource signalled format change");
+                    notifyVideoSize_l();
+
+                    if (mVideoRenderer != NULL) {
+                        mVideoRendererIsPreview = false;
+                        err = initRenderer_l();
+                        if (err != OK) {
+                            postStreamDoneEvent_l(err);
+                        }
+
+                    }
+
+                    updateSizeToRender(mVideoSource->getFormat());
+                    continue;
+                }
+                // So video playback is complete, but we may still have
+                // a seek request pending that needs to be applied to the audio track
+                if (mSeeking != NO_SEEK) {
+                    ALOGV("video stream ended while seeking!");
+                }
+                finishSeekIfNecessary(-1);
+                ALOGV("PreviewPlayer: onVideoEvent EOS reached.");
+                mFlags |= VIDEO_AT_EOS;
+                mFlags |= AUDIO_AT_EOS;
+                mOverlayUpdateEventPosted = false;
+                postStreamDoneEvent_l(err);
+                // Set the last decoded timestamp to duration
+                mDecodedVideoTs = (mPlayEndTimeMsec*1000LL);
+                return;
+            }
+
+            if (mVideoBuffer->range_length() == 0) {
+                // Some decoders, notably the PV AVC software decoder
+                // return spurious empty buffers that we just want to ignore.
+
+                mVideoBuffer->release();
+                mVideoBuffer = NULL;
+                continue;
+            }
+
+            int64_t videoTimeUs;
+            CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &videoTimeUs));
+
+            if (mSeeking != NO_SEEK) {
+                if (videoTimeUs < mSeekTimeUs) {
+                    // buffers are before seek time
+                    // ignore them
+                    mVideoBuffer->release();
+                    mVideoBuffer = NULL;
+                    continue;
+                }
+            } else {
+                if((videoTimeUs/1000) < mPlayBeginTimeMsec) {
+                    // Frames are before begin cut time
+                    // Donot render
+                    mVideoBuffer->release();
+                    mVideoBuffer = NULL;
+                    continue;
+                }
+            }
+            break;
+        }
+    }
+
+    mNumberDecVideoFrames++;
+
+    int64_t timeUs;
+    CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
+    setPosition_l(timeUs);
+
+    // With <= 1.5s of playback left, tell the controller to spin up the
+    // next PreviewPlayer for a gapless clip transition (0xAAAAAAAA is
+    // the private "start next player" message).
+    if (!mStartNextPlayer) {
+        int64_t playbackTimeRemaining = (mPlayEndTimeMsec * 1000LL) - timeUs;
+        if (playbackTimeRemaining <= 1500000) {
+            //When less than 1.5 sec of playback left
+            // send notification to start next player
+
+            mStartNextPlayer = true;
+            notifyListener_l(0xAAAAAAAA);
+        }
+    }
+
+    SeekType wasSeeking = mSeeking;
+    finishSeekIfNecessary(timeUs);
+    if (mAudioPlayer != NULL && !(mFlags & (AUDIO_RUNNING))) {
+        status_t err = startAudioPlayer_l();
+        if (err != OK) {
+            ALOGE("Starting the audio player failed w/ err %d", err);
+            return;
+        }
+    }
+
+    // Pick the clock to sync against: system clock once audio hit EOS.
+    TimeSource *ts = (mFlags & AUDIO_AT_EOS) ? &mSystemTimeSource : mTimeSource;
+
+    if(ts == NULL) {
+        mVideoBuffer->release();
+        mVideoBuffer = NULL;
+        return;
+    }
+
+    // A/V sync: drop frames > 40ms late, delay frames > 25ms early.
+    // Image sources skip this entirely (fixed 33ms cadence below).
+    if(!mIsVideoSourceJpg) {
+        if (mFlags & FIRST_FRAME) {
+            mFlags &= ~FIRST_FRAME;
+
+            mTimeSourceDeltaUs = ts->getRealTimeUs() - timeUs;
+        }
+
+        int64_t realTimeUs, mediaTimeUs;
+        if (!(mFlags & AUDIO_AT_EOS) && mAudioPlayer != NULL
+            && mAudioPlayer->getMediaTimeMapping(&realTimeUs, &mediaTimeUs)) {
+            mTimeSourceDeltaUs = realTimeUs - mediaTimeUs;
+        }
+
+        int64_t nowUs = ts->getRealTimeUs() - mTimeSourceDeltaUs;
+
+        int64_t latenessUs = nowUs - timeUs;
+
+        if (wasSeeking != NO_SEEK) {
+            // Let's display the first frame after seeking right away.
+            latenessUs = 0;
+        }
+        ALOGV("Audio time stamp = %lld and video time stamp = %lld",
+                ts->getRealTimeUs(),timeUs);
+        if (latenessUs > 40000) {
+            // We're more than 40ms late.
+
+            ALOGV("LV PLAYER we're late by %lld us (%.2f secs)",
+                    latenessUs, latenessUs / 1E6);
+
+            mVideoBuffer->release();
+            mVideoBuffer = NULL;
+            postVideoEvent_l(0);
+            return;
+        }
+
+        if (latenessUs < -25000) {
+            // We're more than 25ms early.
+            ALOGV("We're more than 25ms early, lateness %lld", latenessUs);
+
+            postVideoEvent_l(25000);
+            return;
+        }
+    }
+
+    if (mVideoRendererIsPreview || mVideoRenderer == NULL) {
+        mVideoRendererIsPreview = false;
+
+        status_t err = initRenderer_l();
+        if (err != OK) {
+            postStreamDoneEvent_l(err);
+        }
+    }
+
+    // If timestamp exceeds endCutTime of clip, donot render
+    if((timeUs/1000) > mPlayEndTimeMsec) {
+        mVideoBuffer->release();
+        mVideoBuffer = NULL;
+        mFlags |= VIDEO_AT_EOS;
+        mFlags |= AUDIO_AT_EOS;
+        ALOGV("PreviewPlayer: onVideoEvent timeUs > mPlayEndTime; send EOS..");
+        mOverlayUpdateEventPosted = false;
+        // Set the last decoded timestamp to duration
+        mDecodedVideoTs = (mPlayEndTimeMsec*1000LL);
+        postStreamDoneEvent_l(ERROR_END_OF_STREAM);
+        return;
+    }
+    // Capture the frame timestamp to be rendered
+    mDecodedVideoTs = timeUs;
+
+    // Post processing to apply video effects
+    for(i=0;i<mNumberEffects;i++) {
+        // First check if effect starttime matches the clip being previewed
+        if((mEffectsSettings[i].uiStartTime < (mDecVideoTsStoryBoard/1000)) ||
+        (mEffectsSettings[i].uiStartTime >=
+         ((mDecVideoTsStoryBoard/1000) + mPlayEndTimeMsec - mPlayBeginTimeMsec)))
+        {
+            // This effect doesn't belong to this clip, check next one
+            continue;
+        }
+        // Check if effect applies to this particular frame timestamp
+        if((mEffectsSettings[i].uiStartTime <=
+        (((timeUs+mDecVideoTsStoryBoard)/1000)-mPlayBeginTimeMsec)) &&
+            ((mEffectsSettings[i].uiStartTime+mEffectsSettings[i].uiDuration) >=
+             (((timeUs+mDecVideoTsStoryBoard)/1000)-mPlayBeginTimeMsec))
+              && (mEffectsSettings[i].uiDuration != 0)) {
+            setVideoPostProcessingNode(
+                 mEffectsSettings[i].VideoEffectType, TRUE);
+        }
+        else {
+            setVideoPostProcessingNode(
+                 mEffectsSettings[i].VideoEffectType, FALSE);
+        }
+    }
+
+    //Provide the overlay Update indication when there is an overlay effect
+    if (mCurrentVideoEffect & VIDEO_EFFECT_FRAMING) {
+        mCurrentVideoEffect &= ~VIDEO_EFFECT_FRAMING; //never apply framing here.
+        if (!mOverlayUpdateEventPosted) {
+            // Find the effect in effectSettings array
+            M4OSA_UInt32 index;
+            for (index = 0; index < mNumberEffects; index++) {
+                M4OSA_UInt32 timeMs = mDecodedVideoTs/1000;
+                M4OSA_UInt32 timeOffset = mDecVideoTsStoryBoard/1000;
+                if(mEffectsSettings[index].VideoEffectType ==
+                    (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Framing) {
+                    if (((mEffectsSettings[index].uiStartTime + 1) <=
+                        timeMs + timeOffset - mPlayBeginTimeMsec) &&
+                        ((mEffectsSettings[index].uiStartTime - 1 +
+                            mEffectsSettings[index].uiDuration) >=
+                            timeMs + timeOffset - mPlayBeginTimeMsec))
+                    {
+                        break;
+                    }
+                }
+            }
+            if (index < mNumberEffects) {
+                mCurrFramingEffectIndex = index;
+                mOverlayUpdateEventPosted = true;
+                postOverlayUpdateEvent_l();
+                ALOGV("Framing index = %ld", mCurrFramingEffectIndex);
+            } else {
+                ALOGV("No framing effects found");
+            }
+        }
+
+    } else if (mOverlayUpdateEventPosted) {
+        //Post the event when the overlay is no more valid
+        ALOGV("Overlay is Done");
+        mOverlayUpdateEventPosted = false;
+        postOverlayUpdateEvent_l();
+    }
+
+    if (mVideoRenderer != NULL) {
+        mVideoRenderer->render(mVideoBuffer, mCurrentVideoEffect,
+                mRenderingMode, mIsVideoSourceJpg);
+    }
+
+    mVideoBuffer->release();
+    mVideoBuffer = NULL;
+
+    // Post progress callback based on callback interval set
+    if(mNumberDecVideoFrames >= mProgressCbInterval) {
+        postProgressCallbackEvent_l();
+        mNumberDecVideoFrames = 0;  // reset counter
+    }
+
+    // if reached EndCutTime of clip, post EOS event
+    if((timeUs/1000) >= mPlayEndTimeMsec) {
+        ALOGV("PreviewPlayer: onVideoEvent EOS.");
+        mFlags |= VIDEO_AT_EOS;
+        mFlags |= AUDIO_AT_EOS;
+        mOverlayUpdateEventPosted = false;
+        // Set the last decoded timestamp to duration
+        mDecodedVideoTs = (mPlayEndTimeMsec*1000LL);
+        postStreamDoneEvent_l(ERROR_END_OF_STREAM);
+    }
+    else {
+        if ((wasSeeking != NO_SEEK) && (mFlags & SEEK_PREVIEW)) {
+            mFlags &= ~SEEK_PREVIEW;
+            return;
+        }
+
+        // Re-arm: video clips re-post immediately (sync happens above);
+        // image sources tick at a fixed ~30fps (33ms).
+        if(!mIsVideoSourceJpg) {
+            postVideoEvent_l(0);
+        }
+        else {
+            postVideoEvent_l(33000);
+        }
+    }
+}
+
+// Synchronous prepare: serialize with the main lock, then delegate.
+status_t PreviewPlayer::prepare() {
+    ALOGV("prepare");
+    Mutex::Autolock autoLock(mLock);
+    return prepare_l();
+}
+
+// Synchronous prepare implementation (mLock held): kicks off the async
+// prepare machinery and blocks on mPreparedCondition until it finishes,
+// returning its result. Fails if an async prepare is already pending.
+status_t PreviewPlayer::prepare_l() {
+    ALOGV("prepare_l");
+    if (mFlags & PREPARED) {
+        return OK;
+    }
+
+    if (mFlags & PREPARING) {
+        return UNKNOWN_ERROR;
+    }
+
+    // Mark this as a synchronous prepare so finishAsyncPrepare_l() does
+    // not fire listener notifications.
+    mIsAsyncPrepare = false;
+    status_t err = prepareAsync_l();
+
+    if (err != OK) {
+        return err;
+    }
+
+    while (mFlags & PREPARING) {
+        mPreparedCondition.wait(mLock);
+    }
+
+    return mPrepareResult;
+}
+
+// Asynchronous prepare: serialize with the main lock, then delegate.
+status_t PreviewPlayer::prepareAsync() {
+    ALOGV("prepareAsync");
+    Mutex::Autolock autoLock(mLock);
+    return prepareAsync_l();
+}
+
+// Queue the asynchronous prepare event (mLock held). Starts the event
+// queue on first use and refuses a second prepare while one is pending.
+status_t PreviewPlayer::prepareAsync_l() {
+    ALOGV("prepareAsync_l");
+    if (mFlags & PREPARING) {
+        return UNKNOWN_ERROR;  // async prepare already pending
+    }
+
+    if (!mQueueStarted) {
+        mQueue.start();
+        mQueueStarted = true;
+    }
+
+    mFlags |= PREPARING;
+    mAsyncPrepareEvent = new PreviewPlayerEvent(
+            this, &PreviewPlayer::onPrepareAsyncEvent);
+
+    mQueue.postEvent(mAsyncPrepareEvent);
+
+    return OK;
+}
+
+// Resolve mUri into a DataSource and (unless it is a raw ".rgb" image)
+// an MPEG4 extractor; falls back to the JPG/image path when no
+// extractor can be created.
+// Fix: guard the ".rgb" suffix check against URIs shorter than four
+// characters -- the old code computed mUri + (uriLen - 4) which read
+// before the start of the string for short URIs.
+status_t PreviewPlayer::finishSetDataSource_l() {
+    sp<DataSource> dataSource;
+    sp<MediaExtractor> extractor;
+
+    dataSource = DataSource::CreateFromURI(mUri.string(), NULL);
+
+    if (dataSource == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    //If file type is .rgb, then no need to check for Extractor
+    int uriLen = strlen(mUri);
+    if (uriLen >= 4 && !strncasecmp(mUri + (uriLen - 4), ".rgb", 4)) {
+        extractor = NULL;
+    } else {
+        extractor = MediaExtractor::Create(dataSource,
+                MEDIA_MIMETYPE_CONTAINER_MPEG4);
+    }
+
+    if (extractor == NULL) {
+        ALOGV("finishSetDataSource_l: failed to create extractor");
+        return setDataSource_l_jpg();
+    }
+
+    return setDataSource_l(extractor);
+}
+
+// Event-queue handler for the async prepare: resolves the data source,
+// then instantiates the video (hardware-only) and audio decoders that
+// are not yet set up. Any failure aborts the prepare with that error.
+void PreviewPlayer::onPrepareAsyncEvent() {
+    Mutex::Autolock autoLock(mLock);
+    ALOGV("onPrepareAsyncEvent");
+
+    if (mFlags & PREPARE_CANCELLED) {
+        ALOGV("prepare was cancelled before doing anything");
+        abortPrepare(UNKNOWN_ERROR);
+        return;
+    }
+
+    if (mUri.size() > 0) {
+        status_t err = finishSetDataSource_l();
+
+        if (err != OK) {
+            abortPrepare(err);
+            return;
+        }
+    }
+
+    if (mVideoTrack != NULL && mVideoSource == NULL) {
+        status_t err = initVideoDecoder_l(OMXCodec::kHardwareCodecsOnly);
+
+        if (err != OK) {
+            abortPrepare(err);
+            return;
+        }
+    }
+
+    if (mAudioTrack != NULL && mAudioSource == NULL) {
+        status_t err = initAudioDecoder_l();
+
+        if (err != OK) {
+            abortPrepare(err);
+            return;
+        }
+    }
+    finishAsyncPrepare_l();
+
+}
+
+// Complete a (possibly synchronous) prepare: for async prepares, report
+// the video size and MEDIA_PREPARED; then flip the state flags and wake
+// anyone blocked in prepare_l().
+void PreviewPlayer::finishAsyncPrepare_l() {
+    ALOGV("finishAsyncPrepare_l");
+    if (mIsAsyncPrepare) {
+        if (mVideoSource == NULL) {
+            // Audio-only: report a zero video size.
+            notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
+        } else {
+            notifyVideoSize_l();
+        }
+        notifyListener_l(MEDIA_PREPARED);
+    }
+
+    mPrepareResult = OK;
+    mFlags &= ~(PREPARING|PREPARE_CANCELLED);
+    mFlags |= PREPARED;
+    mAsyncPrepareEvent = NULL;
+    mPreparedCondition.broadcast();
+}
+
+// Grab the control lock that serializes onStreamDone() against
+// PreviewController::stopPreview(). Must be paired with releaseLock().
+void PreviewPlayer::acquireLock() {
+    ALOGV("acquireLock");
+    mLockControl.lock();
+}
+
+// Release the control lock taken by acquireLock().
+void PreviewPlayer::releaseLock() {
+    ALOGV("releaseLock");
+    mLockControl.unlock();
+}
+
+// Store the storyboard effect table supplied by the engine. The array
+// is owned by the caller and must stay valid for the whole playback.
+status_t PreviewPlayer::loadEffectsSettings(
+        M4VSS3GPP_EffectSettings* pEffectSettings, int nEffects) {
+    ALOGV("loadEffectsSettings");
+    mEffectsSettings = pEffectSettings;
+    mNumberEffects = nEffects;
+    return OK;
+}
+
+// Remember the audio-mixing settings to hand to the audio player when
+// playback starts. Caller retains ownership of the structure.
+status_t PreviewPlayer::loadAudioMixSettings(
+        M4xVSS_AudioMixingSettings* pAudioMixSettings) {
+    ALOGV("loadAudioMixSettings");
+    mPreviewPlayerAudioMixSettings = pAudioMixSettings;
+    return OK;
+}
+
+// Remember the handle of the background-music PCM file used for mixing.
+status_t PreviewPlayer::setAudioMixPCMFileHandle(
+        M4OSA_Context pAudioMixPCMFileHandle) {
+    ALOGV("setAudioMixPCMFileHandle");
+    mAudioMixPCMFileHandle = pAudioMixPCMFileHandle;
+    return OK;
+}
+
+// Capture the storyboard-level mixing parameters for the current clip:
+// the storyboard skim timestamp, the clip's begin-cut time, and the
+// primary-track volume to apply.
+status_t PreviewPlayer::setAudioMixStoryBoardParam(
+        M4OSA_UInt32 audioMixStoryBoardTS,
+        M4OSA_UInt32 currentMediaBeginCutTime,
+        M4OSA_UInt32 primaryTrackVolValue) {
+    ALOGV("setAudioMixStoryBoardParam");
+    mCurrentMediaVolumeValue = primaryTrackVolValue;
+    mCurrentMediaBeginCutTime = currentMediaBeginCutTime;
+    mAudioMixStoryBoardTS = audioMixStoryBoardTS;
+    return OK;
+}
+
+// Record the clip's begin-cut time in ms; frames earlier than this are
+// decoded but never rendered.
+status_t PreviewPlayer::setPlaybackBeginTime(uint32_t msec) {
+    mPlayBeginTimeMsec = msec;
+    return OK;
+}
+
+// Record the clip's end-cut time in ms; playback posts EOS once the
+// decoded timestamp reaches it.
+status_t PreviewPlayer::setPlaybackEndTime(uint32_t msec) {
+    mPlayEndTimeMsec = msec;
+    return OK;
+}
+
+// Record where this clip begins on the overall storyboard timeline and
+// derive the microsecond offset used for progress/overlay reporting.
+status_t PreviewPlayer::setStoryboardStartTime(uint32_t msec) {
+    mStoryboardStartTimeMsec = msec;
+    mDecVideoTsStoryBoard = mStoryboardStartTimeMsec * 1000LL;
+    return OK;
+}
+
+// Set how many decoded frames elapse between progress callbacks.
+status_t PreviewPlayer::setProgressCallbackInterval(uint32_t cbInterval) {
+    mProgressCbInterval = cbInterval;
+    return OK;
+}
+
+
+// Remember how frames are to be fitted to the output (resize / crop /
+// black borders) and derive the concrete output width/height from the
+// symbolic frame-size value.
+status_t PreviewPlayer::setMediaRenderingMode(
+        M4xVSS_MediaRendering mode,
+        M4VIDEOEDITING_VideoFrameSize outputVideoSize) {
+    mRenderingMode = mode;
+
+    /* get the video width and height by resolution */
+    return getVideoSizeByResolution(outputVideoSize,
+            &mOutputVideoWidth, &mOutputVideoHeight);
+}
+
+// Rewind the storyboard offset used by progress callbacks back to this
+// clip's storyboard start time.
+status_t PreviewPlayer::resetJniCallbackTimeStamp() {
+    mDecVideoTsStoryBoard = mStoryboardStartTimeMsec * 1000LL;
+    return OK;
+}
+
+// Queue a progress callback event unless one is already outstanding.
+void PreviewPlayer::postProgressCallbackEvent_l() {
+    if (!mProgressCbEventPending) {
+        mProgressCbEventPending = true;
+        mQueue.postEvent(mProgressCbEvent);
+    }
+}
+
+
+// Event-queue handler for progress callbacks: report playback progress
+// in storyboard time via MEDIA_INFO.
+void PreviewPlayer::onProgressCbEvent() {
+    Mutex::Autolock autoLock(mLock);
+    if (!mProgressCbEventPending) {
+        return;
+    }
+    mProgressCbEventPending = false;
+
+    // When decoding resumed from an earlier I-frame (timestamp before
+    // the begin-cut point), clamp the report to the storyboard offset.
+    int64_t progressMs;
+    if ((mDecodedVideoTs / 1000) < mPlayBeginTimeMsec) {
+        progressMs = mDecVideoTsStoryBoard / 1000;
+    } else {
+        progressMs = ((mDecodedVideoTs + mDecVideoTsStoryBoard) / 1000)
+                - mPlayBeginTimeMsec;
+    }
+    notifyListener_l(MEDIA_INFO, 0, progressMs);
+}
+
+// Queue an overlay-update event unless one is already outstanding.
+void PreviewPlayer::postOverlayUpdateEvent_l() {
+    if (!mOverlayUpdateEventPending) {
+        mOverlayUpdateEventPending = true;
+        mQueue.postEvent(mOverlayUpdateEvent);
+    }
+}
+
+// Event-queue handler for overlay updates: tell the listener whether a
+// framing overlay just became active (1) or expired (0). 0xBBBBBBBB is
+// the private "overlay update" message id.
+void PreviewPlayer::onUpdateOverlayEvent() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (!mOverlayUpdateEventPending) {
+        return;
+    }
+    mOverlayUpdateEventPending = false;
+
+    const int updateState = mOverlayUpdateEventPosted ? 1 : 0;
+    notifyListener_l(0xBBBBBBBB, updateState, mCurrFramingEffectIndex);
+}
+
+
+// Enable or disable one effect in the renderer's effect bitmask.
+// Translates the VSS effect type to the local VIDEO_EFFECT_* bit; the
+// fifties effect additionally latches mIsFiftiesEffectStarted the first
+// time it is turned on.
+void PreviewPlayer::setVideoPostProcessingNode(
+        M4VSS3GPP_VideoEffectType type, M4OSA_Bool enable) {
+
+    // Map M4VSS3GPP_VideoEffectType to the local bitmask value.
+    uint32_t effectBit;
+    switch (type) {
+        case M4VSS3GPP_kVideoEffectType_FadeFromBlack:
+            effectBit = VIDEO_EFFECT_FADEFROMBLACK;
+            break;
+        case M4VSS3GPP_kVideoEffectType_FadeToBlack:
+            effectBit = VIDEO_EFFECT_FADETOBLACK;
+            break;
+        case M4xVSS_kVideoEffectType_BlackAndWhite:
+            effectBit = VIDEO_EFFECT_BLACKANDWHITE;
+            break;
+        case M4xVSS_kVideoEffectType_Pink:
+            effectBit = VIDEO_EFFECT_PINK;
+            break;
+        case M4xVSS_kVideoEffectType_Green:
+            effectBit = VIDEO_EFFECT_GREEN;
+            break;
+        case M4xVSS_kVideoEffectType_Sepia:
+            effectBit = VIDEO_EFFECT_SEPIA;
+            break;
+        case M4xVSS_kVideoEffectType_Negative:
+            effectBit = VIDEO_EFFECT_NEGATIVE;
+            break;
+        case M4xVSS_kVideoEffectType_Framing:
+            effectBit = VIDEO_EFFECT_FRAMING;
+            break;
+        case M4xVSS_kVideoEffectType_Fifties:
+            effectBit = VIDEO_EFFECT_FIFTIES;
+            break;
+        case M4xVSS_kVideoEffectType_ColorRGB16:
+            effectBit = VIDEO_EFFECT_COLOR_RGB16;
+            break;
+        case M4xVSS_kVideoEffectType_Gradient:
+            effectBit = VIDEO_EFFECT_GRADIENT;
+            break;
+        default:
+            effectBit = VIDEO_EFFECT_NONE;
+            break;
+    }
+
+    if (enable == M4OSA_TRUE) {
+        // Only act on a transition from "off" to "on".
+        if (!(mCurrentVideoEffect & effectBit)) {
+            mCurrentVideoEffect |= effectBit;
+            if (effectBit == VIDEO_EFFECT_FIFTIES) {
+                mIsFiftiesEffectStarted = true;
+            }
+        }
+    } else {
+        // Clearing an already-clear bit is a no-op, so no guard needed.
+        mCurrentVideoEffect &= ~effectBit;
+    }
+}
+
+// Record the dimensions of an image (JPG/RGB) clip before preview.
+status_t PreviewPlayer::setImageClipProperties(uint32_t width, uint32_t height) {
+    mVideoHeight = height;
+    mVideoWidth = width;
+    return OK;
+}
+
+// Prime mVideoBuffer with the first renderable frame of the clip,
+// honoring a pending seek and skipping empty/pre-seek/pre-begin-cut
+// buffers, mirroring the read loop in onVideoEvent(). Returns OK even
+// at EOS (the stream-done event carries the status).
+status_t PreviewPlayer::readFirstVideoFrame() {
+    ALOGV("readFirstVideoFrame");
+
+    if (!mVideoBuffer) {
+        MediaSource::ReadOptions options;
+        if (mSeeking != NO_SEEK) {
+            ALOGV("seeking to %lld us (%.2f secs)", mSeekTimeUs,
+                    mSeekTimeUs / 1E6);
+
+            options.setSeekTo(
+                    mSeekTimeUs, MediaSource::ReadOptions::SEEK_CLOSEST);
+        }
+        for (;;) {
+            status_t err = mVideoSource->read(&mVideoBuffer, &options);
+            options.clearSeekTo();
+
+            if (err != OK) {
+                CHECK(!mVideoBuffer);
+
+                if (err == INFO_FORMAT_CHANGED) {
+                    ALOGV("VideoSource signalled format change");
+                    notifyVideoSize_l();
+
+                    if (mVideoRenderer != NULL) {
+                        mVideoRendererIsPreview = false;
+                        err = initRenderer_l();
+                        if (err != OK) {
+                            postStreamDoneEvent_l(err);
+                        }
+                    }
+
+                    updateSizeToRender(mVideoSource->getFormat());
+                    continue;
+                }
+                ALOGV("EOS reached.");
+                mFlags |= VIDEO_AT_EOS;
+                mFlags |= AUDIO_AT_EOS;
+                postStreamDoneEvent_l(err);
+                return OK;
+            }
+
+            if (mVideoBuffer->range_length() == 0) {
+                // Some decoders, notably the PV AVC software decoder
+                // return spurious empty buffers that we just want to ignore.
+
+                mVideoBuffer->release();
+                mVideoBuffer = NULL;
+                continue;
+            }
+
+            int64_t videoTimeUs;
+            CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &videoTimeUs));
+            if (mSeeking != NO_SEEK) {
+                if (videoTimeUs < mSeekTimeUs) {
+                    // buffers are before seek time
+                    // ignore them
+                    mVideoBuffer->release();
+                    mVideoBuffer = NULL;
+                    continue;
+                }
+            } else {
+                if ((videoTimeUs/1000) < mPlayBeginTimeMsec) {
+                    // buffers are before begin cut time
+                    // ignore them
+                    mVideoBuffer->release();
+                    mVideoBuffer = NULL;
+                    continue;
+                }
+            }
+            break;
+        }
+    }
+
+    // Publish the frame's timestamp as the current position.
+    int64_t timeUs;
+    CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
+    setPosition_l(timeUs);
+
+    mDecodedVideoTs = timeUs;
+
+    return OK;
+
+}
+
+// Return the storyboard-relative timestamp (ms) of the last decoded
+// frame, adjusted for the clip's begin-cut time.
+status_t PreviewPlayer::getLastRenderedTimeMs(uint32_t *lastRenderedTimeMs) {
+    int64_t storyboardMs = (mDecodedVideoTs + mDecVideoTsStoryBoard) / 1000;
+    *lastRenderedTimeMs = storyboardMs - mPlayBeginTimeMsec;
+    return OK;
+}
+
+// Forward the (possibly changed) video format to the renderer, if any.
+void PreviewPlayer::updateSizeToRender(sp<MetaData> meta) {
+    if (mVideoRenderer != NULL) {
+        mVideoRenderer->updateVideoSize(meta);
+    }
+}
+
+// Install the (weakly held) listener that receives player events.
+void PreviewPlayer::setListener(const wp<MediaPlayerBase> &listener) {
+    Mutex::Autolock autoLock(mLock);
+    mListener = listener;
+}
+
+// Stream sources are not supported by the preview player.
+status_t PreviewPlayer::setDataSource(const sp<IStreamSource> & /* source */) {
+    return INVALID_OPERATION;
+}
+
+// Public reset: serialize against other API calls, then tear down.
+void PreviewPlayer::reset() {
+    Mutex::Autolock autoLock(mLock);
+    reset_l();
+}
+
+// Full teardown used on destruction paths. Unlike reset_l(), this one
+// DOES delete mAudioPlayer and shuts the video decoder down via
+// shutdownVideoDecoder_l().
+void PreviewPlayer::clear_l() {
+    mDisplayWidth = 0;
+    mDisplayHeight = 0;
+
+    if (mFlags & PLAYING) {
+        updateBatteryUsage_l();
+    }
+
+    if (mFlags & PREPARING) {
+        mFlags |= PREPARE_CANCELLED;
+
+        if (mFlags & PREPARING_CONNECTED) {
+            // We are basically done preparing, we're just buffering
+            // enough data to start playback, we can safely interrupt that.
+            finishAsyncPrepare_l();
+        }
+    }
+
+    // Wait for any in-flight prepare to notice the cancel flag.
+    while (mFlags & PREPARING) {
+        mPreparedCondition.wait(mLock);
+    }
+
+    cancelPlayerEvents_l(true);
+
+    mAudioTrack.clear();
+    mVideoTrack.clear();
+
+    // Shutdown audio first, so that the response to the reset request
+    // appears to happen instantaneously as far as the user is concerned
+    // If we did this later, audio would continue playing while we
+    // shutdown the video-related resources and the player appear to
+    // not be as responsive to a reset request.
+    if (mAudioPlayer == NULL && mAudioSource != NULL) {
+        // If we had an audio player, it would have effectively
+        // taken possession of the audio source and stopped it when
+        // _it_ is stopped. Otherwise this is still our responsibility.
+        mAudioSource->stop();
+    }
+    mAudioSource.clear();
+
+    mTimeSource = NULL;
+
+    delete mAudioPlayer;
+    mAudioPlayer = NULL;
+
+    if (mVideoSource != NULL) {
+        shutdownVideoDecoder_l();
+    }
+
+    // Reset bookkeeping back to the "no data source" state.
+    mDurationUs = -1;
+    mFlags = 0;
+    mExtractorFlags = 0;
+    mTimeSourceDeltaUs = 0;
+    mVideoTimeUs = 0;
+
+    mSeeking = NO_SEEK;
+    mSeekNotificationSent = false;
+    mSeekTimeUs = 0;
+
+    mUri.setTo("");
+
+    mBitrate = -1;
+    mLastVideoTimeUs = -1;
+}
+
+// Forwards an event to the registered listener if it is still alive
+// (the weak reference must be promoted first). mLock must be held.
+void PreviewPlayer::notifyListener_l(int msg, int ext1, int ext2) {
+    if (mListener != NULL) {
+        sp<MediaPlayerBase> listener = mListener.promote();
+
+        if (listener != NULL) {
+            listener->sendEvent(msg, ext1, ext2);
+        }
+    }
+}
+
+// Periodic event handler: compares the audio clock with the last rendered
+// video timestamp and notifies the listener when video lags audio by more
+// than 300 ms, then re-posts itself.
+// NOTE(review): dereferences mAudioPlayer without a NULL check -- assumes
+// this event is only ever posted while an audio player exists; confirm.
+void PreviewPlayer::onVideoLagUpdate() {
+    Mutex::Autolock autoLock(mLock);
+    if (!mVideoLagEventPending) {
+        return;
+    }
+    mVideoLagEventPending = false;
+
+    int64_t audioTimeUs = mAudioPlayer->getMediaTimeUs();
+    int64_t videoLateByUs = audioTimeUs - mVideoTimeUs;
+
+    if (!(mFlags & VIDEO_AT_EOS) && videoLateByUs > 300000ll) {
+        ALOGV("video late by %lld ms.", videoLateByUs / 1000ll);
+
+        notifyListener_l(
+                MEDIA_INFO,
+                MEDIA_INFO_VIDEO_TRACK_LAGGING,
+                videoLateByUs / 1000ll);
+    }
+
+    postVideoLagEvent_l();
+}
+
+// Derives the displayable video size from the decoder's output format
+// (crop rect, optional display width/height overrides, rotation) and
+// reports it to the listener via MEDIA_SET_VIDEO_SIZE. mLock must be held.
+void PreviewPlayer::notifyVideoSize_l() {
+    sp<MetaData> meta = mVideoSource->getFormat();
+
+    int32_t vWidth, vHeight;
+    int32_t cropLeft, cropTop, cropRight, cropBottom;
+
+    CHECK(meta->findInt32(kKeyWidth, &vWidth));
+    CHECK(meta->findInt32(kKeyHeight, &vHeight));
+
+    mGivenWidth = vWidth;
+    mGivenHeight = vHeight;
+
+    // Without an explicit crop rect the whole decoded frame is displayable.
+    if (!meta->findRect(
+                kKeyCropRect, &cropLeft, &cropTop, &cropRight, &cropBottom)) {
+
+        cropLeft = cropTop = 0;
+        cropRight = vWidth - 1;
+        cropBottom = vHeight - 1;
+
+        ALOGD("got dimensions only %d x %d", vWidth, vHeight);
+    } else {
+        ALOGD("got crop rect %d, %d, %d, %d",
+             cropLeft, cropTop, cropRight, cropBottom);
+    }
+
+    mCropRect.left = cropLeft;
+    mCropRect.right = cropRight;
+    mCropRect.top = cropTop;
+    mCropRect.bottom = cropBottom;
+
+    // Optional display-size overrides (e.g. anamorphic content).
+    int32_t displayWidth;
+    if (meta->findInt32(kKeyDisplayWidth, &displayWidth)) {
+        ALOGV("Display width changed (%d=>%d)", mDisplayWidth, displayWidth);
+        mDisplayWidth = displayWidth;
+    }
+    int32_t displayHeight;
+    if (meta->findInt32(kKeyDisplayHeight, &displayHeight)) {
+        ALOGV("Display height changed (%d=>%d)", mDisplayHeight, displayHeight);
+        mDisplayHeight = displayHeight;
+    }
+
+    int32_t usableWidth = cropRight - cropLeft + 1;
+    int32_t usableHeight = cropBottom - cropTop + 1;
+    if (mDisplayWidth != 0) {
+        usableWidth = mDisplayWidth;
+    }
+    if (mDisplayHeight != 0) {
+        usableHeight = mDisplayHeight;
+    }
+
+    int32_t rotationDegrees;
+    if (!mVideoTrack->getFormat()->findInt32(
+                kKeyRotation, &rotationDegrees)) {
+        rotationDegrees = 0;
+    }
+
+    // A 90/270 degree rotation swaps the reported width and height.
+    if (rotationDegrees == 90 || rotationDegrees == 270) {
+        notifyListener_l(
+                MEDIA_SET_VIDEO_SIZE, usableHeight, usableWidth);
+    } else {
+        notifyListener_l(
+                MEDIA_SET_VIDEO_SIZE, usableWidth, usableHeight);
+    }
+}
+
+// Public pause entry point: clears the cache-underrun state and delegates
+// to pause_l() under the lock.
+status_t PreviewPlayer::pause() {
+    Mutex::Autolock autoLock(mLock);
+
+    mFlags &= ~CACHE_UNDERRUN;
+
+    return pause_l();
+}
+
+// Pauses playback (mLock must be held). When at_eos is true the audio sink
+// is allowed to drain its queued samples before pausing; otherwise audio is
+// paused immediately. No-op if not currently playing.
+status_t PreviewPlayer::pause_l(bool at_eos) {
+    if (!(mFlags & PLAYING)) {
+        return OK;
+    }
+
+    cancelPlayerEvents_l();
+
+    if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
+        if (at_eos) {
+            // If we played the audio stream to completion we
+            // want to make sure that all samples remaining in the audio
+            // track's queue are played out.
+            mAudioPlayer->pause(true /* playPendingSamples */);
+        } else {
+            mAudioPlayer->pause();
+        }
+
+        mFlags &= ~AUDIO_RUNNING;
+    }
+
+    mFlags &= ~PLAYING;
+    updateBatteryUsage_l();
+
+    return OK;
+}
+
+// A player buffering through a cache underrun still reports "playing".
+bool PreviewPlayer::isPlaying() const {
+    return (mFlags & PLAYING) || (mFlags & CACHE_UNDERRUN);
+}
+
+// Sets the output Surface and reconfigures the native window under the lock.
+void PreviewPlayer::setSurface(const sp<Surface> &surface) {
+    Mutex::Autolock autoLock(mLock);
+
+    mSurface = surface;
+    setNativeWindow_l(surface);
+}
+
+// Switches output to a SurfaceTexture, dropping any previous Surface.
+// A NULL texture clears mSurface but leaves the native window untouched.
+void PreviewPlayer::setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture) {
+    Mutex::Autolock autoLock(mLock);
+
+    mSurface.clear();
+    if (surfaceTexture != NULL) {
+        setNativeWindow_l(new SurfaceTextureClient(surfaceTexture));
+    }
+}
+
+// Releases the current video buffer, stops the video decoder and busy-waits
+// until the last strong reference to it is gone (mLock must be held).
+void PreviewPlayer::shutdownVideoDecoder_l() {
+    if (mVideoBuffer) {
+        mVideoBuffer->release();
+        mVideoBuffer = NULL;
+    }
+
+    mVideoSource->stop();
+
+    // The following hack is necessary to ensure that the OMX
+    // component is completely released by the time we may try
+    // to instantiate it again.
+    wp<MediaSource> tmp = mVideoSource;
+    mVideoSource.clear();
+    while (tmp.promote() != NULL) {
+        usleep(1000);
+    }
+    IPCThreadState::self()->flushCommands();
+}
+
+// Switches the output native window. If a video decoder is active it is
+// torn down and re-created against the new window, a seek back to the last
+// displayed frame is queued, and playback resumes if it was running.
+void PreviewPlayer::setNativeWindow_l(const sp<ANativeWindow> &native) {
+    mNativeWindow = native;
+
+    if (mVideoSource == NULL) {
+        return;
+    }
+
+    ALOGI("attempting to reconfigure to use new surface");
+
+    bool wasPlaying = (mFlags & PLAYING) != 0;
+
+    pause_l();
+
+    shutdownVideoDecoder_l();
+
+    CHECK_EQ(initVideoDecoder_l(), (status_t)OK);
+
+    // Restore the playback position so the new decoder shows the same frame.
+    if (mLastVideoTimeUs >= 0) {
+        mSeeking = SEEK;
+        mSeekNotificationSent = true;
+        mSeekTimeUs = mLastVideoTimeUs;
+        mFlags &= ~(AT_EOS | AUDIO_AT_EOS | VIDEO_AT_EOS);
+    }
+
+    if (wasPlaying) {
+        play_l();
+    }
+}
+
+// Records the audio sink used when the audio player is (re)created.
+void PreviewPlayer::setAudioSink(
+        const sp<MediaPlayerBase::AudioSink> &audioSink) {
+    Mutex::Autolock autoLock(mLock);
+
+    mAudioSink = audioSink;
+}
+
+// Sets or clears the LOOPING flag; always succeeds.
+status_t PreviewPlayer::setLooping(bool shouldLoop) {
+    Mutex::Autolock autoLock(mLock);
+
+    mFlags = mFlags & ~LOOPING;
+
+    if (shouldLoop) {
+        mFlags |= LOOPING;
+    }
+
+    return OK;
+}
+
+// Records the duration, keeping the largest value seen so far
+// (the duration is the max across tracks). mLock must be held.
+void PreviewPlayer::setDuration_l(int64_t durationUs) {
+    if (mDurationUs < 0 || durationUs > mDurationUs) {
+        mDurationUs = durationUs;
+    }
+}
+
+// Returns the known duration in microseconds, or UNKNOWN_ERROR if no
+// duration has been established yet.
+status_t PreviewPlayer::getDuration(int64_t *durationUs) {
+    Mutex::Autolock autoLock(mLock);
+    if (mDurationUs < 0) {
+        return UNKNOWN_ERROR;
+    }
+
+    *durationUs = mDurationUs;
+    return OK;
+}
+
+// Reports the current playback position: the pending seek target while
+// seeking, else the video clock while video is active, else the audio
+// clock, else 0.
+status_t PreviewPlayer::getPosition(int64_t *positionUs) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mSeeking != NO_SEEK) {
+        *positionUs = mSeekTimeUs;
+    } else if (mVideoSource != NULL
+            && (mAudioPlayer == NULL || !(mFlags & VIDEO_AT_EOS))) {
+        *positionUs = mVideoTimeUs;
+    } else if (mAudioPlayer != NULL) {
+        *positionUs = mAudioPlayer->getMediaTimeUs();
+    } else {
+        *positionUs = 0;
+    }
+
+    return OK;
+}
+
+// Updates the video clock (mLock must be held).
+void PreviewPlayer::setPosition_l(int64_t timeUs) {
+    mVideoTimeUs = timeUs;
+}
+
+// Initiates a seek (mLock must be held): records the target, clears EOS
+// flags, kicks the audio player if audio-only, and -- when paused -- sends
+// the SEEK_COMPLETE notification immediately and schedules a one-shot video
+// event to display the frame at the seek point.
+status_t PreviewPlayer::seekTo_l(int64_t timeUs) {
+    ALOGV("seekTo_l");
+    if (mFlags & CACHE_UNDERRUN) {
+        mFlags &= ~CACHE_UNDERRUN;
+        play_l();
+    }
+
+    if ((mFlags & PLAYING) && mVideoSource != NULL && (mFlags & VIDEO_AT_EOS)) {
+        // Video playback completed before, there's no pending
+        // video event right now. In order for this new seek
+        // to be honored, we need to post one.
+
+        postVideoEvent_l();
+    }
+
+    mSeeking = SEEK;
+    mSeekNotificationSent = false;
+    mSeekTimeUs = timeUs;
+    mFlags &= ~(AT_EOS | AUDIO_AT_EOS | VIDEO_AT_EOS);
+
+    seekAudioIfNecessary_l();
+
+    if (!(mFlags & PLAYING)) {
+        ALOGV("seeking while paused, sending SEEK_COMPLETE notification"
+             " immediately.");
+
+        notifyListener_l(MEDIA_SEEK_COMPLETE);
+        mSeekNotificationSent = true;
+
+        if ((mFlags & PREPARED) && mVideoSource != NULL) {
+            mFlags |= SEEK_PREVIEW;
+            postVideoEvent_l();
+        }
+    }
+
+    return OK;
+}
+
+// For audio-only playback, forwards the pending seek to the audio player
+// and arms the seek-complete / EOS watchers. mLock must be held.
+void PreviewPlayer::seekAudioIfNecessary_l() {
+    if (mSeeking != NO_SEEK && mVideoSource == NULL && mAudioPlayer != NULL) {
+        mAudioPlayer->seekTo(mSeekTimeUs);
+
+        mWatchForAudioSeekComplete = true;
+        mWatchForAudioEOS = true;
+    }
+}
+
+// Records the (non-NULL) demuxed audio track.
+void PreviewPlayer::setAudioSource(const sp<MediaSource>& source) {
+    CHECK(source != NULL);
+    mAudioTrack = source;
+}
+
+// Records the (non-NULL) demuxed video track.
+void PreviewPlayer::setVideoSource(const sp<MediaSource>& source) {
+    CHECK(source != NULL);
+    mVideoTrack = source;
+}
+
+// Called once the first video frame after a seek has been decoded: aligns
+// the audio player to the actual video time (or notifies SEEK_COMPLETE in
+// the video-only case) and clears the seek state.
+void PreviewPlayer::finishSeekIfNecessary(int64_t videoTimeUs) {
+    if (mSeeking == SEEK_VIDEO_ONLY) {
+        mSeeking = NO_SEEK;
+        return;
+    }
+
+    if (mSeeking == NO_SEEK || (mFlags & SEEK_PREVIEW)) {
+        return;
+    }
+
+    if (mAudioPlayer != NULL) {
+        ALOGV("seeking audio to %lld us (%.2f secs).", videoTimeUs, videoTimeUs / 1E6);
+
+        // If we don't have a video time, seek audio to the originally
+        // requested seek time instead.
+
+        mAudioPlayer->seekTo(videoTimeUs < 0 ? mSeekTimeUs : videoTimeUs);
+        mWatchForAudioSeekComplete = true;
+        mWatchForAudioEOS = true;
+    } else if (!mSeekNotificationSent) {
+        // If we're playing video only, report seek complete now,
+        // otherwise audio player will notify us later.
+        notifyListener_l(MEDIA_SEEK_COMPLETE);
+        mSeekNotificationSent = true;
+    }
+
+    mFlags |= FIRST_FRAME;
+    mSeeking = NO_SEEK;
+}
+
+// Event handler that polls the audio player for seek completion and EOS,
+// issuing the corresponding notifications / stream-done event.
+void PreviewPlayer::onCheckAudioStatus() {
+    Mutex::Autolock autoLock(mLock);
+    if (!mAudioStatusEventPending) {
+        // Event was dispatched and while we were blocking on the mutex,
+        // has already been cancelled.
+        return;
+    }
+
+    mAudioStatusEventPending = false;
+
+    if (mWatchForAudioSeekComplete && !mAudioPlayer->isSeeking()) {
+        mWatchForAudioSeekComplete = false;
+
+        if (!mSeekNotificationSent) {
+            notifyListener_l(MEDIA_SEEK_COMPLETE);
+            mSeekNotificationSent = true;
+        }
+
+        mSeeking = NO_SEEK;
+    }
+
+    status_t finalStatus;
+    if (mWatchForAudioEOS && mAudioPlayer->reachedEOS(&finalStatus)) {
+        mWatchForAudioEOS = false;
+        mFlags |= AUDIO_AT_EOS;
+        mFlags |= FIRST_FRAME;
+        postStreamDoneEvent_l(finalStatus);
+    }
+}
+
+// Posts the next video event unless one is already queued; a negative
+// delay selects the default 10 ms.
+void PreviewPlayer::postVideoEvent_l(int64_t delayUs) {
+    if (mVideoEventPending) {
+        return;
+    }
+
+    mVideoEventPending = true;
+    mQueue.postEventWithDelay(mVideoEvent, delayUs < 0 ? 10000 : delayUs);
+}
+
+// Queues a one-shot stream-done event carrying the final status
+// (deduplicated while one is pending).
+void PreviewPlayer::postStreamDoneEvent_l(status_t status) {
+    if (mStreamDoneEventPending) {
+        return;
+    }
+    mStreamDoneEventPending = true;
+
+    mStreamDoneStatus = status;
+    mQueue.postEvent(mStreamDoneEvent);
+}
+
+// Schedules the next A/V-lag check one second out (deduplicated).
+void PreviewPlayer::postVideoLagEvent_l() {
+    if (mVideoLagEventPending) {
+        return;
+    }
+    mVideoLagEventPending = true;
+    mQueue.postEventWithDelay(mVideoLagEvent, 1000000ll);
+}
+
+// Schedules an audio-status poll after delayUs (deduplicated).
+void PreviewPlayer::postCheckAudioStatusEvent_l(int64_t delayUs) {
+    if (mAudioStatusEventPending) {
+        return;
+    }
+    mAudioStatusEventPending = true;
+    mQueue.postEventWithDelay(mCheckAudioStatusEvent, delayUs);
+}
+
+// Fails an in-progress prepare with err (must be != OK): notifies the
+// listener for async prepares, records the result, clears the prepare
+// flags and wakes any thread blocked in prepare().
+void PreviewPlayer::abortPrepare(status_t err) {
+    CHECK(err != OK);
+
+    if (mIsAsyncPrepare) {
+        notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+    }
+
+    mPrepareResult = err;
+    mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED);
+    mAsyncPrepareEvent = NULL;
+    mPreparedCondition.broadcast();
+}
+
+// Returns the extractor's seek-capability flags.
+uint32_t PreviewPlayer::getSourceSeekFlags() const {
+    Mutex::Autolock lock(mLock);
+    return mExtractorFlags;
+}
+
+// Callback from the audio player: schedule an audio-status check at EOS.
+void PreviewPlayer::postAudioEOS(int64_t delayUs) {
+    Mutex::Autolock autoLock(mLock);
+    postCheckAudioStatusEvent_l(delayUs);
+}
+
+// Callback from the audio player: schedule an immediate audio-status check
+// once its seek has completed.
+void PreviewPlayer::postAudioSeekComplete() {
+    Mutex::Autolock autoLock(mLock);
+    postCheckAudioStatusEvent_l(0 /* delayUs */);
+}
+
+// Reports decoder activity (audio and/or video) to the battery-stats
+// service. mLock must be held.
+void PreviewPlayer::updateBatteryUsage_l() {
+    uint32_t params = IMediaPlayerService::kBatteryDataTrackDecoder;
+    // Audio counts only when an actual decoder sits between track and sink.
+    if ((mAudioSource != NULL) && (mAudioSource != mAudioTrack)) {
+        params |= IMediaPlayerService::kBatteryDataTrackAudio;
+    }
+    if (mVideoSource != NULL) {
+        params |= IMediaPlayerService::kBatteryDataTrackVideo;
+    }
+    addBatteryData(params);
+}
+
+} // namespace android
diff --git a/libvideoeditor/lvpp/PreviewPlayer.h b/libvideoeditor/lvpp/PreviewPlayer.h
new file mode 100755
index 0000000..177853f
--- /dev/null
+++ b/libvideoeditor/lvpp/PreviewPlayer.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PREVIEW_PLAYER_H_
+
+#define PREVIEW_PLAYER_H_
+
+#include "TimedEventQueue.h"
+#include "VideoEditorAudioPlayer.h"
+
+#include <media/MediaPlayerInterface.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/TimeSource.h>
+#include <utils/threads.h>
+#include "NativeWindowRenderer.h"
+
+namespace android {
+
+struct VideoEditorAudioPlayer;
+struct MediaExtractor;
+
+// Preview playback engine for the video editor (AwesomePlayer derivative
+// that renders through NativeWindowRenderer and supports storyboard
+// effects / audio mixing).
+struct PreviewPlayer {
+    PreviewPlayer(NativeWindowRenderer* renderer);
+    ~PreviewPlayer();
+
+    void setListener(const wp<MediaPlayerBase> &listener);
+    void reset();
+
+    status_t play();
+    status_t pause();
+
+    bool isPlaying() const;
+    void setSurface(const sp<Surface> &surface);
+    void setSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture);
+    status_t seekTo(int64_t timeUs);
+
+    status_t getVideoDimensions(int32_t *width, int32_t *height) const;
+
+
+    // FIXME: Sync between ...
+    void acquireLock();
+    void releaseLock();
+
+    status_t prepare();
+    status_t prepareAsync();
+    status_t setDataSource(const char *path);
+    status_t setDataSource(const sp<IStreamSource> &source);
+
+    void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink);
+    status_t setLooping(bool shouldLoop);
+    status_t getDuration(int64_t *durationUs);
+    status_t getPosition(int64_t *positionUs);
+
+    uint32_t getSourceSeekFlags() const;
+
+    void postAudioEOS(int64_t delayUs = 0ll);
+    void postAudioSeekComplete();
+
+    status_t loadEffectsSettings(M4VSS3GPP_EffectSettings* pEffectSettings,
+            int nEffects);
+    status_t loadAudioMixSettings(M4xVSS_AudioMixingSettings* pAudioMixSettings);
+    status_t setAudioMixPCMFileHandle(M4OSA_Context pAudioMixPCMFileHandle);
+    status_t setAudioMixStoryBoardParam(M4OSA_UInt32 audioMixStoryBoardTS,
+            M4OSA_UInt32 currentMediaBeginCutTime,
+            M4OSA_UInt32 currentMediaVolumeVol);
+
+    status_t setPlaybackBeginTime(uint32_t msec);
+    status_t setPlaybackEndTime(uint32_t msec);
+    status_t setStoryboardStartTime(uint32_t msec);
+    status_t setProgressCallbackInterval(uint32_t cbInterval);
+    status_t setMediaRenderingMode(M4xVSS_MediaRendering mode,
+            M4VIDEOEDITING_VideoFrameSize outputVideoSize);
+
+    status_t resetJniCallbackTimeStamp();
+    status_t setImageClipProperties(uint32_t width, uint32_t height);
+    status_t readFirstVideoFrame();
+    status_t getLastRenderedTimeMs(uint32_t *lastRenderedTimeMs);
+    status_t setAudioPlayer(VideoEditorAudioPlayer *audioPlayer);
+
+private:
+    // Player state bit-flags stored in mFlags. Each constant must occupy a
+    // distinct bit since they are combined with |= and tested with &.
+    enum {
+        PLAYING = 1,
+        LOOPING = 2,
+        FIRST_FRAME = 4,
+        PREPARING = 8,
+        PREPARED = 16,
+        AT_EOS = 32,
+        PREPARE_CANCELLED = 64,
+        CACHE_UNDERRUN = 128,
+        AUDIO_AT_EOS = 256,
+        VIDEO_AT_EOS = 512,
+        AUTO_LOOPING = 1024,
+
+        // We are basically done preparing but are currently buffering
+        // sufficient data to begin playback and finish the preparation phase
+        // for good.
+        PREPARING_CONNECTED = 2048,
+
+        // We're triggering a single video event to display the first frame
+        // after the seekpoint.
+        SEEK_PREVIEW = 4096,
+
+        AUDIO_RUNNING = 8192,
+        AUDIOPLAYER_STARTED = 16384,
+
+        INCOGNITO = 32768,
+
+        // BUG FIX: this flag previously shared value 2048 with
+        // PREPARING_CONNECTED, so setting/clearing/testing one silently
+        // affected the other. Moved to the next unused bit.
+        INFORMED_AV_EOS = 65536,
+    };
+
+    mutable Mutex mLock;
+
+    OMXClient mClient;
+    TimedEventQueue mQueue;
+    bool mQueueStarted;
+    wp<MediaPlayerBase> mListener;
+
+    sp<Surface> mSurface;
+    sp<ANativeWindow> mNativeWindow;
+    sp<MediaPlayerBase::AudioSink> mAudioSink;
+
+    SystemTimeSource mSystemTimeSource;
+    TimeSource *mTimeSource;
+
+    String8 mUri;
+
+    sp<MediaSource> mVideoTrack;
+    sp<MediaSource> mVideoSource;
+    bool mVideoRendererIsPreview;
+
+    sp<MediaSource> mAudioTrack;
+    sp<MediaSource> mAudioSource;
+    VideoEditorAudioPlayer *mAudioPlayer;
+    int64_t mDurationUs;
+
+    int32_t mDisplayWidth;
+    int32_t mDisplayHeight;
+
+    uint32_t mFlags;
+    uint32_t mExtractorFlags;
+
+    int64_t mTimeSourceDeltaUs;
+    int64_t mVideoTimeUs;
+
+    enum SeekType {
+        NO_SEEK,
+        SEEK,
+        SEEK_VIDEO_ONLY
+    };
+    SeekType mSeeking;
+
+    bool mSeekNotificationSent;
+    int64_t mSeekTimeUs;
+
+    int64_t mBitrate;  // total bitrate of the file (in bps) or -1 if unknown.
+
+    bool mWatchForAudioSeekComplete;
+    bool mWatchForAudioEOS;
+
+    sp<TimedEventQueue::Event> mVideoEvent;
+    bool mVideoEventPending;
+    sp<TimedEventQueue::Event> mStreamDoneEvent;
+    bool mStreamDoneEventPending;
+    sp<TimedEventQueue::Event> mCheckAudioStatusEvent;
+    bool mAudioStatusEventPending;
+    sp<TimedEventQueue::Event> mVideoLagEvent;
+    bool mVideoLagEventPending;
+
+    sp<TimedEventQueue::Event> mAsyncPrepareEvent;
+    Condition mPreparedCondition;
+    bool mIsAsyncPrepare;
+    status_t mPrepareResult;
+    status_t mStreamDoneStatus;
+
+    MediaBuffer *mVideoBuffer;
+    int64_t mLastVideoTimeUs;
+    ARect mCropRect;
+    int32_t mGivenWidth, mGivenHeight;
+
+
+    bool mIsChangeSourceRequired;
+
+    NativeWindowRenderer *mNativeWindowRenderer;
+    RenderInput *mVideoRenderer;
+
+    int32_t mVideoWidth, mVideoHeight;
+
+    //Data structures used for audio and video effects
+    M4VSS3GPP_EffectSettings* mEffectsSettings;
+    M4xVSS_AudioMixingSettings* mPreviewPlayerAudioMixSettings;
+    M4OSA_Context mAudioMixPCMFileHandle;
+    M4OSA_UInt32 mAudioMixStoryBoardTS;
+    M4OSA_UInt32 mCurrentMediaBeginCutTime;
+    M4OSA_UInt32 mCurrentMediaVolumeValue;
+    M4OSA_UInt32 mCurrFramingEffectIndex;
+
+    uint32_t mNumberEffects;
+    uint32_t mPlayBeginTimeMsec;
+    uint32_t mPlayEndTimeMsec;
+    uint64_t mDecodedVideoTs; // timestamp of current decoded video frame buffer
+    uint64_t mDecVideoTsStoryBoard; // timestamp of frame relative to storyboard
+    uint32_t mCurrentVideoEffect;
+    uint32_t mProgressCbInterval;
+    uint32_t mNumberDecVideoFrames; // Counter of number of video frames decoded
+    sp<TimedEventQueue::Event> mProgressCbEvent;
+    bool mProgressCbEventPending;
+    sp<TimedEventQueue::Event> mOverlayUpdateEvent;
+    bool mOverlayUpdateEventPending;
+    bool mOverlayUpdateEventPosted;
+
+    M4xVSS_MediaRendering mRenderingMode;
+    uint32_t mOutputVideoWidth;
+    uint32_t mOutputVideoHeight;
+
+    uint32_t mStoryboardStartTimeMsec;
+
+    bool mIsVideoSourceJpg;
+    bool mIsFiftiesEffectStarted;
+    int64_t mImageFrameTimeUs;
+    bool mStartNextPlayer;
+    mutable Mutex mLockControl;
+
+    M4VIFI_UInt8* mFrameRGBBuffer;
+    M4VIFI_UInt8* mFrameYUVBuffer;
+
+    void cancelPlayerEvents_l(bool updateProgressCb = false);
+    status_t setDataSource_l(const sp<MediaExtractor> &extractor);
+    status_t setDataSource_l(const char *path);
+    void setNativeWindow_l(const sp<ANativeWindow> &native);
+    void reset_l();
+    void clear_l();
+    status_t play_l();
+    status_t pause_l(bool at_eos = false);
+    status_t initRenderer_l();
+    status_t initAudioDecoder_l();
+    status_t initVideoDecoder_l(uint32_t flags = 0);
+    void notifyVideoSize_l();
+    void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0);
+    void onVideoEvent();
+    void onVideoLagUpdate();
+    void onStreamDone();
+    void onCheckAudioStatus();
+    void onPrepareAsyncEvent();
+
+    void finishAsyncPrepare_l();
+    void abortPrepare(status_t err);
+
+    status_t startAudioPlayer_l();
+    void setVideoSource(const sp<MediaSource>& source);
+    status_t finishSetDataSource_l();
+    void setAudioSource(const sp<MediaSource>& source);
+
+    status_t seekTo_l(int64_t timeUs);
+    void seekAudioIfNecessary_l();
+    void finishSeekIfNecessary(int64_t videoTimeUs);
+
+    void postCheckAudioStatusEvent_l(int64_t delayUs);
+    void postVideoLagEvent_l();
+    void postStreamDoneEvent_l(status_t status);
+    void postVideoEvent_l(int64_t delayUs = -1);
+    void setVideoPostProcessingNode(
+            M4VSS3GPP_VideoEffectType type, M4OSA_Bool enable);
+    void postProgressCallbackEvent_l();
+    void shutdownVideoDecoder_l();
+    void onProgressCbEvent();
+
+    void postOverlayUpdateEvent_l();
+    void onUpdateOverlayEvent();
+
+    status_t setDataSource_l_jpg();
+    status_t prepare_l();
+    status_t prepareAsync_l();
+    void updateBatteryUsage_l();
+    void updateSizeToRender(sp<MetaData> meta);
+
+    void setDuration_l(int64_t durationUs);
+    void setPosition_l(int64_t timeUs);
+
+    PreviewPlayer(const PreviewPlayer &);
+    PreviewPlayer &operator=(const PreviewPlayer &);
+};
+
+} // namespace android
+
+#endif // PREVIEW_PLAYER_H_
+
diff --git a/libvideoeditor/lvpp/PreviewRenderer.cpp b/libvideoeditor/lvpp/PreviewRenderer.cpp
new file mode 100755
index 0000000..4aa4eb3
--- /dev/null
+++ b/libvideoeditor/lvpp/PreviewRenderer.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "PreviewRenderer"
+#include <utils/Log.h>
+
+#include "PreviewRenderer.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <gui/Surface.h>
+
+namespace android {
+
+// Factory: constructs a PreviewRenderer and connects it to the native
+// window. Returns NULL (after deleting the half-built object) when init()
+// fails.
+PreviewRenderer* PreviewRenderer::CreatePreviewRenderer (
+        const sp<Surface> &surface, size_t width, size_t height) {
+
+    PreviewRenderer* renderer = new PreviewRenderer(surface, width, height);
+
+    if (renderer->init() != 0) {
+        delete renderer;
+        return NULL;
+    }
+
+    return renderer;
+}
+
+// BUG FIX: mBuf was left uninitialized here, yet renderYV12() tests
+// "mBuf != NULL" -- calling renderYV12() before a successful
+// getBufferYV12() would unlock/queue a garbage pointer (undefined
+// behavior). Initialize it to NULL.
+PreviewRenderer::PreviewRenderer(
+        const sp<Surface> &surface,
+        size_t width, size_t height)
+    : mSurface(surface),
+      mWidth(width),
+      mHeight(height),
+      mBuf(NULL) {
+}
+
+// Configures the native window for CPU-written YV12 buffers (3-deep queue,
+// scale-to-window, no transform). Returns 0 on success or the first
+// non-zero native_window error. Note the "fail:" label is also reached on
+// success with err == 0 -- it is the single exit point.
+int PreviewRenderer::init() {
+    int err = 0;
+    ANativeWindow* anw = mSurface.get();
+
+    err = native_window_api_connect(anw, NATIVE_WINDOW_API_CPU);
+    if (err) goto fail;
+
+    err = native_window_set_usage(
+            anw, GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN);
+    if (err) goto fail;
+
+    err = native_window_set_buffer_count(anw, 3);
+    if (err) goto fail;
+
+    err = native_window_set_scaling_mode(
+            anw, NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+    if (err) goto fail;
+
+    err = native_window_set_buffers_geometry(
+            anw, mWidth, mHeight, HAL_PIXEL_FORMAT_YV12);
+    if (err) goto fail;
+
+    err = native_window_set_buffers_transform(anw, 0);
+    if (err) goto fail;
+
+fail:
+    return err;
+}
+
+// Disconnects the CPU producer from the native window.
+PreviewRenderer::~PreviewRenderer() {
+    native_window_api_disconnect(mSurface.get(), NATIVE_WINDOW_API_CPU);
+}
+
+
+//
+// Provides a buffer and associated stride
+// This buffer is allocated by the SurfaceFlinger
+//
+// For optimal display performance, you should:
+// 1) call getBufferYV12()
+// 2) fill the buffer with your data
+// 3) call renderYV12() to take these changes into account
+//
+// For each call to getBufferYV12(), you must also call renderYV12().
+// The expected format in the buffer is YV12 (similar to the YUV420 planar format);
+// for more details on YV12 see hardware/libhardware/include/hardware/hardware.h
+//
+// Dequeues and CPU-locks the next window buffer, returning its base
+// pointer and row stride. On dequeue failure it logs and returns without
+// touching *data / *stride -- callers must not use stale values.
+void PreviewRenderer::getBufferYV12(uint8_t **data, size_t *stride) {
+    int err = OK;
+
+    if ((err = mSurface->ANativeWindow::dequeueBuffer(mSurface.get(), &mBuf)) != 0) {
+        ALOGW("Surface::dequeueBuffer returned error %d", err);
+        return;
+    }
+
+    CHECK_EQ(0, mSurface->ANativeWindow::lockBuffer(mSurface.get(), mBuf));
+
+    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
+
+    Rect bounds(mWidth, mHeight);
+
+    void *dst;
+    CHECK_EQ(0, mapper.lock(mBuf->handle,
+            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
+            bounds, &dst));
+
+    *data = (uint8_t*)dst;
+    *stride = mBuf->stride;
+}
+
+
+//
+// Display the content of the buffer provided by last call to getBufferYV12()
+//
+// See getBufferYV12() for details.
+//
+// Unlocks the buffer obtained via getBufferYV12() and queues it for
+// display; afterwards the renderer no longer owns a buffer (mBuf = NULL).
+void PreviewRenderer::renderYV12() {
+    int err = OK;
+
+    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
+
+    if (mBuf!= NULL) {
+        CHECK_EQ(0, mapper.unlock(mBuf->handle));
+
+        if ((err = mSurface->ANativeWindow::queueBuffer(mSurface.get(), mBuf)) != 0) {
+            ALOGW("Surface::queueBuffer returned error %d", err);
+        }
+    }
+    mBuf = NULL;
+}
+
+} // namespace android
diff --git a/libvideoeditor/lvpp/PreviewRenderer.h b/libvideoeditor/lvpp/PreviewRenderer.h
new file mode 100755
index 0000000..91c2295
--- /dev/null
+++ b/libvideoeditor/lvpp/PreviewRenderer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PREVIEW_RENDERER_H_
+
+#define PREVIEW_RENDERER_H_
+
+#include <media/stagefright/ColorConverter.h>
+#include <utils/RefBase.h>
+#include <system/window.h>
+#include <ui/GraphicBufferMapper.h>
+#include "SoftwareRenderer.h"
+
+
+namespace android {
+
+class Surface;
+
+class PreviewRenderer {
+public:
+
+static PreviewRenderer* CreatePreviewRenderer (
+ const sp<Surface> &surface,
+ size_t width, size_t height);
+
+ ~PreviewRenderer();
+
+ void getBufferYV12(uint8_t **data, size_t *stride);
+
+ void renderYV12();
+
+ static size_t ALIGN(size_t x, size_t alignment) {
+ return (x + alignment - 1) & ~(alignment - 1);
+ }
+
+private:
+ PreviewRenderer(
+ const sp<Surface> &surface,
+ size_t width, size_t height);
+
+ int init();
+
+ sp<Surface> mSurface;
+ size_t mWidth, mHeight;
+
+ ANativeWindowBuffer *mBuf;
+
+ PreviewRenderer(const PreviewRenderer &);
+ PreviewRenderer &operator=(const PreviewRenderer &);
+};
+
+} // namespace android
+
+#endif // PREVIEW_RENDERER_H_
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
new file mode 100755
index 0000000..d0ee51b
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
@@ -0,0 +1,896 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "VideoEditorAudioPlayer"
+#include <utils/Log.h>
+
+#include <binder/IPCThreadState.h>
+#include <media/AudioTrack.h>
+#include <VideoEditorAudioPlayer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+#include <system/audio.h>
+
+#include "PreviewPlayer.h"
+namespace android {
+
+// Builds an idle audio player bound to the given sink and observing
+// PreviewPlayer; all background-audio mixing state starts zeroed.
+VideoEditorAudioPlayer::VideoEditorAudioPlayer(
+        const sp<MediaPlayerBase::AudioSink> &audioSink,
+        PreviewPlayer *observer)
+    : mAudioTrack(NULL),
+      mInputBuffer(NULL),
+      mSampleRate(0),
+      mLatencyUs(0),
+      mFrameSize(0),
+      mNumFramesPlayed(0),
+      mPositionTimeMediaUs(-1),
+      mPositionTimeRealUs(-1),
+      mSeeking(false),
+      mReachedEOS(false),
+      mFinalStatus(OK),
+      mStarted(false),
+      mIsFirstBuffer(false),
+      mFirstBufferResult(OK),
+      mFirstBuffer(NULL),
+      mAudioSink(audioSink),
+      mObserver(observer) {
+
+    ALOGV("Constructor");
+    mBGAudioPCMFileHandle = NULL;
+    mAudioProcess = NULL;
+    mBGAudioPCMFileLength = 0;
+    mBGAudioPCMFileTrimmedLength = 0;
+    mBGAudioPCMFileDuration = 0;
+    mBGAudioPCMFileSeekPoint = 0;
+    mBGAudioPCMFileOriginalSeekPoint = 0;
+    mBGAudioStoryBoardSkimTimeStamp = 0;
+    mBGAudioStoryBoardCurrentMediaBeginCutTS = 0;
+    mBGAudioStoryBoardCurrentMediaVolumeVal = 0;
+    mSeekTimeUs = 0;
+    mSource = NULL;
+}
+
+// Stops playback if still started and frees the mixing processor.
+VideoEditorAudioPlayer::~VideoEditorAudioPlayer() {
+
+    ALOGV("Destructor");
+    if (mStarted) {
+        reset();
+    }
+    if (mAudioProcess != NULL) {
+        delete mAudioProcess;
+        mAudioProcess = NULL;
+    }
+}
+
+// Pauses output. With playPendingSamples the sink/track is stopped so its
+// queued samples drain; otherwise output pauses immediately. Must only be
+// called after start().
+void VideoEditorAudioPlayer::pause(bool playPendingSamples) {
+    ALOGV("pause: playPendingSamples=%d", playPendingSamples);
+    CHECK(mStarted);
+
+    if (playPendingSamples) {
+        if (mAudioSink.get() != NULL) {
+            mAudioSink->stop();
+        } else {
+            mAudioTrack->stop();
+        }
+    } else {
+        if (mAudioSink.get() != NULL) {
+            mAudioSink->pause();
+        } else {
+            mAudioTrack->pause();
+        }
+    }
+}
+
+// Fully stops and releases the output (sink or raw AudioTrack), returns
+// held media buffers, stops the source and waits for its last strong
+// reference to drop, then resets all playback bookkeeping. No-op when not
+// started.
+void VideoEditorAudioPlayer::clear() {
+    ALOGV("clear");
+    if (!mStarted) {
+        return;
+    }
+
+    if (mAudioSink.get() != NULL) {
+        mAudioSink->stop();
+        mAudioSink->close();
+    } else {
+        mAudioTrack->stop();
+
+        delete mAudioTrack;
+        mAudioTrack = NULL;
+    }
+
+    // Make sure to release any buffer we hold onto so that the
+    // source is able to stop().
+
+    if (mFirstBuffer != NULL) {
+        mFirstBuffer->release();
+        mFirstBuffer = NULL;
+    }
+
+    if (mInputBuffer != NULL) {
+        ALOGV("AudioPlayerBase releasing input buffer.");
+
+        mInputBuffer->release();
+        mInputBuffer = NULL;
+    }
+
+    mSource->stop();
+
+    // The following hack is necessary to ensure that the OMX
+    // component is completely released by the time we may try
+    // to instantiate it again.
+    wp<MediaSource> tmp = mSource;
+    mSource.clear();
+    while (tmp.promote() != NULL) {
+        usleep(1000);
+    }
+    IPCThreadState::self()->flushCommands();
+
+    mNumFramesPlayed = 0;
+    mPositionTimeMediaUs = -1;
+    mPositionTimeRealUs = -1;
+    mSeeking = false;
+    mReachedEOS = false;
+    mFinalStatus = OK;
+    mStarted = false;
+}
+
+// Resumes output after a pause. Re-applies the audio ducking / volume mix
+// parameters first, since the audio source (and hence its mix settings)
+// may have changed while paused. Must only be called after start().
+void VideoEditorAudioPlayer::resume() {
+    ALOGV("resume");
+
+    AudioMixSettings audioMixSettings;
+
+    // Single audio player is used;
+    // Pass on the audio ducking parameters
+    // which might have changed with new audio source
+    audioMixSettings.lvInDucking_threshold =
+        mAudioMixSettings->uiInDucking_threshold;
+    audioMixSettings.lvInDucking_lowVolume =
+        ((M4OSA_Float)mAudioMixSettings->uiInDucking_lowVolume) / 100.0;
+    audioMixSettings.lvInDucking_enable =
+        mAudioMixSettings->bInDucking_enable;
+    audioMixSettings.lvPTVolLevel =
+        ((M4OSA_Float)mBGAudioStoryBoardCurrentMediaVolumeVal) / 100.0;
+    audioMixSettings.lvBTVolLevel =
+        ((M4OSA_Float)mAudioMixSettings->uiAddVolume) / 100.0;
+    audioMixSettings.lvBTChannelCount = mAudioMixSettings->uiBTChannelCount;
+    audioMixSettings.lvPTChannelCount = mAudioMixSettings->uiNbChannels;
+
+    // Call to Audio mix param setting
+    mAudioProcess->setMixParams(audioMixSettings);
+
+    CHECK(mStarted);
+
+    if (mAudioSink.get() != NULL) {
+        mAudioSink->start();
+    } else {
+        mAudioTrack->start();
+    }
+}
+
+// Records a pending seek target, invalidates the position mapping and
+// flushes the sink/track so stale samples are dropped. Always returns OK.
+status_t VideoEditorAudioPlayer::seekTo(int64_t time_us) {
+    ALOGV("seekTo: %lld", time_us);
+    Mutex::Autolock autoLock(mLock);
+
+    mSeeking = true;
+    mPositionTimeRealUs = mPositionTimeMediaUs = -1;
+    mReachedEOS = false;
+    mSeekTimeUs = time_us;
+
+    if (mAudioSink != NULL) {
+        mAudioSink->flush();
+    } else {
+        mAudioTrack->flush();
+    }
+
+    return OK;
+}
+
+// True while a seek has been requested but not yet consumed by fillBuffer.
+bool VideoEditorAudioPlayer::isSeeking() {
+    Mutex::Autolock lock(mLock);
+    ALOGV("isSeeking: mSeeking=%d", mSeeking);
+    return mSeeking;
+}
+
+// Reports EOS state; *finalStatus receives mFinalStatus. (The initial
+// "*finalStatus = OK" before the lock is redundant -- it is always
+// overwritten below.)
+bool VideoEditorAudioPlayer::reachedEOS(status_t *finalStatus) {
+    ALOGV("reachedEOS: status=%d", mFinalStatus);
+    *finalStatus = OK;
+
+    Mutex::Autolock autoLock(mLock);
+    *finalStatus = mFinalStatus;
+    return mReachedEOS;
+}
+
+// Thread-safe wrapper around getRealTimeUs_l().
+int64_t VideoEditorAudioPlayer::getRealTimeUs() {
+    Mutex::Autolock autoLock(mLock);
+    return getRealTimeUs_l();
+}
+
+// Wall-clock playback time: frames played converted to us, minus the
+// output latency. NOTE(review): divides by mSampleRate -- assumes start()
+// has set a non-zero sample rate before this is called; confirm.
+int64_t VideoEditorAudioPlayer::getRealTimeUs_l() {
+    return -mLatencyUs + (mNumFramesPlayed * 1000000) / mSampleRate;
+}
+
+// Media-time position: the last mapped media timestamp plus the real time
+// elapsed since that mapping. While the mapping is invalid (e.g. right
+// after a seek) returns the pending seek target, or 0.
+int64_t VideoEditorAudioPlayer::getMediaTimeUs() {
+    ALOGV("getMediaTimeUs");
+    Mutex::Autolock autoLock(mLock);
+
+    if (mPositionTimeMediaUs < 0 || mPositionTimeRealUs < 0) {
+        if (mSeeking) {
+            return mSeekTimeUs;
+        }
+
+        return 0;
+    }
+
+    int64_t realTimeOffset = getRealTimeUs_l() - mPositionTimeRealUs;
+    if (realTimeOffset < 0) {
+        realTimeOffset = 0;
+    }
+
+    return mPositionTimeMediaUs + realTimeOffset;
+}
+
+// Exposes the (real time, media time) anchor pair. Returns true only once
+// at least one audio buffer has established the mapping.
+bool VideoEditorAudioPlayer::getMediaTimeMapping(
+        int64_t *realtime_us, int64_t *mediatime_us) {
+    ALOGV("getMediaTimeMapping");
+    Mutex::Autolock lock(mLock);
+
+    *realtime_us = mPositionTimeRealUs;
+    *mediatime_us = mPositionTimeMediaUs;
+
+    const bool haveMapping =
+            (mPositionTimeRealUs != -1) && (mPositionTimeMediaUs != -1);
+    return haveMapping;
+}
+
+// Replaces the audio source. Any buffers held from the previous source
+// are released first so that source is able to stop().
+void VideoEditorAudioPlayer::setSource(const sp<MediaSource> &source) {
+    Mutex::Autolock lock(mLock);
+
+    // Release the stashed first buffer, if any.
+    if (mFirstBuffer != NULL) {
+        mFirstBuffer->release();
+        mFirstBuffer = NULL;
+    }
+
+    // Release the in-flight input buffer, if any.
+    if (mInputBuffer != NULL) {
+        ALOGV("VideoEditorAudioPlayer releasing input buffer.");
+        mInputBuffer->release();
+        mInputBuffer = NULL;
+    }
+
+    // Stop and drop the old source before adopting the new one.
+    if (mSource != NULL) {
+        mSource->stop();
+        mSource.clear();
+    }
+
+    mSource = source;
+    mReachedEOS = false;
+}
+
+// Returns the current audio source (thread-safe).
+sp<MediaSource> VideoEditorAudioPlayer::getSource() {
+    Mutex::Autolock lock(mLock);
+    return mSource;
+}
+
+// Registers the PreviewPlayer that fillBuffer() notifies about EOS and
+// seek completion. Not locked; presumably set before playback starts
+// (see the disabled CHECK) - TODO confirm callers' threading.
+void VideoEditorAudioPlayer::setObserver(PreviewPlayer *observer) {
+    ALOGV("setObserver");
+    //CHECK(!mStarted);
+    mObserver = observer;
+}
+
+// Returns whether start() has completed. NOTE(review): reads mStarted
+// without holding mLock - assumed benign; confirm against callers.
+bool VideoEditorAudioPlayer::isStarted() {
+    return mStarted;
+}
+
+// static
+// Trampoline from the C-style AudioTrack callback into the member function.
+void VideoEditorAudioPlayer::AudioCallback(int event, void *user, void *info) {
+    VideoEditorAudioPlayer *player =
+            static_cast<VideoEditorAudioPlayer *>(user);
+    player->AudioCallback(event, info);
+}
+
+
+// AudioTrack callback: fills the track's buffer with mixed PCM via
+// fillBuffer(). Events other than EVENT_MORE_DATA are ignored.
+void VideoEditorAudioPlayer::AudioCallback(int event, void *info) {
+    if (event != AudioTrack::EVENT_MORE_DATA) {
+        return;
+    }
+
+    AudioTrack::Buffer *trackBuffer = (AudioTrack::Buffer *)info;
+    trackBuffer->size = fillBuffer(trackBuffer->raw, trackBuffer->size);
+}
+
+// Starts audio playback:
+//  1. configures the background-track (BT) mixer from mAudioMixSettings,
+//  2. derives the BT PCM file's length/duration and the initial seek
+//     offset from the trim (begin/end cut) and storyboard skim time,
+//  3. primes the first source buffer (handling INFO_FORMAT_CHANGED),
+//  4. opens and starts the AudioSink, or a raw AudioTrack if no sink.
+// Fixes: the inner AudioSink branch declared a second 'status_t err'
+// shadowing the outer one; removed the unused local
+// 'bStoryBoardTSBeyondBTEndCutTime'.
+// NOTE(review): a previously-created mAudioProcess instance, if any, is
+// not deleted before the 'new' below - confirm ownership/lifetime.
+status_t VideoEditorAudioPlayer::start(bool sourceAlreadyStarted) {
+    Mutex::Autolock autoLock(mLock);
+    CHECK(!mStarted);
+    CHECK(mSource != NULL);
+    ALOGV("Start");
+    status_t err;
+    M4OSA_ERR result = M4NO_ERROR;
+    M4OSA_UInt32 startTime = 0;
+    M4OSA_UInt32 seekTimeStamp = 0;
+
+    if (!sourceAlreadyStarted) {
+        err = mSource->start();
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    // Create the BG Audio handler
+    mAudioProcess = new VideoEditorBGAudioProcessing();
+    AudioMixSettings audioMixSettings;
+
+    // Pass on the audio ducking parameters
+    audioMixSettings.lvInDucking_threshold =
+        mAudioMixSettings->uiInDucking_threshold;
+    audioMixSettings.lvInDucking_lowVolume =
+        ((M4OSA_Float)mAudioMixSettings->uiInDucking_lowVolume) / 100.0;
+    audioMixSettings.lvInDucking_enable =
+        mAudioMixSettings->bInDucking_enable;
+    audioMixSettings.lvPTVolLevel =
+        ((M4OSA_Float)mBGAudioStoryBoardCurrentMediaVolumeVal) / 100.0;
+    audioMixSettings.lvBTVolLevel =
+        ((M4OSA_Float)mAudioMixSettings->uiAddVolume) / 100.0;
+    audioMixSettings.lvBTChannelCount = mAudioMixSettings->uiBTChannelCount;
+    audioMixSettings.lvPTChannelCount = mAudioMixSettings->uiNbChannels;
+
+    // Call to Audio mix param setting
+    mAudioProcess->setMixParams(audioMixSettings);
+
+    // Get the BG Audio PCM file details
+    if ( mBGAudioPCMFileHandle ) {
+
+        // TODO : 32bits required for OSAL, to be updated once OSAL is updated
+        M4OSA_UInt32 tmp32 = 0;
+        result = M4OSA_fileReadGetOption(mBGAudioPCMFileHandle,
+                                        M4OSA_kFileReadGetFileSize,
+                                        (M4OSA_Void**)&tmp32);
+        mBGAudioPCMFileLength = tmp32;
+        mBGAudioPCMFileTrimmedLength = mBGAudioPCMFileLength;
+
+
+        ALOGV("VideoEditorAudioPlayer::start M4OSA_kFileReadGetFileSize = %lld",
+                            mBGAudioPCMFileLength);
+
+        // Get the duration in time of the audio BT
+        if ( result == M4NO_ERROR ) {
+         ALOGV("VEAP: channels = %d freq = %d",
+         mAudioMixSettings->uiNbChannels,  mAudioMixSettings->uiSamplingFrequency);
+
+            // No trim
+            mBGAudioPCMFileDuration = ((
+                    (int64_t)(mBGAudioPCMFileLength/sizeof(M4OSA_UInt16)/
+                    mAudioMixSettings->uiNbChannels))*1000 ) /
+                    mAudioMixSettings->uiSamplingFrequency;
+
+            ALOGV("VideoEditorAudioPlayer:: beginCutMs %d , endCutMs %d",
+                    (unsigned int) mAudioMixSettings->beginCutMs,
+                    (unsigned int) mAudioMixSettings->endCutMs);
+
+            // Remove the trim part
+            if ((mAudioMixSettings->beginCutMs == 0) &&
+                (mAudioMixSettings->endCutMs != 0)) {
+                // End time itself the file duration
+                mBGAudioPCMFileDuration = mAudioMixSettings->endCutMs;
+                // Limit the file length also
+                mBGAudioPCMFileTrimmedLength = ((
+                     (int64_t)(mBGAudioPCMFileDuration *
+                     mAudioMixSettings->uiSamplingFrequency) *
+                     mAudioMixSettings->uiNbChannels) *
+                     sizeof(M4OSA_UInt16)) / 1000;
+            }
+            else if ((mAudioMixSettings->beginCutMs != 0) &&
+                     (mAudioMixSettings->endCutMs == mBGAudioPCMFileDuration)) {
+                // End time itself the file duration
+                mBGAudioPCMFileDuration = mBGAudioPCMFileDuration -
+                      mAudioMixSettings->beginCutMs;
+                // Limit the file length also
+                mBGAudioPCMFileTrimmedLength = ((
+                     (int64_t)(mBGAudioPCMFileDuration *
+                     mAudioMixSettings->uiSamplingFrequency) *
+                     mAudioMixSettings->uiNbChannels) *
+                     sizeof(M4OSA_UInt16)) / 1000;
+            }
+            else if ((mAudioMixSettings->beginCutMs != 0) &&
+                     (mAudioMixSettings->endCutMs != 0)) {
+                // End time itself the file duration
+                mBGAudioPCMFileDuration = mAudioMixSettings->endCutMs -
+                    mAudioMixSettings->beginCutMs;
+                // Limit the file length also
+                mBGAudioPCMFileTrimmedLength = ((
+                     (int64_t)(mBGAudioPCMFileDuration *
+                     mAudioMixSettings->uiSamplingFrequency) *
+                     mAudioMixSettings->uiNbChannels) *
+                     sizeof(M4OSA_UInt16)) / 1000; /*make to sec from ms*/
+            }
+
+            ALOGV("VideoEditorAudioPlayer: file duration recorded : %lld",
+                    mBGAudioPCMFileDuration);
+        }
+
+        // Last played location to be seeked at for next media item
+        if ( result == M4NO_ERROR ) {
+            ALOGV("VideoEditorAudioPlayer::mBGAudioStoryBoardSkimTimeStamp %lld",
+                    mBGAudioStoryBoardSkimTimeStamp);
+            ALOGV("VideoEditorAudioPlayer::uiAddCts %d",
+                    mAudioMixSettings->uiAddCts);
+            if (mBGAudioStoryBoardSkimTimeStamp >= mAudioMixSettings->uiAddCts) {
+                startTime = (mBGAudioStoryBoardSkimTimeStamp -
+                 mAudioMixSettings->uiAddCts);
+            }
+            else {
+                // do nothing
+            }
+
+            ALOGV("VideoEditorAudioPlayer::startTime %d", startTime);
+            seekTimeStamp = 0;
+            if (startTime) {
+                if (startTime >= mBGAudioPCMFileDuration) {
+                    // The BG track should be looped and started again
+                    if (mAudioMixSettings->bLoop) {
+                        // Add begin cut time to the mod value
+                        seekTimeStamp = ((startTime%mBGAudioPCMFileDuration) +
+                        mAudioMixSettings->beginCutMs);
+                    }else {
+                        // Looping disabled, donot do BT Mix , set to file end
+                        seekTimeStamp = (mBGAudioPCMFileDuration +
+                        mAudioMixSettings->beginCutMs);
+                    }
+                }else {
+                    // BT still present , just seek to story board time
+                    seekTimeStamp = startTime + mAudioMixSettings->beginCutMs;
+                }
+            }
+            else {
+                seekTimeStamp = mAudioMixSettings->beginCutMs;
+            }
+
+            // Convert the seekTimeStamp to file location
+            mBGAudioPCMFileOriginalSeekPoint = (
+                                        (int64_t)(mAudioMixSettings->beginCutMs)
+                                        * mAudioMixSettings->uiSamplingFrequency
+                                        * mAudioMixSettings->uiNbChannels
+                                        * sizeof(M4OSA_UInt16))/ 1000 ; /*make to sec from ms*/
+
+            mBGAudioPCMFileSeekPoint = ((int64_t)(seekTimeStamp)
+                                        * mAudioMixSettings->uiSamplingFrequency
+                                        * mAudioMixSettings->uiNbChannels
+                                        * sizeof(M4OSA_UInt16))/ 1000 ;
+        }
+    }
+
+    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
+    // of playback, if there is one, getFormat below will retrieve the
+    // updated format, if there isn't, we'll stash away the valid buffer
+    // of data to be used on the first audio callback.
+
+    CHECK(mFirstBuffer == NULL);
+
+    mFirstBufferResult = mSource->read(&mFirstBuffer);
+    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
+        ALOGV("INFO_FORMAT_CHANGED!!!");
+
+        CHECK(mFirstBuffer == NULL);
+        mFirstBufferResult = OK;
+        mIsFirstBuffer = false;
+    } else {
+        mIsFirstBuffer = true;
+    }
+
+    sp<MetaData> format = mSource->getFormat();
+    const char *mime;
+    bool success = format->findCString(kKeyMIMEType, &mime);
+    CHECK(success);
+    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+
+    success = format->findInt32(kKeySampleRate, &mSampleRate);
+    CHECK(success);
+
+    int32_t numChannels;
+    success = format->findInt32(kKeyChannelCount, &numChannels);
+    CHECK(success);
+
+    if (mAudioSink.get() != NULL) {
+        // Use the outer err; the original shadowed it here.
+        err = mAudioSink->open(
+                mSampleRate, numChannels, CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT,
+                DEFAULT_AUDIOSINK_BUFFERCOUNT,
+                &VideoEditorAudioPlayer::AudioSinkCallback, this);
+        if (err != OK) {
+            if (mFirstBuffer != NULL) {
+                mFirstBuffer->release();
+                mFirstBuffer = NULL;
+            }
+
+            if (!sourceAlreadyStarted) {
+                mSource->stop();
+            }
+
+            return err;
+        }
+
+        mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
+        mFrameSize = mAudioSink->frameSize();
+
+        mAudioSink->start();
+    } else {
+        mAudioTrack = new AudioTrack(
+                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT,
+                (numChannels == 2)
+                    ? AUDIO_CHANNEL_OUT_STEREO
+                    : AUDIO_CHANNEL_OUT_MONO,
+                0, AUDIO_POLICY_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
+
+        if ((err = mAudioTrack->initCheck()) != OK) {
+            delete mAudioTrack;
+            mAudioTrack = NULL;
+
+            if (mFirstBuffer != NULL) {
+                mFirstBuffer->release();
+                mFirstBuffer = NULL;
+            }
+
+            if (!sourceAlreadyStarted) {
+                mSource->stop();
+            }
+
+            return err;
+        }
+
+        mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
+        mFrameSize = mAudioTrack->frameSize();
+
+        mAudioTrack->start();
+    }
+
+    mStarted = true;
+
+    return OK;
+}
+
+
+// Resets the player to its post-construction state: clears playback state
+// via clear(), then zeroes the background-track bookkeeping.
+void VideoEditorAudioPlayer::reset() {
+
+    ALOGV("reset");
+    clear();
+
+    // Reset the background-audio seek point and storyboard timestamps.
+    mBGAudioPCMFileSeekPoint = 0;
+    mBGAudioStoryBoardSkimTimeStamp =0;
+    mBGAudioStoryBoardCurrentMediaBeginCutTS=0;
+}
+
+// static
+// Trampoline from the AudioSink callback into fillBuffer().
+size_t VideoEditorAudioPlayer::AudioSinkCallback(
+        MediaPlayerBase::AudioSink *audioSink,
+        void *buffer, size_t size, void *cookie) {
+    VideoEditorAudioPlayer *player =
+            static_cast<VideoEditorAudioPlayer *>(cookie);
+    return player->fillBuffer(buffer, size);
+}
+
+
+// Pulls decoded primary-track (PT) PCM from mSource, mixes it in place
+// with the background-track (BT) PCM file when the storyboard skim point
+// has passed the BT start time, and copies up to |size| bytes into |data|.
+// Called from the AudioSink/AudioTrack callback. Returns bytes written.
+// Also services pending seeks, consumes the stashed first buffer, applies
+// the PT volume when no mixing is done, and posts EOS / seek-complete to
+// the observer outside mLock.
+// Fixes: removed unused locals 'currentSteamTS', 'startTimeForBT' and
+// 'fp'; the PT data pointer was computed via an (unsigned int) cast that
+// truncates pointers on 64-bit - now uses uint8_t* like the pPTMdata
+// computation below it.
+size_t VideoEditorAudioPlayer::fillBuffer(void *data, size_t size) {
+
+    if (mReachedEOS) {
+        return 0;
+    }
+
+    size_t size_done = 0;
+    size_t size_remaining = size;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4AM_Buffer16 bgFrame = {NULL, 0};
+    M4AM_Buffer16 mixFrame = {NULL, 0};
+    M4AM_Buffer16 ptFrame = {NULL, 0};
+    M4OSA_Float fPTVolLevel =
+     ((M4OSA_Float)mBGAudioStoryBoardCurrentMediaVolumeVal)/100;
+    M4OSA_Int16 *pPTMdata=NULL;
+    M4OSA_UInt32 uiPCMsize = 0;
+
+    bool postSeekComplete = false;
+    bool postEOS = false;
+
+    while ((size_remaining > 0)&&(err==M4NO_ERROR)) {
+        MediaSource::ReadOptions options;
+
+        {
+            Mutex::Autolock autoLock(mLock);
+            if (mSeeking) {
+                if (mIsFirstBuffer) {
+                    if (mFirstBuffer != NULL) {
+                        mFirstBuffer->release();
+                        mFirstBuffer = NULL;
+                    }
+                    mIsFirstBuffer = false;
+                }
+
+                options.setSeekTo(mSeekTimeUs);
+
+                if (mInputBuffer != NULL) {
+                    mInputBuffer->release();
+                    mInputBuffer = NULL;
+                }
+
+                mSeeking = false;
+
+                if (mObserver) {
+                    postSeekComplete = true;
+                }
+            }
+        }
+
+        if (mInputBuffer == NULL) {
+            status_t status = OK;
+
+            if (mIsFirstBuffer) {
+                mInputBuffer = mFirstBuffer;
+                mFirstBuffer = NULL;
+                status = mFirstBufferResult;
+
+                mIsFirstBuffer = false;
+            } else {
+
+                {
+                    Mutex::Autolock autoLock(mLock);
+                    status = mSource->read(&mInputBuffer, &options);
+                }
+                // Data is Primary Track, mix with background track
+                // after reading same size from Background track PCM file
+                if (status == OK)
+                {
+                    // Mix only when skim point is after startTime of BT
+                    // NOTE(review): mPositionTimeMediaUs here still holds
+                    // the previous buffer's timestamp; it is refreshed
+                    // from kKeyTime only further below - confirm intended.
+                    if (((mBGAudioStoryBoardSkimTimeStamp* 1000) +
+                         (mPositionTimeMediaUs - mSeekTimeUs)) >=
+                          (int64_t)(mAudioMixSettings->uiAddCts * 1000)) {
+
+                        ALOGV("VideoEditorAudioPlayer::INSIDE MIXING");
+                        ALOGV("Checking %lld <= %lld",
+                            mBGAudioPCMFileSeekPoint-mBGAudioPCMFileOriginalSeekPoint,
+                            mBGAudioPCMFileTrimmedLength);
+
+
+                        M4OSA_Void* ptr;
+                        ptr = (M4OSA_Void*)((uint8_t*)mInputBuffer->data() +
+                        mInputBuffer->range_offset());
+
+                        M4OSA_UInt32 len = mInputBuffer->range_length();
+
+                        uiPCMsize = (mInputBuffer->range_length())/2;
+                        pPTMdata = (M4OSA_Int16*) ((uint8_t*) mInputBuffer->data()
+                                + mInputBuffer->range_offset());
+
+                        ALOGV("mix with background malloc to do len %d", len);
+
+                        bgFrame.m_dataAddress = (M4OSA_UInt16*)M4OSA_32bitAlignedMalloc( len, 1,
+                                                       (M4OSA_Char*)"bgFrame");
+                        bgFrame.m_bufferSize = len;
+
+                        mixFrame.m_dataAddress = (M4OSA_UInt16*)M4OSA_32bitAlignedMalloc(len, 1,
+                                                    (M4OSA_Char*)"mixFrame");
+                        mixFrame.m_bufferSize = len;
+
+                        ALOGV("mix with bgm with size %lld", mBGAudioPCMFileLength);
+
+                        CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime,
+                                         &mPositionTimeMediaUs));
+
+                        if (mBGAudioPCMFileSeekPoint -
+                             mBGAudioPCMFileOriginalSeekPoint <=
+                              (mBGAudioPCMFileTrimmedLength - len)) {
+
+                            ALOGV("Checking mBGAudioPCMFileHandle %d",
+                                (unsigned int)mBGAudioPCMFileHandle);
+
+                            if (mBGAudioPCMFileHandle != M4OSA_NULL) {
+                                ALOGV("fillBuffer seeking file to %lld",
+                                    mBGAudioPCMFileSeekPoint);
+
+                                // TODO : 32bits required for OSAL
+                                M4OSA_UInt32 tmp32 =
+                                    (M4OSA_UInt32)mBGAudioPCMFileSeekPoint;
+                                err = M4OSA_fileReadSeek(mBGAudioPCMFileHandle,
+                                                M4OSA_kFileSeekBeginning,
+                                                (M4OSA_FilePosition*)&tmp32);
+
+                                mBGAudioPCMFileSeekPoint = tmp32;
+
+                                if (err != M4NO_ERROR){
+                                    ALOGE("M4OSA_fileReadSeek err %d",(int)err);
+                                }
+
+                                err = M4OSA_fileReadData(mBGAudioPCMFileHandle,
+                                       (M4OSA_Int8*)bgFrame.m_dataAddress,
+                                       (M4OSA_UInt32*)&len);
+                                if (err == M4WAR_NO_DATA_YET ) {
+
+                                    ALOGV("fillBuffer End of file reached");
+                                    err = M4NO_ERROR;
+
+                                    // We reached the end of file
+                                    // move to begin cut time equal value
+                                    if (mAudioMixSettings->bLoop) {
+                                        mBGAudioPCMFileSeekPoint =
+                                            (((int64_t)(mAudioMixSettings->beginCutMs) *
+                                                mAudioMixSettings->uiSamplingFrequency) *
+                                                mAudioMixSettings->uiNbChannels *
+                                                sizeof(M4OSA_UInt16)) / 1000;
+                                        ALOGV("fillBuffer Looping \
+                                            to mBGAudioPCMFileSeekPoint %lld",
+                                            mBGAudioPCMFileSeekPoint);
+                                    }
+                                    else {
+                                            // No mixing;
+                                            // take care of volume of primary track
+                                        if (fPTVolLevel < 1.0) {
+                                            setPrimaryTrackVolume(pPTMdata,
+                                             uiPCMsize, fPTVolLevel);
+                                        }
+                                    }
+                                } else if (err != M4NO_ERROR ) {
+                                     ALOGV("fileReadData for audio err %d", err);
+                                } else {
+                                    mBGAudioPCMFileSeekPoint += len;
+                                    ALOGV("fillBuffer mBGAudioPCMFileSeekPoint \
+                                         %lld", mBGAudioPCMFileSeekPoint);
+
+                                    // Assign the ptr data to primary track
+                                    ptFrame.m_dataAddress = (M4OSA_UInt16*)ptr;
+                                    ptFrame.m_bufferSize = len;
+
+                                    // Call to mix and duck
+                                    mAudioProcess->mixAndDuck(
+                                         &ptFrame, &bgFrame, &mixFrame);
+
+                                        // Overwrite the decoded buffer
+                                    memcpy((void *)ptr,
+                                         (void *)mixFrame.m_dataAddress, len);
+                                }
+                            }
+                        } else if (mAudioMixSettings->bLoop){
+                            // Move to begin cut time equal value
+                            mBGAudioPCMFileSeekPoint =
+                                mBGAudioPCMFileOriginalSeekPoint;
+                        } else {
+                            // No mixing;
+                            // take care of volume level of primary track
+                            if(fPTVolLevel < 1.0) {
+                                setPrimaryTrackVolume(
+                                      pPTMdata, uiPCMsize, fPTVolLevel);
+                            }
+                        }
+                        if (bgFrame.m_dataAddress) {
+                            free(bgFrame.m_dataAddress);
+                        }
+                        if (mixFrame.m_dataAddress) {
+                            free(mixFrame.m_dataAddress);
+                        }
+                    } else {
+                        // No mixing;
+                        // take care of volume level of primary track
+                        if(fPTVolLevel < 1.0) {
+                            setPrimaryTrackVolume(pPTMdata, uiPCMsize,
+                                                 fPTVolLevel);
+                        }
+                    }
+                }
+            }
+
+            CHECK((status == OK && mInputBuffer != NULL)
+                   || (status != OK && mInputBuffer == NULL));
+
+            Mutex::Autolock autoLock(mLock);
+
+            if (status != OK) {
+                ALOGV("fillBuffer: mSource->read returned err %d", status);
+                if (mObserver && !mReachedEOS) {
+                    postEOS = true;
+                }
+
+                mReachedEOS = true;
+                mFinalStatus = status;
+                break;
+            }
+
+            CHECK(mInputBuffer->meta_data()->findInt64(
+                        kKeyTime, &mPositionTimeMediaUs));
+
+            mPositionTimeRealUs =
+                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
+                    / mSampleRate;
+
+            ALOGV("buffer->size() = %d, "
+                 "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
+                 mInputBuffer->range_length(),
+                 mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+        }
+
+        if (mInputBuffer->range_length() == 0) {
+            mInputBuffer->release();
+            mInputBuffer = NULL;
+
+            continue;
+        }
+
+        size_t copy = size_remaining;
+        if (copy > mInputBuffer->range_length()) {
+            copy = mInputBuffer->range_length();
+        }
+
+        memcpy((char *)data + size_done,
+               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
+               copy);
+
+        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
+                                mInputBuffer->range_length() - copy);
+
+        size_done += copy;
+        size_remaining -= copy;
+    }
+
+    {
+        Mutex::Autolock autoLock(mLock);
+        mNumFramesPlayed += size_done / mFrameSize;
+    }
+
+    // Notify the observer outside of mLock to avoid lock-order issues.
+    if (postEOS) {
+        mObserver->postAudioEOS();
+    }
+
+    if (postSeekComplete) {
+        mObserver->postAudioSeekComplete();
+    }
+
+    return size_done;
+}
+
+// Stores the audio mixing settings used by start() and fillBuffer().
+// The pointer is kept as-is (caller retains ownership).
+void VideoEditorAudioPlayer::setAudioMixSettings(
+    M4xVSS_AudioMixingSettings* pAudioMixSettings) {
+    mAudioMixSettings = pAudioMixSettings;
+}
+
+// Stores the OSAL file handle of the background-audio PCM file that
+// start()/fillBuffer() read from. Caller retains ownership of the handle.
+void VideoEditorAudioPlayer::setAudioMixPCMFileHandle(
+    M4OSA_Context pBGAudioPCMFileHandle){
+    mBGAudioPCMFileHandle = pBGAudioPCMFileHandle;
+}
+
+// Records the storyboard skim timestamp, the current media item's begin
+// cut time, and its volume (0-100); start() and fillBuffer() use these to
+// position the background track and to scale the primary track.
+void VideoEditorAudioPlayer::setAudioMixStoryBoardSkimTimeStamp(
+    M4OSA_UInt32 pBGAudioStoryBoardSkimTimeStamp,
+    M4OSA_UInt32 pBGAudioCurrentMediaBeginCutTS,
+    M4OSA_UInt32 pBGAudioCurrentMediaVolumeVal) {
+
+    mBGAudioStoryBoardSkimTimeStamp = pBGAudioStoryBoardSkimTimeStamp;
+    mBGAudioStoryBoardCurrentMediaBeginCutTS = pBGAudioCurrentMediaBeginCutTS;
+    mBGAudioStoryBoardCurrentMediaVolumeVal = pBGAudioCurrentMediaVolumeVal;
+}
+
+// Scales |size| 16-bit PCM samples at |data| in place by |volLevel|.
+void VideoEditorAudioPlayer::setPrimaryTrackVolume(
+    M4OSA_Int16 *data, M4OSA_UInt32 size, M4OSA_Float volLevel) {
+
+    for (M4OSA_UInt32 i = 0; i < size; ++i) {
+        data[i] = (M4OSA_Int16)(data[i] * volLevel);
+    }
+}
+
+}
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
new file mode 100755
index 0000000..626df39
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VE_AUDIO_PLAYER_H_
+#define VE_AUDIO_PLAYER_H_
+
+#include <media/MediaPlayerInterface.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/TimeSource.h>
+#include <utils/threads.h>
+
+#include "M4xVSS_API.h"
+#include "VideoEditorMain.h"
+#include "M4OSA_FileReader.h"
+#include "VideoEditorBGAudioProcessing.h"
+
+
+namespace android {
+
+class MediaSource;
+class AudioTrack;
+class PreviewPlayer;
+
+// Audio playback engine for the video-editor preview: pulls decoded PCM
+// from a MediaSource, optionally mixes in a background-audio PCM file
+// (with ducking) via VideoEditorBGAudioProcessing, and renders through a
+// MediaPlayerBase::AudioSink or a raw AudioTrack. Also acts as the
+// playback TimeSource.
+class VideoEditorAudioPlayer : public TimeSource {
+public:
+    // Notification codes posted to the PreviewPlayer observer.
+    enum {
+        REACHED_EOS,
+        SEEK_COMPLETE
+    };
+
+    VideoEditorAudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink,
+        PreviewPlayer *audioObserver = NULL);
+
+    ~VideoEditorAudioPlayer();
+
+    // Return time in us.
+    int64_t getRealTimeUs();
+
+    // Returns the timestamp of the last buffer played (in us).
+    int64_t getMediaTimeUs();
+
+    // Returns true iff a mapping is established, i.e. the AudioPlayerBase
+    // has played at least one frame of audio.
+    bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
+
+    status_t start(bool sourceAlreadyStarted = false);
+    void pause(bool playPendingSamples = false);
+    void resume();
+    status_t seekTo(int64_t time_us);
+    bool isSeeking();
+    bool reachedEOS(status_t *finalStatus);
+
+    // Background-track (BT) mixing configuration; the settings object and
+    // file handle remain owned by the caller.
+    void setAudioMixSettings(M4xVSS_AudioMixingSettings* pAudioMixSettings);
+    void setAudioMixPCMFileHandle(M4OSA_Context pBGAudioPCMFileHandle);
+    void setAudioMixStoryBoardSkimTimeStamp(
+        M4OSA_UInt32 pBGAudioStoryBoardSkimTimeStamp,
+        M4OSA_UInt32 pBGAudioCurrentMediaBeginCutTS,
+        M4OSA_UInt32 pBGAudioCurrentMediaVolumeVal);
+
+    void setObserver(PreviewPlayer *observer);
+    void setSource(const sp<MediaSource> &source);
+    sp<MediaSource> getSource();
+
+    bool isStarted();
+private:
+
+    // Background-track mixer configuration and processor.
+    M4xVSS_AudioMixingSettings *mAudioMixSettings;
+    VideoEditorBGAudioProcessing *mAudioProcess;
+
+    // Background PCM file state: lengths/seek points in bytes, duration in
+    // ms, storyboard timestamps as supplied by the preview controller.
+    M4OSA_Context mBGAudioPCMFileHandle;
+    int64_t mBGAudioPCMFileLength;
+    int64_t mBGAudioPCMFileTrimmedLength;
+    int64_t mBGAudioPCMFileDuration;
+    int64_t mBGAudioPCMFileSeekPoint;
+    int64_t mBGAudioPCMFileOriginalSeekPoint;
+    int64_t mBGAudioStoryBoardSkimTimeStamp;
+    int64_t mBGAudioStoryBoardCurrentMediaBeginCutTS;
+    int64_t mBGAudioStoryBoardCurrentMediaVolumeVal;
+
+    sp<MediaSource> mSource;
+    AudioTrack *mAudioTrack;
+
+    MediaBuffer *mInputBuffer;
+
+    int mSampleRate;
+    int64_t mLatencyUs;
+    size_t mFrameSize;
+
+    // mLock guards the time mapping, seek and EOS state below.
+    Mutex mLock;
+    int64_t mNumFramesPlayed;
+
+    int64_t mPositionTimeMediaUs;
+    int64_t mPositionTimeRealUs;
+
+    bool mSeeking;
+    bool mReachedEOS;
+    status_t mFinalStatus;
+    int64_t mSeekTimeUs;
+
+    bool mStarted;
+
+    // Stash for the first buffer read in start() (see the optional
+    // INFO_FORMAT_CHANGED handling there).
+    bool mIsFirstBuffer;
+    status_t mFirstBufferResult;
+    MediaBuffer *mFirstBuffer;
+
+    sp<MediaPlayerBase::AudioSink> mAudioSink;
+    PreviewPlayer *mObserver;
+
+    static void AudioCallback(int event, void *user, void *info);
+    void AudioCallback(int event, void *info);
+    size_t fillBuffer(void *data, size_t size);
+    static size_t AudioSinkCallback(
+            MediaPlayerBase::AudioSink *audioSink,
+            void *data, size_t size, void *me);
+
+    void reset();
+    void clear();
+    int64_t getRealTimeUs_l();
+    void setPrimaryTrackVolume(
+            M4OSA_Int16 *data, M4OSA_UInt32 size, M4OSA_Float volLevel);
+
+    // Non-copyable.
+    VideoEditorAudioPlayer(const VideoEditorAudioPlayer &);
+    VideoEditorAudioPlayer &operator=(const VideoEditorAudioPlayer &);
+};
+
+} // namespace android
+
+#endif // VE_AUDIO_PLAYER_H_
diff --git a/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.cpp b/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.cpp
new file mode 100755
index 0000000..e24fcf4
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.cpp
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoEditorBGAudioProcessing"
+
+#include <stdint.h>
+
+#include <utils/Log.h>
+#include "VideoEditorBGAudioProcessing.h"
+
+namespace android {
+
+// Initializes all mixer state to quiescent defaults; the real parameters
+// are supplied later through setMixParams().
+VideoEditorBGAudioProcessing::VideoEditorBGAudioProcessing() {
+    ALOGV("Constructor");
+
+    // Ducking state.
+    mDucking_enable = 0;
+    mDucking_threshold = 0;
+    mDucking_lowVolume = 0;
+    mDuckingFactor = 0;
+    mDoDucking = 0;
+    mAudVolArrIndex = 0;
+
+    // Track volume levels.
+    mPTVolLevel = 0;
+    mBTVolLevel = 0;
+
+    // Sample-rate and channel conversion defaults.
+    mIsSSRCneeded = 0;
+    mChannelConversion = 0;
+    mBTFormat = MONO_16_BIT;
+    mInSampleRate = 8000;
+    mOutSampleRate = 16000;
+    mPTChannelCount = 2;
+    mBTChannelCount = 1;
+}
+
+// Mixes one primary-track (PT) frame with one background-track (BT) frame.
+// The ducking analysis tracks the PT peak over a kProcessingWindowSize
+// window and fades the BT down/up accordingly; the mix itself is done in
+// place in the BT buffer and then copied to the out buffer.
+// Fixes: pointers were passed to ALOGV with "%x" (undefined behavior on
+// 64-bit; now "%p"); the positive clamp used 'temp > 32768', letting
+// temp == 32768 overflow the Int16 cast (now 'temp > 32767').
+M4OSA_Int32 VideoEditorBGAudioProcessing::mixAndDuck(
+        void *primaryTrackBuffer,
+        void *backgroundTrackBuffer,
+        void *outBuffer) {
+
+    ALOGV("mixAndDuck: track buffers (primary: %p and background: %p) "
+            "and out buffer %p",
+            primaryTrackBuffer, backgroundTrackBuffer, outBuffer);
+
+    M4AM_Buffer16* pPrimaryTrack = (M4AM_Buffer16*)primaryTrackBuffer;
+    M4AM_Buffer16* pBackgroundTrack = (M4AM_Buffer16*)backgroundTrackBuffer;
+    M4AM_Buffer16* pMixedOutBuffer = (M4AM_Buffer16*)outBuffer;
+
+    // Output size if same as PT size
+    pMixedOutBuffer->m_bufferSize = pPrimaryTrack->m_bufferSize;
+
+    // Before mixing, we need to have only PT as out buffer
+    memcpy((void *)pMixedOutBuffer->m_dataAddress,
+        (void *)pPrimaryTrack->m_dataAddress, pMixedOutBuffer->m_bufferSize);
+
+    // Initialize ducking variables
+    // Initially contains the input primary track
+    M4OSA_Int16 *pPTMdata2 = (M4OSA_Int16*)pMixedOutBuffer->m_dataAddress;
+
+    // Contains BG track processed data(like channel conversion etc..
+    M4OSA_Int16 *pBTMdata1 = (M4OSA_Int16*) pBackgroundTrack->m_dataAddress;
+
+    // Since we need to give sample count and not buffer size
+    M4OSA_UInt32 uiPCMsize = pMixedOutBuffer->m_bufferSize / 2 ;
+
+    if ((mDucking_enable) && (mPTVolLevel != 0.0)) {
+        // Scan the PT frame for its peak absolute amplitude.
+        M4OSA_Int32 peakDbValue = 0;
+        M4OSA_Int32 previousDbValue = 0;
+        M4OSA_Int16 *pPCM16Sample = (M4OSA_Int16*)pPrimaryTrack->m_dataAddress;
+        const size_t n = pPrimaryTrack->m_bufferSize / sizeof(M4OSA_Int16);
+
+        for (size_t loopIndex = 0; loopIndex < n; ++loopIndex) {
+            if (pPCM16Sample[loopIndex] >= 0) {
+                peakDbValue = previousDbValue > pPCM16Sample[loopIndex] ?
+                        previousDbValue : pPCM16Sample[loopIndex];
+                previousDbValue = peakDbValue;
+            } else {
+                peakDbValue = previousDbValue > -pPCM16Sample[loopIndex] ?
+                        previousDbValue: -pPCM16Sample[loopIndex];
+                previousDbValue = peakDbValue;
+            }
+        }
+
+        mAudioVolumeArray[mAudVolArrIndex] = getDecibelSound(peakDbValue);
+
+        // Check for threshold is done after kProcessingWindowSize cycles
+        if (mAudVolArrIndex >= kProcessingWindowSize - 1) {
+            mDoDucking = isThresholdBreached(
+                    mAudioVolumeArray, mAudVolArrIndex, mDucking_threshold);
+
+            mAudVolArrIndex = 0;
+        } else {
+            mAudVolArrIndex++;
+        }
+
+        //
+        // Below logic controls the mixing weightage
+        // for Background and Primary Tracks
+        // for the duration of window under analysis,
+        // to give fade-out for Background and fade-in for primary
+        // Current fading factor is distributed in equal range over
+        // the defined window size.
+        // For a window size = 25
+        // (500 ms (window under analysis) / 20 ms (sample duration))
+        //
+
+        if (mDoDucking) {
+            if (mDuckingFactor > mDucking_lowVolume) {
+                // FADE OUT BG Track
+                // Increment ducking factor in total steps in factor
+                // of low volume steps to reach low volume level
+                mDuckingFactor -= mDucking_lowVolume;
+            } else {
+                mDuckingFactor = mDucking_lowVolume;
+            }
+        } else {
+            if (mDuckingFactor < 1.0 ) {
+                // FADE IN BG Track
+                // Increment ducking factor in total steps of
+                // low volume factor to reach orig.volume level
+                mDuckingFactor += mDucking_lowVolume;
+            } else {
+                mDuckingFactor = 1.0;
+            }
+        }
+    } // end if - mDucking_enable
+
+
+    // Mixing logic
+    ALOGV("Out of Ducking analysis uiPCMsize %d %f %f",
+            mDoDucking, mDuckingFactor, mBTVolLevel);
+    while (uiPCMsize-- > 0) {
+
+        // Set vol factor for BT and PT
+        *pBTMdata1 = (M4OSA_Int16)(*pBTMdata1*mBTVolLevel);
+        *pPTMdata2 = (M4OSA_Int16)(*pPTMdata2*mPTVolLevel);
+
+        // Mix the two samples
+        if (mDoDucking) {
+
+            // Duck the BG track to ducking factor value before mixing
+            *pBTMdata1 = (M4OSA_Int16)((*pBTMdata1)*(mDuckingFactor));
+
+            // mix as normal case
+            *pBTMdata1 = (M4OSA_Int16)(*pBTMdata1 /2 + *pPTMdata2 /2);
+        } else {
+
+            *pBTMdata1 = (M4OSA_Int16)((*pBTMdata1)*(mDuckingFactor));
+            *pBTMdata1 = (M4OSA_Int16)(*pBTMdata1 /2 + *pPTMdata2 /2);
+        }
+
+        // Restore the halved amplitude, saturating at the Int16 limits.
+        M4OSA_Int32 temp;
+        if (*pBTMdata1 < 0) {
+            temp = -(*pBTMdata1) * 2; // bring to original Amplitude level
+
+            if (temp > 32767) {
+                *pBTMdata1 = -32766; // less then max allowed value
+            } else {
+                *pBTMdata1 = (M4OSA_Int16)(-temp);
+            }
+        } else {
+            temp = (*pBTMdata1) * 2; // bring to original Amplitude level
+            if (temp > 32767) {
+                *pBTMdata1 = 32767; // less than max allowed value
+            } else {
+                *pBTMdata1 = (M4OSA_Int16)temp;
+            }
+        }
+
+        pBTMdata1++;
+        pPTMdata2++;
+    }
+
+    // The mixed samples were written into the BT buffer; publish them.
+    // NOTE(review): copies m_bufferSize of the BT buffer while the out
+    // buffer was sized from the PT buffer - assumed equal; confirm.
+    memcpy((void *)pMixedOutBuffer->m_dataAddress,
+        (void *)pBackgroundTrack->m_dataAddress,
+        pBackgroundTrack->m_bufferSize);
+
+    ALOGV("mixAndDuck: X");
+    return M4NO_ERROR;
+}
+
+// Computes the output buffer size needed after sample-rate conversion.
+// Fix: the original divided first ((mOutSampleRate / mInSampleRate) *
+// size), which truncates to 0 whenever the output rate is lower than the
+// input rate and loses precision for non-integral ratios. Multiply first
+// in 64 bits to avoid both truncation and overflow.
+M4OSA_Int32 VideoEditorBGAudioProcessing::calculateOutResampleBufSize() {
+
+    // This already takes care of channel count in mBTBuffer.m_bufferSize
+    return (M4OSA_Int32)(((int64_t)mBTBuffer.m_bufferSize * mOutSampleRate) /
+            mInSampleRate);
+}
+
+// Adopts the caller-supplied mixing/ducking parameters and resets the
+// per-window ducking state. Also decides whether sample-rate (SSRC) or
+// channel conversion will be needed for this mix.
+// Fix: the ALOGV printed mPTVolLevel for both volume levels; the last
+// argument now matches its "BTVolLevel" label.
+void VideoEditorBGAudioProcessing::setMixParams(
+        const AudioMixSettings& setting) {
+    ALOGV("setMixParams");
+
+    mDucking_enable  = setting.lvInDucking_enable;
+    mDucking_lowVolume = setting.lvInDucking_lowVolume;
+    mDucking_threshold = setting.lvInDucking_threshold;
+    mPTVolLevel = setting.lvPTVolLevel;
+    mBTVolLevel = setting.lvBTVolLevel ;
+    mBTChannelCount = setting.lvBTChannelCount;
+    mPTChannelCount = setting.lvPTChannelCount;
+    mBTFormat = setting.lvBTFormat;
+    mInSampleRate = setting.lvInSampleRate;
+    mOutSampleRate = setting.lvOutSampleRate;
+
+    // Reset the following params to default values
+    mAudVolArrIndex = 0;
+    mDoDucking = 0;
+    mDuckingFactor = 1.0;
+
+    ALOGV("ducking enable 0x%x lowVolume %f threshold %d "
+            "fPTVolLevel %f BTVolLevel %f",
+            mDucking_enable, mDucking_lowVolume, mDucking_threshold,
+            mPTVolLevel, mBTVolLevel);
+
+    // Decides if SSRC support is needed for this mixing
+    mIsSSRCneeded = (setting.lvInSampleRate != setting.lvOutSampleRate);
+    if (setting.lvBTChannelCount != setting.lvPTChannelCount){
+        if (setting.lvBTChannelCount == 2){
+            mChannelConversion = 1; // convert to MONO
+        } else {
+            mChannelConversion = 2; // Convert to STEREO
+        }
+    } else {
+        mChannelConversion = 0;
+    }
+}
+
+// Fast way to compute 10 * log(value)
+// Maps a peak 16-bit amplitude to an approximate decibel bucket. The
+// comment after each constant names the bucket's upper bound (twice the
+// compared power of two).
+// Fixes: M4OSA_UInt32 was passed to "%ld" (varargs mismatch on LP64;
+// now "%u" with an explicit cast); the "// 4028" comment typo is 4096.
+M4OSA_Int32 VideoEditorBGAudioProcessing::getDecibelSound(M4OSA_UInt32 value) {
+    ALOGV("getDecibelSound: %u", (unsigned int)value);
+
+    if (value <= 0 || value > 0x8000) {
+        return 0;
+    } else if (value > 0x4000) { // 32768
+        return 90;
+    } else if (value > 0x2000) { // 16384
+        return 84;
+    } else if (value > 0x1000) { // 8192
+        return 78;
+    } else if (value > 0x0800) { // 4096
+        return 72;
+    } else if (value > 0x0400) { // 2048
+        return 66;
+    } else if (value > 0x0200) { // 1024
+        return 60;
+    } else if (value > 0x0100) { // 512
+        return 54;
+    } else if (value > 0x0080) { // 256
+        return 48;
+    } else if (value > 0x0040) { // 128
+        return 42;
+    } else if (value > 0x0020) { // 64
+        return 36;
+    } else if (value > 0x0010) { // 32
+        return 30;
+    } else if (value > 0x0008) { // 16
+        return 24;
+    } else if (value > 0x0007) { // 8
+        // NOTE(review): duplicates the 24 dB bucket for value == 8;
+        // possibly meant to return a distinct value - confirm.
+        return 24;
+    } else if (value > 0x0003) { // 4
+        return 18;
+    } else if (value > 0x0001) { // 2
+        return 12;
+    } else { // 1
+        return 6;
+    }
+}
+
+// Returns true when the average of the first |storeCount| entries of
+// |averageValue| exceeds |thresholdValue| (decibel buckets from
+// getDecibelSound()).
+// Fix: guards against division by zero for a non-positive storeCount.
+// NOTE(review): the caller passes mAudVolArrIndex (kProcessingWindowSize-1),
+// so the last filled slot is excluded from the average - confirm intended.
+M4OSA_Bool VideoEditorBGAudioProcessing::isThresholdBreached(
+        M4OSA_Int32* averageValue,
+        M4OSA_Int32 storeCount,
+        M4OSA_Int32 thresholdValue) {
+
+    ALOGV("isThresholdBreached");
+
+    if (storeCount <= 0) {
+        // Empty window: nothing to average, so no breach.
+        return M4OSA_FALSE;
+    }
+
+    int totalValue = 0;
+    for (int i = 0; i < storeCount; ++i) {
+        totalValue += averageValue[i];
+    }
+    return (totalValue / storeCount > thresholdValue);
+}
+
+}//namespace android
diff --git a/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.h b/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.h
new file mode 100755
index 0000000..cb7a69f
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorBGAudioProcessing.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VE_BACKGROUND_AUDIO_PROC_H
+#define VE_BACKGROUND_AUDIO_PROC_H
+
+#include "M4OSA_Error.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Export.h"
+#include "M4OSA_CoreID.h"
+
+
+namespace android {
+
+typedef struct {
+ M4OSA_UInt16* m_dataAddress; // Android SRC needs a 16-bit sample pointer
+ M4OSA_UInt32 m_bufferSize;
+} M4AM_Buffer16; // Structure contains a UInt16 data pointer
+
+enum AudioFormat {
+ MONO_16_BIT,
+ STEREO_16_BIT
+};
+
+// Following struct will be used by app to supply the PT and BT properties
+// along with ducking values
+typedef struct {
+ M4OSA_Int32 lvInSampleRate; // Sampling audio freq (8000,16000 or more )
+ M4OSA_Int32 lvOutSampleRate; //Sampling audio freq (8000,16000 or more )
+ AudioFormat lvBTFormat;
+
+ M4OSA_Int32 lvInDucking_threshold;
+ M4OSA_Float lvInDucking_lowVolume;
+ M4OSA_Bool lvInDucking_enable;
+ M4OSA_Float lvPTVolLevel;
+ M4OSA_Float lvBTVolLevel;
+ M4OSA_Int32 lvBTChannelCount;
+ M4OSA_Int32 lvPTChannelCount;
+} AudioMixSettings;
+
+// This class is defined to get SF SRC access
+class VideoEditorBGAudioProcessing {
+public:
+ VideoEditorBGAudioProcessing();
+ ~VideoEditorBGAudioProcessing() {}
+
+ void setMixParams(const AudioMixSettings& params);
+
+ M4OSA_Int32 mixAndDuck(
+ void* primaryTrackBuffer,
+ void* backgroundTrackBuffer,
+ void* mixedOutputBuffer);
+
+private:
+ enum {
+ kProcessingWindowSize = 10,
+ };
+
+ M4OSA_Int32 mInSampleRate;
+ M4OSA_Int32 mOutSampleRate;
+ AudioFormat mBTFormat;
+
+ M4OSA_Bool mIsSSRCneeded;
+ M4OSA_Int32 mBTChannelCount;
+ M4OSA_Int32 mPTChannelCount;
+ M4OSA_UInt8 mChannelConversion;
+
+ M4OSA_UInt32 mDucking_threshold;
+ M4OSA_Float mDucking_lowVolume;
+ M4OSA_Float mDuckingFactor ;
+ M4OSA_Bool mDucking_enable;
+ M4OSA_Int32 mAudioVolumeArray[kProcessingWindowSize];
+ M4OSA_Int32 mAudVolArrIndex;
+ M4OSA_Bool mDoDucking;
+ M4OSA_Float mPTVolLevel;
+ M4OSA_Float mBTVolLevel;
+
+ M4AM_Buffer16 mBTBuffer;
+
+ M4OSA_Int32 getDecibelSound(M4OSA_UInt32 value);
+ M4OSA_Bool isThresholdBreached(M4OSA_Int32* averageValue,
+ M4OSA_Int32 storeCount, M4OSA_Int32 thresholdValue);
+
+ // This returns the size of buffer which needs to allocated
+ // before resampling is called
+ M4OSA_Int32 calculateOutResampleBufSize();
+
+ // Don't call me.
+ VideoEditorBGAudioProcessing(const VideoEditorBGAudioProcessing&);
+ VideoEditorBGAudioProcessing& operator=(
+ const VideoEditorBGAudioProcessing&);
+};
+
+} // namespace android
+
+#endif // VE_BACKGROUND_AUDIO_PROC_H
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
new file mode 100755
index 0000000..57cab08
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
@@ -0,0 +1,577 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "VideoEditorPlayer"
+#include <utils/Log.h>
+
+#include "VideoEditorPlayer.h"
+#include "PreviewPlayer.h"
+
+#include <media/Metadata.h>
+#include <media/stagefright/MediaExtractor.h>
+
+#include <system/audio.h>
+
+namespace android {
+
+VideoEditorPlayer::VideoEditorPlayer(NativeWindowRenderer* renderer)
+ : mPlayer(new PreviewPlayer(renderer)) {
+
+ ALOGV("VideoEditorPlayer");
+ mPlayer->setListener(this);
+}
+
+VideoEditorPlayer::~VideoEditorPlayer() {
+ ALOGV("~VideoEditorPlayer");
+
+ reset();
+ mVeAudioSink.clear();
+
+ delete mPlayer;
+ mPlayer = NULL;
+}
+
+status_t VideoEditorPlayer::initCheck() {
+ ALOGV("initCheck");
+ return OK;
+}
+
+
+status_t VideoEditorPlayer::setAudioPlayer(VideoEditorAudioPlayer *audioPlayer) {
+ return mPlayer->setAudioPlayer(audioPlayer);
+}
+
+
+status_t VideoEditorPlayer::setDataSource(
+ const char *url, const KeyedVector<String8, String8> *headers) {
+ ALOGI("setDataSource('%s')", url);
+ if (headers != NULL) {
+ ALOGE("Headers parameter is not supported");
+ return INVALID_OPERATION;
+ }
+
+ return mPlayer->setDataSource(url);
+}
+
+// We do not use this in preview; dummy implementation as this is pure virtual
+status_t VideoEditorPlayer::setDataSource(int fd, int64_t offset,
+ int64_t length) {
+ ALOGE("setDataSource(%d, %lld, %lld) Not supported", fd, offset, length);
+ return (!OK);
+}
+
+status_t VideoEditorPlayer::setVideoSurface(const sp<Surface> &surface) {
+ ALOGV("setVideoSurface");
+
+ mPlayer->setSurface(surface);
+ return OK;
+}
+
+status_t VideoEditorPlayer::setVideoSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture) {
+ ALOGV("setVideoSurfaceTexture");
+
+ mPlayer->setSurfaceTexture(surfaceTexture);
+ return OK;
+}
+
+status_t VideoEditorPlayer::prepare() {
+ ALOGV("prepare");
+ return mPlayer->prepare();
+}
+
+status_t VideoEditorPlayer::prepareAsync() {
+ return mPlayer->prepareAsync();
+}
+
+status_t VideoEditorPlayer::start() {
+ ALOGV("start");
+ return mPlayer->play();
+}
+
+status_t VideoEditorPlayer::stop() {
+ ALOGV("stop");
+ return pause();
+}
+
+status_t VideoEditorPlayer::pause() {
+ ALOGV("pause");
+ return mPlayer->pause();
+}
+
+bool VideoEditorPlayer::isPlaying() {
+ ALOGV("isPlaying");
+ return mPlayer->isPlaying();
+}
+
+status_t VideoEditorPlayer::seekTo(int msec) {
+ ALOGV("seekTo");
+ status_t err = mPlayer->seekTo((int64_t)msec * 1000);
+ return err;
+}
+
+status_t VideoEditorPlayer::getCurrentPosition(int *msec) {
+ ALOGV("getCurrentPosition");
+ int64_t positionUs;
+ status_t err = mPlayer->getPosition(&positionUs);
+
+ if (err != OK) {
+ return err;
+ }
+
+ *msec = (positionUs + 500) / 1000;
+ return OK;
+}
+
+status_t VideoEditorPlayer::getDuration(int *msec) {
+ ALOGV("getDuration");
+
+ int64_t durationUs;
+ status_t err = mPlayer->getDuration(&durationUs);
+
+ if (err != OK) {
+ *msec = 0;
+ return OK;
+ }
+
+ *msec = (durationUs + 500) / 1000;
+ return OK;
+}
+
+status_t VideoEditorPlayer::reset() {
+ ALOGV("reset");
+ mPlayer->reset();
+ return OK;
+}
+
+status_t VideoEditorPlayer::setLooping(int loop) {
+ ALOGV("setLooping");
+ return mPlayer->setLooping(loop);
+}
+
+status_t VideoEditorPlayer::setParameter(int key, const Parcel &request) {
+ ALOGE("setParameter not implemented");
+ return INVALID_OPERATION;
+}
+
+status_t VideoEditorPlayer::getParameter(int key, Parcel *reply) {
+ ALOGE("getParameter not implemented");
+ return INVALID_OPERATION;
+}
+
+player_type VideoEditorPlayer::playerType() {
+ ALOGV("playerType");
+ return STAGEFRIGHT_PLAYER;
+}
+
+void VideoEditorPlayer::acquireLock() {
+ ALOGV("acquireLock");
+ mPlayer->acquireLock();
+}
+
+void VideoEditorPlayer::releaseLock() {
+ ALOGV("releaseLock");
+ mPlayer->releaseLock();
+}
+
+status_t VideoEditorPlayer::invoke(const Parcel &request, Parcel *reply) {
+ return INVALID_OPERATION;
+}
+
+void VideoEditorPlayer::setAudioSink(const sp<AudioSink> &audioSink) {
+ MediaPlayerInterface::setAudioSink(audioSink);
+
+ mPlayer->setAudioSink(audioSink);
+}
+
+status_t VideoEditorPlayer::getMetadata(
+ const media::Metadata::Filter& ids, Parcel *records) {
+ using media::Metadata;
+
+ uint32_t flags = mPlayer->getSourceSeekFlags();
+
+ Metadata metadata(records);
+
+ metadata.appendBool(
+ Metadata::kPauseAvailable,
+ flags & MediaExtractor::CAN_PAUSE);
+
+ metadata.appendBool(
+ Metadata::kSeekBackwardAvailable,
+ flags & MediaExtractor::CAN_SEEK_BACKWARD);
+
+ metadata.appendBool(
+ Metadata::kSeekForwardAvailable,
+ flags & MediaExtractor::CAN_SEEK_FORWARD);
+
+ metadata.appendBool(
+ Metadata::kSeekAvailable,
+ flags & MediaExtractor::CAN_SEEK);
+
+ return OK;
+}
+
+status_t VideoEditorPlayer::loadEffectsSettings(
+ M4VSS3GPP_EffectSettings* pEffectSettings, int nEffects) {
+ ALOGV("loadEffectsSettings");
+ return mPlayer->loadEffectsSettings(pEffectSettings, nEffects);
+}
+
+status_t VideoEditorPlayer::loadAudioMixSettings(
+ M4xVSS_AudioMixingSettings* pAudioMixSettings) {
+ ALOGV("VideoEditorPlayer: loadAudioMixSettings");
+ return mPlayer->loadAudioMixSettings(pAudioMixSettings);
+}
+
+status_t VideoEditorPlayer::setAudioMixPCMFileHandle(
+ M4OSA_Context pAudioMixPCMFileHandle) {
+
+ ALOGV("VideoEditorPlayer: setAudioMixPCMFileHandle");
+ return mPlayer->setAudioMixPCMFileHandle(pAudioMixPCMFileHandle);
+}
+
+status_t VideoEditorPlayer::setAudioMixStoryBoardParam(
+ M4OSA_UInt32 audioMixStoryBoardTS,
+ M4OSA_UInt32 currentMediaBeginCutTime,
+ M4OSA_UInt32 primaryTrackVolValue) {
+
+ ALOGV("VideoEditorPlayer: setAudioMixStoryBoardParam");
+ return mPlayer->setAudioMixStoryBoardParam(audioMixStoryBoardTS,
+ currentMediaBeginCutTime, primaryTrackVolValue);
+}
+
+status_t VideoEditorPlayer::setPlaybackBeginTime(uint32_t msec) {
+ ALOGV("setPlaybackBeginTime");
+ return mPlayer->setPlaybackBeginTime(msec);
+}
+
+status_t VideoEditorPlayer::setPlaybackEndTime(uint32_t msec) {
+ ALOGV("setPlaybackEndTime");
+ return mPlayer->setPlaybackEndTime(msec);
+}
+
+status_t VideoEditorPlayer::setStoryboardStartTime(uint32_t msec) {
+ ALOGV("setStoryboardStartTime");
+ return mPlayer->setStoryboardStartTime(msec);
+}
+
+status_t VideoEditorPlayer::setProgressCallbackInterval(uint32_t cbInterval) {
+ ALOGV("setProgressCallbackInterval");
+ return mPlayer->setProgressCallbackInterval(cbInterval);
+}
+
+status_t VideoEditorPlayer::setMediaRenderingMode(
+ M4xVSS_MediaRendering mode,
+ M4VIDEOEDITING_VideoFrameSize outputVideoSize) {
+
+ ALOGV("setMediaRenderingMode");
+ return mPlayer->setMediaRenderingMode(mode, outputVideoSize);
+}
+
+status_t VideoEditorPlayer::resetJniCallbackTimeStamp() {
+ ALOGV("resetJniCallbackTimeStamp");
+ return mPlayer->resetJniCallbackTimeStamp();
+}
+
+status_t VideoEditorPlayer::setImageClipProperties(
+ uint32_t width, uint32_t height) {
+ return mPlayer->setImageClipProperties(width, height);
+}
+
+status_t VideoEditorPlayer::readFirstVideoFrame() {
+ return mPlayer->readFirstVideoFrame();
+}
+
+status_t VideoEditorPlayer::getLastRenderedTimeMs(uint32_t *lastRenderedTimeMs) {
+ mPlayer->getLastRenderedTimeMs(lastRenderedTimeMs);
+ return NO_ERROR;
+}
+
+/* Implementation of AudioSink interface */
+#undef LOG_TAG
+#define LOG_TAG "VeAudioSink"
+
+int VideoEditorPlayer::VeAudioOutput::mMinBufferCount = 4;
+bool VideoEditorPlayer::VeAudioOutput::mIsOnEmulator = false;
+
+VideoEditorPlayer::VeAudioOutput::VeAudioOutput()
+ : mCallback(NULL),
+ mCallbackCookie(NULL) {
+ mTrack = 0;
+ mStreamType = AUDIO_STREAM_MUSIC;
+ mLeftVolume = 1.0;
+ mRightVolume = 1.0;
+ mLatency = 0;
+ mMsecsPerFrame = 0;
+ mNumFramesWritten = 0;
+ setMinBufferCount();
+}
+
+VideoEditorPlayer::VeAudioOutput::~VeAudioOutput() {
+ close();
+}
+
+void VideoEditorPlayer::VeAudioOutput::setMinBufferCount() {
+
+ mIsOnEmulator = false;
+ mMinBufferCount = 4;
+}
+
+bool VideoEditorPlayer::VeAudioOutput::isOnEmulator() {
+
+ setMinBufferCount();
+ return mIsOnEmulator;
+}
+
+int VideoEditorPlayer::VeAudioOutput::getMinBufferCount() {
+
+ setMinBufferCount();
+ return mMinBufferCount;
+}
+
+ssize_t VideoEditorPlayer::VeAudioOutput::bufferSize() const {
+
+ if (mTrack == 0) return NO_INIT;
+ return mTrack->frameCount() * frameSize();
+}
+
+ssize_t VideoEditorPlayer::VeAudioOutput::frameCount() const {
+
+ if (mTrack == 0) return NO_INIT;
+ return mTrack->frameCount();
+}
+
+ssize_t VideoEditorPlayer::VeAudioOutput::channelCount() const
+{
+ if (mTrack == 0) return NO_INIT;
+ return mTrack->channelCount();
+}
+
+ssize_t VideoEditorPlayer::VeAudioOutput::frameSize() const
+{
+ if (mTrack == 0) return NO_INIT;
+ return mTrack->frameSize();
+}
+
+uint32_t VideoEditorPlayer::VeAudioOutput::latency () const
+{
+ return mLatency;
+}
+
+float VideoEditorPlayer::VeAudioOutput::msecsPerFrame() const
+{
+ return mMsecsPerFrame;
+}
+
+status_t VideoEditorPlayer::VeAudioOutput::getPosition(uint32_t *position) {
+
+ if (mTrack == 0) return NO_INIT;
+ return mTrack->getPosition(position);
+}
+
+status_t VideoEditorPlayer::VeAudioOutput::open(
+ uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+ audio_format_t format, int bufferCount,
+ AudioCallback cb, void *cookie) {
+
+ mCallback = cb;
+ mCallbackCookie = cookie;
+
+ // Check argument "bufferCount" against the mininum buffer count
+ if (bufferCount < mMinBufferCount) {
+ ALOGV("bufferCount (%d) is too small and increased to %d",
+ bufferCount, mMinBufferCount);
+ bufferCount = mMinBufferCount;
+
+ }
+ ALOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount);
+ if (mTrack) close();
+ int afSampleRate;
+ int afFrameCount;
+ int frameCount;
+
+ if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) !=
+ NO_ERROR) {
+ return NO_INIT;
+ }
+ if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) !=
+ NO_ERROR) {
+ return NO_INIT;
+ }
+
+ frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+
+ if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
+ switch(channelCount) {
+ case 1:
+ channelMask = AUDIO_CHANNEL_OUT_MONO;
+ break;
+ case 2:
+ channelMask = AUDIO_CHANNEL_OUT_STEREO;
+ break;
+ default:
+ return NO_INIT;
+ }
+ }
+
+ AudioTrack *t;
+ if (mCallback != NULL) {
+ t = new AudioTrack(
+ mStreamType,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ AUDIO_POLICY_OUTPUT_FLAG_NONE,
+ CallbackWrapper,
+ this);
+ } else {
+ t = new AudioTrack(
+ mStreamType,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount);
+ }
+
+ if ((t == 0) || (t->initCheck() != NO_ERROR)) {
+ ALOGE("Unable to create audio track");
+ delete t;
+ return NO_INIT;
+ }
+
+ ALOGV("setVolume");
+ t->setVolume(mLeftVolume, mRightVolume);
+ mMsecsPerFrame = 1.e3 / (float) sampleRate;
+ mLatency = t->latency();
+ mTrack = t;
+ return NO_ERROR;
+}
+
+void VideoEditorPlayer::VeAudioOutput::start() {
+
+ ALOGV("start");
+ if (mTrack) {
+ mTrack->setVolume(mLeftVolume, mRightVolume);
+ mTrack->start();
+ mTrack->getPosition(&mNumFramesWritten);
+ }
+}
+
+void VideoEditorPlayer::VeAudioOutput::snoopWrite(
+ const void* buffer, size_t size) {
+ // Visualization buffers not supported
+ return;
+
+}
+
+ssize_t VideoEditorPlayer::VeAudioOutput::write(
+ const void* buffer, size_t size) {
+
+ LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
+
+ //ALOGV("write(%p, %u)", buffer, size);
+ if (mTrack) {
+ snoopWrite(buffer, size);
+ ssize_t ret = mTrack->write(buffer, size);
+ mNumFramesWritten += ret / 4; // assume 16 bit stereo
+ return ret;
+ }
+ return NO_INIT;
+}
+
+void VideoEditorPlayer::VeAudioOutput::stop() {
+
+ ALOGV("stop");
+ if (mTrack) mTrack->stop();
+}
+
+void VideoEditorPlayer::VeAudioOutput::flush() {
+
+ ALOGV("flush");
+ if (mTrack) mTrack->flush();
+}
+
+void VideoEditorPlayer::VeAudioOutput::pause() {
+
+ ALOGV("VeAudioOutput::pause");
+ if (mTrack) mTrack->pause();
+}
+
+void VideoEditorPlayer::VeAudioOutput::close() {
+
+ ALOGV("close");
+ delete mTrack;
+ mTrack = 0;
+}
+
+void VideoEditorPlayer::VeAudioOutput::setVolume(float left, float right) {
+
+ ALOGV("setVolume(%f, %f)", left, right);
+ mLeftVolume = left;
+ mRightVolume = right;
+ if (mTrack) {
+ mTrack->setVolume(left, right);
+ }
+}
+
+// static
+void VideoEditorPlayer::VeAudioOutput::CallbackWrapper(
+ int event, void *cookie, void *info) {
+ //ALOGV("VeAudioOutput::callbackwrapper");
+ if (event != AudioTrack::EVENT_MORE_DATA) {
+ return;
+ }
+
+ VeAudioOutput *me = (VeAudioOutput *)cookie;
+ AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
+
+ size_t actualSize = (*me->mCallback)(
+ me, buffer->raw, buffer->size, me->mCallbackCookie);
+
+ buffer->size = actualSize;
+
+ if (actualSize > 0) {
+ me->snoopWrite(buffer->raw, actualSize);
+ }
+}
+
+status_t VideoEditorPlayer::VeAudioOutput::dump(int fd, const Vector<String16>& args) const
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ result.append(" VeAudioOutput\n");
+ snprintf(buffer, SIZE-1, " stream type(%d), left - right volume(%f, %f)\n",
+ mStreamType, mLeftVolume, mRightVolume);
+ result.append(buffer);
+ snprintf(buffer, SIZE-1, " msec per frame(%f), latency (%d)\n",
+ mMsecsPerFrame, mLatency);
+ result.append(buffer);
+ ::write(fd, result.string(), result.size());
+ if (mTrack != 0) {
+ mTrack->dump(fd, args);
+ }
+ return NO_ERROR;
+}
+
+int VideoEditorPlayer::VeAudioOutput::getSessionId() {
+
+ return mSessionId;
+}
+
+} // namespace android
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
new file mode 100755
index 0000000..6962501
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_VIDEOEDITOR_PLAYER_H
+#define ANDROID_VIDEOEDITOR_PLAYER_H
+
+#include <media/MediaPlayerInterface.h>
+#include <media/AudioTrack.h>
+#include "M4xVSS_API.h"
+#include "VideoEditorMain.h"
+#include "VideoEditorTools.h"
+#include "VideoEditorAudioPlayer.h"
+#include "NativeWindowRenderer.h"
+
+namespace android {
+
+struct PreviewPlayer;
+
+class VideoEditorPlayer : public MediaPlayerInterface {
+ public:
+ class VeAudioOutput: public MediaPlayerBase::AudioSink
+ {
+ public:
+ VeAudioOutput();
+ virtual ~VeAudioOutput();
+
+ virtual bool ready() const { return mTrack != NULL; }
+ virtual bool realtime() const { return true; }
+ virtual ssize_t bufferSize() const;
+ virtual ssize_t frameCount() const;
+ virtual ssize_t channelCount() const;
+ virtual ssize_t frameSize() const;
+ virtual uint32_t latency() const;
+ virtual float msecsPerFrame() const;
+ virtual status_t getPosition(uint32_t *position);
+ virtual int getSessionId();
+
+ virtual status_t open(
+ uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+ audio_format_t format, int bufferCount,
+ AudioCallback cb, void *cookie);
+
+ virtual void start();
+ virtual ssize_t write(const void* buffer, size_t size);
+ virtual void stop();
+ virtual void flush();
+ virtual void pause();
+ virtual void close();
+ void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; }
+ void setVolume(float left, float right);
+ virtual status_t dump(int fd,const Vector<String16>& args) const;
+
+ static bool isOnEmulator();
+ static int getMinBufferCount();
+ private:
+ static void setMinBufferCount();
+ static void CallbackWrapper(
+ int event, void *me, void *info);
+
+ AudioTrack* mTrack;
+ AudioCallback mCallback;
+ void * mCallbackCookie;
+ audio_stream_type_t mStreamType;
+ float mLeftVolume;
+ float mRightVolume;
+ float mMsecsPerFrame;
+ uint32_t mLatency;
+ int mSessionId;
+ static bool mIsOnEmulator;
+ static int mMinBufferCount; // 12 for emulator; otherwise 4
+
+ public:
+ uint32_t mNumFramesWritten;
+ void snoopWrite(const void*, size_t);
+ };
+
+public:
+ VideoEditorPlayer(NativeWindowRenderer* renderer);
+ virtual ~VideoEditorPlayer();
+
+ virtual status_t initCheck();
+
+ virtual status_t setDataSource(
+ const char *url, const KeyedVector<String8, String8> *headers);
+
+ virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
+ virtual status_t setVideoSurface(const sp<Surface> &surface);
+ virtual status_t setVideoSurfaceTexture(const sp<ISurfaceTexture> &surfaceTexture);
+ virtual status_t prepare();
+ virtual status_t prepareAsync();
+ virtual status_t start();
+ virtual status_t stop();
+ virtual status_t pause();
+ virtual bool isPlaying();
+ virtual status_t seekTo(int msec);
+ virtual status_t getCurrentPosition(int *msec);
+ virtual status_t getDuration(int *msec);
+ virtual status_t reset();
+ virtual status_t setLooping(int loop);
+ virtual player_type playerType();
+ virtual status_t invoke(const Parcel &request, Parcel *reply);
+ virtual void setAudioSink(const sp<AudioSink> &audioSink);
+ virtual void acquireLock();
+ virtual void releaseLock();
+ virtual status_t setParameter(int key, const Parcel &request);
+ virtual status_t getParameter(int key, Parcel *reply);
+
+ virtual status_t getMetadata(
+ const media::Metadata::Filter& ids, Parcel *records);
+
+ virtual status_t loadEffectsSettings(
+ M4VSS3GPP_EffectSettings* pEffectSettings, int nEffects);
+
+ virtual status_t loadAudioMixSettings(
+ M4xVSS_AudioMixingSettings* pAudioMixSettings);
+
+ virtual status_t setAudioMixPCMFileHandle(
+ M4OSA_Context pAudioMixPCMFileHandle);
+
+ virtual status_t setAudioMixStoryBoardParam(
+ M4OSA_UInt32 x, M4OSA_UInt32 y, M4OSA_UInt32 z);
+
+ virtual status_t setPlaybackBeginTime(uint32_t msec);
+ virtual status_t setPlaybackEndTime(uint32_t msec);
+ virtual status_t setStoryboardStartTime(uint32_t msec);
+ virtual status_t setProgressCallbackInterval(uint32_t cbInterval);
+
+ virtual status_t setMediaRenderingMode(M4xVSS_MediaRendering mode,
+ M4VIDEOEDITING_VideoFrameSize outputVideoSize);
+
+ virtual status_t resetJniCallbackTimeStamp();
+ virtual status_t setImageClipProperties(uint32_t width, uint32_t height);
+ virtual status_t readFirstVideoFrame();
+ virtual status_t getLastRenderedTimeMs(uint32_t *lastRenderedTimeMs);
+
+ status_t setAudioPlayer(VideoEditorAudioPlayer *audioPlayer);
+private:
+ PreviewPlayer *mPlayer;
+ sp<VeAudioOutput> mVeAudioSink;
+
+ VideoEditorPlayer(const VideoEditorPlayer &);
+ VideoEditorPlayer &operator=(const VideoEditorPlayer &);
+};
+
+} // namespace android
+
+#endif // ANDROID_VIDEOEDITOR_PLAYER_H
diff --git a/libvideoeditor/lvpp/VideoEditorPreviewController.cpp b/libvideoeditor/lvpp/VideoEditorPreviewController.cpp
new file mode 100755
index 0000000..149c4ea
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorPreviewController.cpp
@@ -0,0 +1,1467 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "PreviewController"
+#include <utils/Log.h>
+
+#include <gui/Surface.h>
+
+#include "VideoEditorAudioPlayer.h"
+#include "PreviewRenderer.h"
+#include "M4OSA_Semaphore.h"
+#include "M4OSA_Thread.h"
+#include "VideoEditorPreviewController.h"
+
+namespace android {
+
+
+VideoEditorPreviewController::VideoEditorPreviewController()
+ : mCurrentPlayer(0),
+ mThreadContext(NULL),
+ mPlayerState(VePlayerIdle),
+ mPrepareReqest(M4OSA_FALSE),
+ mClipList(NULL),
+ mNumberClipsInStoryBoard(0),
+ mNumberClipsToPreview(0),
+ mStartingClipIndex(0),
+ mPreviewLooping(M4OSA_FALSE),
+ mCallBackAfterFrameCnt(0),
+ mEffectsSettings(NULL),
+ mNumberEffects(0),
+ mCurrentClipNumber(-1),
+ mClipTotalDuration(0),
+ mCurrentVideoEffect(VIDEO_EFFECT_NONE),
+ mBackgroundAudioSetting(NULL),
+ mAudioMixPCMFileHandle(NULL),
+ mTarget(NULL),
+ mJniCookie(NULL),
+ mJniCallback(NULL),
+ mCurrentPlayedDuration(0),
+ mCurrentClipDuration(0),
+ mVideoStoryBoardTimeMsUptoFirstPreviewClip(0),
+ mOverlayState(OVERLAY_CLEAR),
+ mActivePlayerIndex(0),
+ mOutputVideoWidth(0),
+ mOutputVideoHeight(0),
+ bStopThreadInProgress(false),
+ mSemThreadWait(NULL) {
+ ALOGV("VideoEditorPreviewController");
+ mRenderingMode = M4xVSS_kBlackBorders;
+ mIsFiftiesEffectStarted = false;
+
+ for (int i = 0; i < kTotalNumPlayerInstances; ++i) {
+ mVePlayer[i] = NULL;
+ }
+}
+
+VideoEditorPreviewController::~VideoEditorPreviewController() {
+ ALOGV("~VideoEditorPreviewController");
+ M4OSA_UInt32 i = 0;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ // Stop the thread if its still running
+ if(mThreadContext != NULL) {
+ err = M4OSA_threadSyncStop(mThreadContext);
+ if(err != M4NO_ERROR) {
+ ALOGV("~VideoEditorPreviewController: error 0x%x \
+ in trying to stop thread", err);
+ // Continue even if error
+ }
+
+ err = M4OSA_threadSyncClose(mThreadContext);
+ if(err != M4NO_ERROR) {
+ ALOGE("~VideoEditorPreviewController: error 0x%x \
+ in trying to close thread", (unsigned int) err);
+ // Continue even if error
+ }
+
+ mThreadContext = NULL;
+ }
+
+ for (int playerInst=0; playerInst<kTotalNumPlayerInstances;
+ playerInst++) {
+ if(mVePlayer[playerInst] != NULL) {
+ ALOGV("clearing mVePlayer %d", playerInst);
+ mVePlayer[playerInst].clear();
+ }
+ }
+
+ if(mClipList != NULL) {
+ // Clean up
+ for(i=0;i<mNumberClipsInStoryBoard;i++)
+ {
+ if(mClipList[i]->pFile != NULL) {
+ free(mClipList[i]->pFile);
+ mClipList[i]->pFile = NULL;
+ }
+
+ free(mClipList[i]);
+ }
+ free(mClipList);
+ mClipList = NULL;
+ }
+
+ if(mEffectsSettings) {
+ for(i=0;i<mNumberEffects;i++) {
+ if(mEffectsSettings[i].xVSS.pFramingBuffer != NULL) {
+ free(mEffectsSettings[i].xVSS.pFramingBuffer->pac_data);
+
+ free(mEffectsSettings[i].xVSS.pFramingBuffer);
+
+ mEffectsSettings[i].xVSS.pFramingBuffer = NULL;
+ }
+ }
+ free(mEffectsSettings);
+ mEffectsSettings = NULL;
+ }
+
+ if (mAudioMixPCMFileHandle) {
+ err = M4OSA_fileReadClose (mAudioMixPCMFileHandle);
+ mAudioMixPCMFileHandle = M4OSA_NULL;
+ }
+
+ if (mBackgroundAudioSetting != NULL) {
+ free(mBackgroundAudioSetting);
+ mBackgroundAudioSetting = NULL;
+ }
+
+ if(mTarget != NULL) {
+ delete mTarget;
+ mTarget = NULL;
+ }
+
+ mOverlayState = OVERLAY_CLEAR;
+
+ ALOGV("~VideoEditorPreviewController returns");
+}
+
+M4OSA_ERR VideoEditorPreviewController::loadEditSettings(
+ M4VSS3GPP_EditSettings* pSettings,M4xVSS_AudioMixingSettings* bgmSettings) {
+
+ M4OSA_UInt32 i = 0, iClipDuration = 0, rgbSize = 0;
+ M4VIFI_UInt8 *tmp = NULL;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ ALOGV("loadEditSettings");
+ ALOGV("loadEditSettings Channels = %d, sampling Freq %d",
+ bgmSettings->uiNbChannels, bgmSettings->uiSamplingFrequency );
+ bgmSettings->uiSamplingFrequency = 32000;
+
+ ALOGV("loadEditSettings Channels = %d, sampling Freq %d",
+ bgmSettings->uiNbChannels, bgmSettings->uiSamplingFrequency );
+ Mutex::Autolock autoLock(mLock);
+
+ // Clean up any previous Edit settings before loading new ones
+ mCurrentVideoEffect = VIDEO_EFFECT_NONE;
+
+ if(mAudioMixPCMFileHandle) {
+ err = M4OSA_fileReadClose (mAudioMixPCMFileHandle);
+ mAudioMixPCMFileHandle = M4OSA_NULL;
+ }
+
+ if(mBackgroundAudioSetting != NULL) {
+ free(mBackgroundAudioSetting);
+ mBackgroundAudioSetting = NULL;
+ }
+
+ if(mClipList != NULL) {
+ // Clean up
+ for(i=0;i<mNumberClipsInStoryBoard;i++)
+ {
+ if(mClipList[i]->pFile != NULL) {
+ free(mClipList[i]->pFile);
+ mClipList[i]->pFile = NULL;
+ }
+
+ free(mClipList[i]);
+ }
+ free(mClipList);
+ mClipList = NULL;
+ }
+
+ if(mEffectsSettings) {
+ for(i=0;i<mNumberEffects;i++) {
+ if(mEffectsSettings[i].xVSS.pFramingBuffer != NULL) {
+ free(mEffectsSettings[i].xVSS.pFramingBuffer->pac_data);
+
+ free(mEffectsSettings[i].xVSS.pFramingBuffer);
+
+ mEffectsSettings[i].xVSS.pFramingBuffer = NULL;
+ }
+ }
+ free(mEffectsSettings);
+ mEffectsSettings = NULL;
+ }
+
+ if(mClipList == NULL) {
+ mNumberClipsInStoryBoard = pSettings->uiClipNumber;
+ ALOGV("loadEditSettings: # of Clips = %d", mNumberClipsInStoryBoard);
+
+ mClipList = (M4VSS3GPP_ClipSettings**)M4OSA_32bitAlignedMalloc(
+ sizeof(M4VSS3GPP_ClipSettings*)*pSettings->uiClipNumber, M4VS,
+ (M4OSA_Char*)"LvPP, copy of pClipList");
+
+ if(NULL == mClipList) {
+ ALOGE("loadEditSettings: Malloc error");
+ return M4ERR_ALLOC;
+ }
+ memset((void *)mClipList,0,
+ sizeof(M4VSS3GPP_ClipSettings*)*pSettings->uiClipNumber);
+
+ for(i=0;i<pSettings->uiClipNumber;i++) {
+
+ // Allocate current clip
+ mClipList[i] =
+ (M4VSS3GPP_ClipSettings*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4VSS3GPP_ClipSettings),M4VS,(M4OSA_Char*)"clip settings");
+
+ if(mClipList[i] == NULL) {
+
+ ALOGE("loadEditSettings: Allocation error for mClipList[%d]", (int)i);
+ return M4ERR_ALLOC;
+ }
+ // Copy plain structure
+ memcpy((void *)mClipList[i],
+ (void *)pSettings->pClipList[i],
+ sizeof(M4VSS3GPP_ClipSettings));
+
+ if(NULL != pSettings->pClipList[i]->pFile) {
+ mClipList[i]->pFile = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(
+ pSettings->pClipList[i]->filePathSize, M4VS,
+ (M4OSA_Char*)"pClipSettingsDest->pFile");
+
+ if(NULL == mClipList[i]->pFile)
+ {
+ ALOGE("loadEditSettings : ERROR allocating filename");
+ return M4ERR_ALLOC;
+ }
+
+ memcpy((void *)mClipList[i]->pFile,
+ (void *)pSettings->pClipList[i]->pFile,
+ pSettings->pClipList[i]->filePathSize);
+ }
+ else {
+ ALOGE("NULL file path");
+ return M4ERR_PARAMETER;
+ }
+
+ // Calculate total duration of all clips
+ iClipDuration = pSettings->pClipList[i]->uiEndCutTime -
+ pSettings->pClipList[i]->uiBeginCutTime;
+
+ mClipTotalDuration = mClipTotalDuration+iClipDuration;
+ }
+ }
+
+ if(mEffectsSettings == NULL) {
+ mNumberEffects = pSettings->nbEffects;
+ ALOGV("loadEditSettings: mNumberEffects = %d", mNumberEffects);
+
+ if(mNumberEffects != 0) {
+ mEffectsSettings = (M4VSS3GPP_EffectSettings*)M4OSA_32bitAlignedMalloc(
+ mNumberEffects*sizeof(M4VSS3GPP_EffectSettings),
+ M4VS, (M4OSA_Char*)"effects settings");
+
+ if(mEffectsSettings == NULL) {
+ ALOGE("loadEffectsSettings: Allocation error");
+ return M4ERR_ALLOC;
+ }
+
+ memset((void *)mEffectsSettings,0,
+ mNumberEffects*sizeof(M4VSS3GPP_EffectSettings));
+
+ for(i=0;i<mNumberEffects;i++) {
+
+ mEffectsSettings[i].xVSS.pFramingFilePath = NULL;
+ mEffectsSettings[i].xVSS.pFramingBuffer = NULL;
+ mEffectsSettings[i].xVSS.pTextBuffer = NULL;
+
+ memcpy((void *)&(mEffectsSettings[i]),
+ (void *)&(pSettings->Effects[i]),
+ sizeof(M4VSS3GPP_EffectSettings));
+
+ if(pSettings->Effects[i].VideoEffectType ==
+ (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Framing) {
+ // Allocate the pFraming RGB buffer
+ mEffectsSettings[i].xVSS.pFramingBuffer =
+ (M4VIFI_ImagePlane *)M4OSA_32bitAlignedMalloc(sizeof(M4VIFI_ImagePlane),
+ M4VS, (M4OSA_Char*)"lvpp framing buffer");
+
+ if(mEffectsSettings[i].xVSS.pFramingBuffer == NULL) {
+ ALOGE("loadEffectsSettings:Alloc error for pFramingBuf");
+ free(mEffectsSettings);
+ mEffectsSettings = NULL;
+ return M4ERR_ALLOC;
+ }
+
+ // Allocate the pac_data (RGB)
+ if(pSettings->Effects[i].xVSS.rgbType == M4VSS3GPP_kRGB565){
+ rgbSize =
+ pSettings->Effects[i].xVSS.pFramingBuffer->u_width *
+ pSettings->Effects[i].xVSS.pFramingBuffer->u_height*2;
+ }
+ else if(
+ pSettings->Effects[i].xVSS.rgbType == M4VSS3GPP_kRGB888) {
+ rgbSize =
+ pSettings->Effects[i].xVSS.pFramingBuffer->u_width *
+ pSettings->Effects[i].xVSS.pFramingBuffer->u_height*3;
+ }
+ else {
+ ALOGE("loadEffectsSettings: wrong RGB type");
+ free(mEffectsSettings);
+ mEffectsSettings = NULL;
+ return M4ERR_PARAMETER;
+ }
+
+ tmp = (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(rgbSize, M4VS,
+ (M4OSA_Char*)"framing buffer pac_data");
+
+ if(tmp == NULL) {
+ ALOGE("loadEffectsSettings:Alloc error pFramingBuf pac");
+ free(mEffectsSettings);
+ mEffectsSettings = NULL;
+ free(mEffectsSettings[i].xVSS.pFramingBuffer);
+
+ mEffectsSettings[i].xVSS.pFramingBuffer = NULL;
+ return M4ERR_ALLOC;
+ }
+ /* Initialize the pFramingBuffer*/
+ mEffectsSettings[i].xVSS.pFramingBuffer->pac_data = tmp;
+ mEffectsSettings[i].xVSS.pFramingBuffer->u_height =
+ pSettings->Effects[i].xVSS.pFramingBuffer->u_height;
+
+ mEffectsSettings[i].xVSS.pFramingBuffer->u_width =
+ pSettings->Effects[i].xVSS.pFramingBuffer->u_width;
+
+ mEffectsSettings[i].xVSS.pFramingBuffer->u_stride =
+ pSettings->Effects[i].xVSS.pFramingBuffer->u_stride;
+
+ mEffectsSettings[i].xVSS.pFramingBuffer->u_topleft =
+ pSettings->Effects[i].xVSS.pFramingBuffer->u_topleft;
+
+ mEffectsSettings[i].xVSS.uialphaBlendingStart =
+ pSettings->Effects[i].xVSS.uialphaBlendingStart;
+
+ mEffectsSettings[i].xVSS.uialphaBlendingMiddle =
+ pSettings->Effects[i].xVSS.uialphaBlendingMiddle;
+
+ mEffectsSettings[i].xVSS.uialphaBlendingEnd =
+ pSettings->Effects[i].xVSS.uialphaBlendingEnd;
+
+ mEffectsSettings[i].xVSS.uialphaBlendingFadeInTime =
+ pSettings->Effects[i].xVSS.uialphaBlendingFadeInTime;
+ mEffectsSettings[i].xVSS.uialphaBlendingFadeOutTime =
+ pSettings->Effects[i].xVSS.uialphaBlendingFadeOutTime;
+
+ // Copy the pFraming data
+ memcpy((void *)
+ mEffectsSettings[i].xVSS.pFramingBuffer->pac_data,
+ (void *)pSettings->Effects[i].xVSS.pFramingBuffer->pac_data,
+ rgbSize);
+
+ mEffectsSettings[i].xVSS.rgbType =
+ pSettings->Effects[i].xVSS.rgbType;
+ }
+ }
+ }
+ }
+
+ if (mBackgroundAudioSetting == NULL) {
+
+ mBackgroundAudioSetting = (M4xVSS_AudioMixingSettings*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4xVSS_AudioMixingSettings), M4VS,
+ (M4OSA_Char*)"LvPP, copy of bgmSettings");
+
+ if(NULL == mBackgroundAudioSetting) {
+ ALOGE("loadEditSettings: mBackgroundAudioSetting Malloc failed");
+ return M4ERR_ALLOC;
+ }
+
+ memset((void *)mBackgroundAudioSetting, 0,sizeof(M4xVSS_AudioMixingSettings*));
+ memcpy((void *)mBackgroundAudioSetting, (void *)bgmSettings, sizeof(M4xVSS_AudioMixingSettings));
+
+ if ( mBackgroundAudioSetting->pFile != M4OSA_NULL ) {
+
+ mBackgroundAudioSetting->pFile = (M4OSA_Void*) bgmSettings->pPCMFilePath;
+ mBackgroundAudioSetting->uiNbChannels = 2;
+ mBackgroundAudioSetting->uiSamplingFrequency = 32000;
+ }
+
+ // Open the BG file
+ if ( mBackgroundAudioSetting->pFile != M4OSA_NULL ) {
+ err = M4OSA_fileReadOpen(&mAudioMixPCMFileHandle,
+ mBackgroundAudioSetting->pFile, M4OSA_kFileRead);
+
+ if (err != M4NO_ERROR) {
+ ALOGE("loadEditSettings: mBackgroundAudio PCM File open failed");
+ return M4ERR_PARAMETER;
+ }
+ }
+ }
+
+ mOutputVideoSize = pSettings->xVSS.outputVideoSize;
+ mFrameStr.pBuffer = M4OSA_NULL;
+ return M4NO_ERROR;
+}
+
+// Record the Surface that preview playback will render into.
+// Taken under mLock so the surface cannot be swapped mid-operation.
+M4OSA_ERR VideoEditorPreviewController::setSurface(const sp<Surface> &surface) {
+    ALOGV("setSurface");
+
+    Mutex::Autolock autoLock(mLock);
+    mSurface = surface;
+
+    return M4NO_ERROR;
+}
+
+// Start preview playback of the storyboard segment [fromMS, toMs]
+// (toMs == -1 means "preview to the end of the storyboard").
+// Steps:
+//   - validate the requested range and stop any preview already running
+//   - create the shared audio sink/player and the NativeWindowRenderer
+//   - create and configure one VideoEditorPlayer per player instance
+//   - locate the first clip containing fromMS and the last clip containing
+//     toMs, temporarily patching their begin/end cut times (originals are
+//     saved in mFirstPreviewClipBeginTime/mLastPreviewClipEndTime and
+//     restored later by stopPreview()/threadProc())
+//   - open the wait semaphore and spawn the preview worker thread
+// Returns M4NO_ERROR on success, otherwise an M4ERR_* code.
+M4OSA_ERR VideoEditorPreviewController::startPreview(
+    M4OSA_UInt32 fromMS, M4OSA_Int32 toMs, M4OSA_UInt16 callBackAfterFrameCount,
+    M4OSA_Bool loop) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 i = 0, iIncrementedDuration = 0;
+    ALOGV("startPreview");
+
+    if(fromMS > (M4OSA_UInt32)toMs) {
+        ALOGE("startPreview: fromMS > toMs");
+        return M4ERR_PARAMETER;
+    }
+
+    if(toMs == 0) {
+        ALOGE("startPreview: toMs is 0");
+        return M4ERR_PARAMETER;
+    }
+
+    // If already started, then stop preview first
+    for(int playerInst=0; playerInst<kTotalNumPlayerInstances; playerInst++) {
+        if(mVePlayer[playerInst] != NULL) {
+            ALOGV("startPreview: stopping previously started preview playback");
+            stopPreview();
+            break;
+        }
+    }
+
+    // If renderPreview was called previously, then delete Renderer object first
+    if(mTarget != NULL) {
+        ALOGV("startPreview: delete previous PreviewRenderer");
+        delete mTarget;
+        mTarget = NULL;
+    }
+
+    // Create Audio player to be used for entire
+    // storyboard duration
+    mVEAudioSink = new VideoEditorPlayer::VeAudioOutput();
+    mVEAudioPlayer = new VideoEditorAudioPlayer(mVEAudioSink);
+    mVEAudioPlayer->setAudioMixSettings(mBackgroundAudioSetting);
+    mVEAudioPlayer->setAudioMixPCMFileHandle(mAudioMixPCMFileHandle);
+
+    // Create Video Renderer to be used for the entire storyboard duration.
+    uint32_t width, height;
+    getVideoSizeByResolution(mOutputVideoSize, &width, &height);
+    mNativeWindowRenderer = new NativeWindowRenderer(mSurface, width, height);
+
+    ALOGV("startPreview: loop = %d", loop);
+    mPreviewLooping = loop;
+
+    ALOGV("startPreview: callBackAfterFrameCount = %d", callBackAfterFrameCount);
+    mCallBackAfterFrameCnt = callBackAfterFrameCount;
+
+    // Configure every player instance identically; threadProc() round-robins
+    // between them so the next clip can be prepared while one is playing.
+    for (int playerInst=0; playerInst<kTotalNumPlayerInstances; playerInst++) {
+        mVePlayer[playerInst] = new VideoEditorPlayer(mNativeWindowRenderer);
+        if(mVePlayer[playerInst] == NULL) {
+            ALOGE("startPreview:Error creating VideoEditorPlayer %d",playerInst);
+            return M4ERR_ALLOC;
+        }
+        ALOGV("startPreview: object created");
+
+        mVePlayer[playerInst]->setNotifyCallback(this,(notify_callback_f)notify);
+        ALOGV("startPreview: notify callback set");
+
+        mVePlayer[playerInst]->loadEffectsSettings(mEffectsSettings,
+         mNumberEffects);
+        ALOGV("startPreview: effects settings loaded");
+
+        mVePlayer[playerInst]->loadAudioMixSettings(mBackgroundAudioSetting);
+        ALOGV("startPreview: AudioMixSettings settings loaded");
+
+        mVePlayer[playerInst]->setAudioMixPCMFileHandle(mAudioMixPCMFileHandle);
+        ALOGV("startPreview: AudioMixPCMFileHandle set");
+
+        mVePlayer[playerInst]->setProgressCallbackInterval(
+         mCallBackAfterFrameCnt);
+        ALOGV("startPreview: setProgressCallBackInterval");
+    }
+
+    mPlayerState = VePlayerIdle;
+    mPrepareReqest = M4OSA_FALSE;
+
+    if(fromMS == 0) {
+        mCurrentClipNumber = -1;
+        // Save original value
+        mFirstPreviewClipBeginTime = mClipList[0]->uiBeginCutTime;
+        mVideoStoryBoardTimeMsUptoFirstPreviewClip = 0;
+    }
+    else {
+        ALOGV("startPreview: fromMS=%d", fromMS);
+        if(fromMS >= mClipTotalDuration) {
+            ALOGE("startPreview: fromMS >= mClipTotalDuration");
+            return M4ERR_PARAMETER;
+        }
+        // Walk the storyboard to find the clip that contains fromMS and
+        // trim its begin cut time so playback starts exactly at fromMS.
+        for(i=0;i<mNumberClipsInStoryBoard;i++) {
+            if(fromMS < (iIncrementedDuration + (mClipList[i]->uiEndCutTime -
+             mClipList[i]->uiBeginCutTime))) {
+                // Set to 1 index below,
+                // as threadProcess first increments the clip index
+                // and then processes clip in thread loop
+                mCurrentClipNumber = i-1;
+                // NOTE(review): this log prints i, but mCurrentClipNumber
+                // was just set to i-1.
+                ALOGD("startPreview:mCurrentClipNumber = %d fromMS=%d",i,fromMS);
+
+                // Save original value
+                mFirstPreviewClipBeginTime = mClipList[i]->uiBeginCutTime;
+
+                // Set correct begin time to start playback
+                if((fromMS+mClipList[i]->uiBeginCutTime) >
+                 (iIncrementedDuration+mClipList[i]->uiBeginCutTime)) {
+
+                    mClipList[i]->uiBeginCutTime =
+                     mClipList[i]->uiBeginCutTime +
+                     (fromMS - iIncrementedDuration);
+                }
+                break;
+            }
+            else {
+                iIncrementedDuration = iIncrementedDuration +
+                 (mClipList[i]->uiEndCutTime - mClipList[i]->uiBeginCutTime);
+            }
+        }
+        mVideoStoryBoardTimeMsUptoFirstPreviewClip = iIncrementedDuration;
+    }
+
+    // NOTE(review): i is the index found above (it stays 0 when fromMS == 0),
+    // so every player gets the audio volume of that one clip — confirm this
+    // is the intended behaviour.
+    for (int playerInst=0; playerInst<kTotalNumPlayerInstances; playerInst++) {
+        mVePlayer[playerInst]->setAudioMixStoryBoardParam(fromMS,
+         mFirstPreviewClipBeginTime,
+         mClipList[i]->ClipProperties.uiClipAudioVolumePercentage);
+
+        ALOGV("startPreview:setAudioMixStoryBoardSkimTimeStamp set %d cuttime \
+         %d", fromMS, mFirstPreviewClipBeginTime);
+    }
+
+    mStartingClipIndex = mCurrentClipNumber+1;
+
+    // Start playing with player instance 0
+    mCurrentPlayer = 0;
+    mActivePlayerIndex = 0;
+
+    if(toMs == -1) {
+        ALOGV("startPreview: Preview till end of storyboard");
+        mNumberClipsToPreview = mNumberClipsInStoryBoard;
+        // Save original value
+        mLastPreviewClipEndTime =
+         mClipList[mNumberClipsToPreview-1]->uiEndCutTime;
+    }
+    else {
+        ALOGV("startPreview: toMs=%d", toMs);
+        if((M4OSA_UInt32)toMs > mClipTotalDuration) {
+            ALOGE("startPreview: toMs > mClipTotalDuration");
+            return M4ERR_PARAMETER;
+        }
+
+        iIncrementedDuration = 0;
+
+        // Walk the storyboard again to find the clip containing toMs and
+        // truncate its end cut time.
+        for(i=0;i<mNumberClipsInStoryBoard;i++) {
+            if((M4OSA_UInt32)toMs <= (iIncrementedDuration +
+             (mClipList[i]->uiEndCutTime - mClipList[i]->uiBeginCutTime))) {
+                // Save original value
+                mLastPreviewClipEndTime = mClipList[i]->uiEndCutTime;
+                // Set the end cut time of clip index i to toMs
+                mClipList[i]->uiEndCutTime = toMs;
+
+                // Number of clips to be previewed is from index 0 to i
+                // increment by 1 as i starts from 0
+                mNumberClipsToPreview = i+1;
+                break;
+            }
+            else {
+                iIncrementedDuration = iIncrementedDuration +
+                 (mClipList[i]->uiEndCutTime - mClipList[i]->uiBeginCutTime);
+            }
+        }
+    }
+
+    // Open the thread semaphore
+    M4OSA_semaphoreOpen(&mSemThreadWait, 1);
+
+    // Open the preview process thread
+    err = M4OSA_threadSyncOpen(&mThreadContext, (M4OSA_ThreadDoIt)threadProc);
+    if (M4NO_ERROR != err) {
+        ALOGE("VideoEditorPreviewController:M4OSA_threadSyncOpen error %d", (int) err);
+        return err;
+    }
+
+    // Set the stacksize
+    err = M4OSA_threadSyncSetOption(mThreadContext, M4OSA_ThreadStackSize,
+     (M4OSA_DataOption) kPreviewThreadStackSize);
+
+    if (M4NO_ERROR != err) {
+        ALOGE("VideoEditorPreviewController: threadSyncSetOption error %d", (int) err);
+        M4OSA_threadSyncClose(mThreadContext);
+        mThreadContext = NULL;
+        return err;
+    }
+
+    // Start the thread
+    err = M4OSA_threadSyncStart(mThreadContext, (M4OSA_Void*)this);
+    if (M4NO_ERROR != err) {
+        ALOGE("VideoEditorPreviewController: threadSyncStart error %d", (int) err);
+        M4OSA_threadSyncClose(mThreadContext);
+        mThreadContext = NULL;
+        return err;
+    }
+    bStopThreadInProgress = false;
+
+    ALOGV("startPreview: process thread started");
+    return M4NO_ERROR;
+}
+
+// Stop preview playback and tear everything down, in dependency order:
+//   1. signal and stop/close the worker thread (posting the semaphore first
+//      so a thread blocked in threadProc() can wake and observe the flag)
+//   2. close the wait semaphore under mLockSem
+//   3. stop and clear every player instance, the audio sink/player and the
+//      renderer
+//   4. restore the clip cut times patched by startPreview() and reset state
+// Returns the timestamp (ms) of the last frame rendered by the active player.
+M4OSA_UInt32 VideoEditorPreviewController::stopPreview() {
+    M4OSA_ERR err = M4NO_ERROR;
+    uint32_t lastRenderedFrameTimeMs = 0;
+    ALOGV("stopPreview");
+
+    // Stop the thread
+    if(mThreadContext != NULL) {
+        bStopThreadInProgress = true;
+        {
+            Mutex::Autolock autoLock(mLockSem);
+            if (mSemThreadWait != NULL) {
+                err = M4OSA_semaphorePost(mSemThreadWait);
+            }
+        }
+
+        err = M4OSA_threadSyncStop(mThreadContext);
+        if(err != M4NO_ERROR) {
+            ALOGV("stopPreview: error 0x%x in trying to stop thread", err);
+            // Continue even if error
+        }
+
+        err = M4OSA_threadSyncClose(mThreadContext);
+        if(err != M4NO_ERROR) {
+            ALOGE("stopPreview: error 0x%x in trying to close thread", (unsigned int)err);
+            // Continue even if error
+        }
+
+        mThreadContext = NULL;
+    }
+
+    // Close the semaphore first
+    {
+        Mutex::Autolock autoLock(mLockSem);
+        if(mSemThreadWait != NULL) {
+            err = M4OSA_semaphoreClose(mSemThreadWait);
+            ALOGV("stopPreview: close semaphore returns 0x%x", err);
+            mSemThreadWait = NULL;
+        }
+    }
+
+    for (int playerInst=0; playerInst<kTotalNumPlayerInstances; playerInst++) {
+        if(mVePlayer[playerInst] != NULL) {
+            if(mVePlayer[playerInst]->isPlaying()) {
+                ALOGV("stop the player first");
+                mVePlayer[playerInst]->stop();
+            }
+            if (playerInst == mActivePlayerIndex) {
+                // Return the last rendered frame time stamp
+                mVePlayer[mActivePlayerIndex]->getLastRenderedTimeMs(&lastRenderedFrameTimeMs);
+            }
+
+            //This is used to syncronize onStreamDone() in PreviewPlayer and
+            //stopPreview() in PreviewController
+            // (the extra sp<> reference keeps the player alive while its
+            // lock is held across the clear()).
+            sp<VideoEditorPlayer> temp = mVePlayer[playerInst];
+            temp->acquireLock();
+            ALOGV("stopPreview: clearing mVePlayer");
+            mVePlayer[playerInst].clear();
+            mVePlayer[playerInst] = NULL;
+            temp->releaseLock();
+        }
+    }
+    ALOGV("stopPreview: clear audioSink and audioPlayer");
+    mVEAudioSink.clear();
+    if (mVEAudioPlayer) {
+        delete mVEAudioPlayer;
+        mVEAudioPlayer = NULL;
+    }
+
+    delete mNativeWindowRenderer;
+    mNativeWindowRenderer = NULL;
+
+    // If image file playing, then free the buffer pointer
+    if(mFrameStr.pBuffer != M4OSA_NULL) {
+        free(mFrameStr.pBuffer);
+        mFrameStr.pBuffer = M4OSA_NULL;
+    }
+
+    // Reset original begin cuttime of first previewed clip*/
+    mClipList[mStartingClipIndex]->uiBeginCutTime = mFirstPreviewClipBeginTime;
+    // Reset original end cuttime of last previewed clip*/
+    mClipList[mNumberClipsToPreview-1]->uiEndCutTime = mLastPreviewClipEndTime;
+
+    mPlayerState = VePlayerIdle;
+    mPrepareReqest = M4OSA_FALSE;
+
+    mCurrentPlayedDuration = 0;
+    mCurrentClipDuration = 0;
+    mRenderingMode = M4xVSS_kBlackBorders;
+    mOutputVideoWidth = 0;
+    mOutputVideoHeight = 0;
+
+    ALOGV("stopPreview() lastRenderedFrameTimeMs %ld", lastRenderedFrameTimeMs);
+    return lastRenderedFrameTimeMs;
+}
+
+// Paint the given surface black: (re)creates the PreviewRenderer at the
+// frame's dimensions, fills one YV12 frame with Y=0 / U=V=128 (black in
+// YUV) and pushes it to the surface. pFrameInfo supplies only the frame
+// width/height. Returns M4NO_ERROR or M4ERR_ALLOC.
+M4OSA_ERR VideoEditorPreviewController::clearSurface(
+    const sp<Surface> &surface, VideoEditor_renderPreviewFrameStr* pFrameInfo) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditor_renderPreviewFrameStr* pFrameStr = pFrameInfo;
+    M4OSA_UInt32 outputBufferWidth =0, outputBufferHeight=0;
+    M4VIFI_ImagePlane planeOut[3];
+    ALOGV("Inside preview clear frame");
+
+    Mutex::Autolock autoLock(mLock);
+
+    // Delete previous renderer instance
+    if(mTarget != NULL) {
+        delete mTarget;
+        mTarget = NULL;
+    }
+
+    outputBufferWidth = pFrameStr->uiFrameWidth;
+    outputBufferHeight = pFrameStr->uiFrameHeight;
+
+    // Initialize the renderer
+    if(mTarget == NULL) {
+
+        mTarget = PreviewRenderer::CreatePreviewRenderer(
+            surface,
+            outputBufferWidth, outputBufferHeight);
+
+        if(mTarget == NULL) {
+            ALOGE("renderPreviewFrame: cannot create PreviewRenderer");
+            return M4ERR_ALLOC;
+        }
+    }
+
+    // Out plane
+    uint8_t* outBuffer;
+    size_t outBufferStride = 0;
+
+    ALOGV("doMediaRendering CALL getBuffer()");
+    mTarget->getBufferYV12(&outBuffer, &outBufferStride);
+
+    // Set the output YUV420 plane to be compatible with YV12 format
+    //In YV12 format, sizes must be even
+    M4OSA_UInt32 yv12PlaneWidth = ((outputBufferWidth +1)>>1)<<1;
+    M4OSA_UInt32 yv12PlaneHeight = ((outputBufferHeight+1)>>1)<<1;
+
+    prepareYV12ImagePlane(planeOut, yv12PlaneWidth, yv12PlaneHeight,
+     (M4OSA_UInt32)outBufferStride, (M4VIFI_UInt8 *)outBuffer);
+
+    /* Fill the surface with black frame */
+    // NOTE(review): this writes u_width*u_height*1.5 bytes through the Y
+    // plane pointer, i.e. 0.5x beyond the Y plane — it relies on the three
+    // YV12 planes being contiguous in the buffer; the two chroma memsets
+    // below then overwrite that area with 128. Confirm plane layout before
+    // changing.
+    memset((void *)planeOut[0].pac_data,0x00,planeOut[0].u_width *
+     planeOut[0].u_height * 1.5);
+    memset((void *)planeOut[1].pac_data,128,planeOut[1].u_width *
+     planeOut[1].u_height);
+    memset((void *)planeOut[2].pac_data,128,planeOut[2].u_width *
+     planeOut[2].u_height);
+
+    mTarget->renderYV12();
+    return err;
+}
+
+// Render a single preview frame (YUV420 planar) to the surface:
+//   - (re)creates the PreviewRenderer at the output video dimensions
+//   - rotates the frame buffer in place when videoRotationDegree != 0
+//     (swapping width/height for 90/270 degrees)
+//   - when bApplyEffect is set: selects the effects active at pFrameStr->
+//     timeMs, reports a framing (overlay) effect index back through
+//     pCurrEditInfo instead of drawing it, and applies the remaining
+//     effects; otherwise just applies the rendering mode
+//   - pushes the result via renderYV12()
+// Returns M4NO_ERROR or an M4ERR_* code (renderer is torn down on error).
+M4OSA_ERR VideoEditorPreviewController::renderPreviewFrame(
+            const sp<Surface> &surface,
+            VideoEditor_renderPreviewFrameStr* pFrameInfo,
+            VideoEditorCurretEditInfo *pCurrEditInfo) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 i = 0, iIncrementedDuration = 0, tnTimeMs=0, framesize =0;
+    VideoEditor_renderPreviewFrameStr* pFrameStr = pFrameInfo;
+    M4VIFI_UInt8 *pixelArray = NULL;
+    Mutex::Autolock autoLock(mLock);
+
+    if (pCurrEditInfo != NULL) {
+        pCurrEditInfo->overlaySettingsIndex = -1;
+    }
+    // Delete previous renderer instance
+    if(mTarget != NULL) {
+        delete mTarget;
+        mTarget = NULL;
+    }
+
+    // Fall back to the incoming frame dimensions when the output size has
+    // not been established yet.
+    if(mOutputVideoWidth == 0) {
+        mOutputVideoWidth = pFrameStr->uiFrameWidth;
+    }
+
+    if(mOutputVideoHeight == 0) {
+        mOutputVideoHeight = pFrameStr->uiFrameHeight;
+    }
+
+    // Initialize the renderer
+    if(mTarget == NULL) {
+        mTarget = PreviewRenderer::CreatePreviewRenderer(
+            surface,
+            mOutputVideoWidth, mOutputVideoHeight);
+
+        if(mTarget == NULL) {
+            ALOGE("renderPreviewFrame: cannot create PreviewRenderer");
+            return M4ERR_ALLOC;
+        }
+    }
+
+    pixelArray = NULL;
+
+    // Apply rotation if required
+    if (pFrameStr->videoRotationDegree != 0) {
+        err = applyVideoRotation((M4OSA_Void *)pFrameStr->pBuffer,
+                  pFrameStr->uiFrameWidth, pFrameStr->uiFrameHeight,
+                  pFrameStr->videoRotationDegree);
+        if (M4NO_ERROR != err) {
+            ALOGE("renderPreviewFrame: cannot rotate video, err 0x%x", (unsigned int)err);
+            delete mTarget;
+            mTarget = NULL;
+            return err;
+        } else {
+           // Video rotation done.
+           // Swap width and height if 90 or 270 degrees
+           if (pFrameStr->videoRotationDegree != 180) {
+               int32_t temp = pFrameStr->uiFrameWidth;
+               pFrameStr->uiFrameWidth = pFrameStr->uiFrameHeight;
+               pFrameStr->uiFrameHeight = temp;
+           }
+        }
+    }
+    // Postprocessing (apply video effect)
+    if(pFrameStr->bApplyEffect == M4OSA_TRUE) {
+
+        // Enable exactly the effects whose time window covers this frame
+        // (and that belong to the clip being previewed).
+        for(i=0;i<mNumberEffects;i++) {
+            // First check if effect starttime matches the clip being previewed
+            if((mEffectsSettings[i].uiStartTime < pFrameStr->clipBeginCutTime)
+             ||(mEffectsSettings[i].uiStartTime >= pFrameStr->clipEndCutTime)) {
+                // This effect doesn't belong to this clip, check next one
+                continue;
+            }
+            if((mEffectsSettings[i].uiStartTime <= pFrameStr->timeMs) &&
+            ((mEffectsSettings[i].uiStartTime+mEffectsSettings[i].uiDuration) >=
+             pFrameStr->timeMs) && (mEffectsSettings[i].uiDuration != 0)) {
+                setVideoEffectType(mEffectsSettings[i].VideoEffectType, TRUE);
+            }
+            else {
+                setVideoEffectType(mEffectsSettings[i].VideoEffectType, FALSE);
+            }
+        }
+
+        //Provide the overlay Update indication when there is an overlay effect
+        if (mCurrentVideoEffect & VIDEO_EFFECT_FRAMING) {
+            M4OSA_UInt32 index;
+            mCurrentVideoEffect &= ~VIDEO_EFFECT_FRAMING; //never apply framing here.
+
+            // Find the effect in effectSettings array
+            for (index = 0; index < mNumberEffects; index++) {
+                if(mEffectsSettings[index].VideoEffectType ==
+                    (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Framing) {
+
+                    if((mEffectsSettings[index].uiStartTime <= pFrameInfo->timeMs) &&
+                        ((mEffectsSettings[index].uiStartTime+
+                        mEffectsSettings[index].uiDuration) >= pFrameInfo->timeMs))
+                    {
+                        break;
+                    }
+                }
+            }
+            // Report the matched framing effect back to the caller so the
+            // overlay can be drawn by the Java layer.
+            if ((index < mNumberEffects) && (pCurrEditInfo != NULL)) {
+                pCurrEditInfo->overlaySettingsIndex = index;
+                ALOGV("Framing index = %d", index);
+            } else {
+                ALOGV("No framing effects found");
+            }
+        }
+
+        if(mCurrentVideoEffect != VIDEO_EFFECT_NONE) {
+            err = applyVideoEffect((M4OSA_Void *)pFrameStr->pBuffer,
+             OMX_COLOR_FormatYUV420Planar, pFrameStr->uiFrameWidth,
+             pFrameStr->uiFrameHeight, pFrameStr->timeMs,
+             (M4OSA_Void *)pixelArray);
+
+            if(err != M4NO_ERROR) {
+                ALOGE("renderPreviewFrame: applyVideoEffect error 0x%x", (unsigned int)err);
+                delete mTarget;
+                mTarget = NULL;
+                free(pixelArray);
+                pixelArray = NULL;
+                return err;
+           }
+           mCurrentVideoEffect = VIDEO_EFFECT_NONE;
+        }
+        else {
+            // Apply the rendering mode
+            err = doImageRenderingMode((M4OSA_Void *)pFrameStr->pBuffer,
+             OMX_COLOR_FormatYUV420Planar, pFrameStr->uiFrameWidth,
+             pFrameStr->uiFrameHeight, (M4OSA_Void *)pixelArray);
+
+            if(err != M4NO_ERROR) {
+                ALOGE("renderPreviewFrame:doImageRenderingMode error 0x%x", (unsigned int)err);
+                delete mTarget;
+                mTarget = NULL;
+                free(pixelArray);
+                pixelArray = NULL;
+                return err;
+            }
+        }
+    }
+    else {
+        // Apply the rendering mode
+        err = doImageRenderingMode((M4OSA_Void *)pFrameStr->pBuffer,
+         OMX_COLOR_FormatYUV420Planar, pFrameStr->uiFrameWidth,
+         pFrameStr->uiFrameHeight, (M4OSA_Void *)pixelArray);
+
+        if(err != M4NO_ERROR) {
+            ALOGE("renderPreviewFrame: doImageRenderingMode error 0x%x", (unsigned int)err);
+            delete mTarget;
+            mTarget = NULL;
+            free(pixelArray);
+            pixelArray = NULL;
+            return err;
+        }
+    }
+
+    mTarget->renderYV12();
+    return err;
+}
+
+// Register the JNI progress callback together with its opaque cookie; both
+// are forwarded verbatim on every progress/overlay/error notification.
+M4OSA_Void VideoEditorPreviewController::setJniCallback(void* cookie,
+    jni_progress_callback_fct callbackFct) {
+    //ALOGV("setJniCallback");
+    mJniCallback = callbackFct;
+    mJniCookie = cookie;
+}
+
+// Configure player instance `playerInstance` to play clip `index` of the
+// storyboard: set the data source, surface and rendering mode, the begin/
+// end cut times (the starting clip uses the saved original begin time),
+// image-clip properties for ARGB8888 stills, then prepare, seek to the
+// begin cut time, attach the shared audio player and pre-read the first
+// video frame. `param` is the VideoEditorPreviewController instance.
+M4OSA_ERR VideoEditorPreviewController::preparePlayer(
+    void* param, int playerInstance, int index) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorPreviewController *pController =
+     (VideoEditorPreviewController *)param;
+
+    ALOGV("preparePlayer: instance %d file %d", playerInstance, index);
+
+    const char* fileName = (const char*) pController->mClipList[index]->pFile;
+    pController->mVePlayer[playerInstance]->setDataSource(fileName, NULL);
+
+    ALOGV("preparePlayer: setDataSource instance %s",
+     (const char *)pController->mClipList[index]->pFile);
+
+    pController->mVePlayer[playerInstance]->setVideoSurface(
+     pController->mSurface);
+    ALOGV("preparePlayer: setVideoSurface");
+
+    pController->mVePlayer[playerInstance]->setMediaRenderingMode(
+     pController->mClipList[index]->xVSS.MediaRendering,
+     pController->mOutputVideoSize);
+    ALOGV("preparePlayer: setMediaRenderingMode");
+
+    // The starting clip's begin cut time may have been patched by
+    // startPreview(); use the saved original for it.
+    if((M4OSA_UInt32)index == pController->mStartingClipIndex) {
+        pController->mVePlayer[playerInstance]->setPlaybackBeginTime(
+        pController->mFirstPreviewClipBeginTime);
+    }
+    else {
+        pController->mVePlayer[playerInstance]->setPlaybackBeginTime(
+        pController->mClipList[index]->uiBeginCutTime);
+    }
+    ALOGV("preparePlayer: setPlaybackBeginTime(%d)",
+     pController->mClipList[index]->uiBeginCutTime);
+
+    pController->mVePlayer[playerInstance]->setPlaybackEndTime(
+     pController->mClipList[index]->uiEndCutTime);
+    ALOGV("preparePlayer: setPlaybackEndTime(%d)",
+     pController->mClipList[index]->uiEndCutTime);
+
+    if(pController->mClipList[index]->FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
+        pController->mVePlayer[playerInstance]->setImageClipProperties(
+         pController->mClipList[index]->ClipProperties.uiVideoWidth,
+         pController->mClipList[index]->ClipProperties.uiVideoHeight);
+        ALOGV("preparePlayer: setImageClipProperties");
+    }
+
+    pController->mVePlayer[playerInstance]->prepare();
+    ALOGV("preparePlayer: prepared");
+
+    if(pController->mClipList[index]->uiBeginCutTime > 0) {
+        pController->mVePlayer[playerInstance]->seekTo(
+         pController->mClipList[index]->uiBeginCutTime);
+
+        ALOGV("preparePlayer: seekTo(%d)",
+         pController->mClipList[index]->uiBeginCutTime);
+    }
+    // NOTE(review): this indexes mVePlayer with mCurrentPlayer rather than
+    // playerInstance; both callers in threadProc() currently pass the two
+    // equal, but confirm before adding new call sites.
+    pController->mVePlayer[pController->mCurrentPlayer]->setAudioPlayer(pController->mVEAudioPlayer);
+
+    pController->mVePlayer[playerInstance]->readFirstVideoFrame();
+    ALOGV("preparePlayer: readFirstVideoFrame of clip");
+
+    return err;
+}
+
+// Body of the preview worker thread; the OSAL runs it in a loop as long as
+// it returns M4NO_ERROR. One iteration handles one state:
+//   VePlayerIdle      - advance to the next clip; when all clips are done,
+//                       either loop back to the starting clip or notify
+//                       MSG_TYPE_PREVIEW_END, restore the patched cut times
+//                       and return M4WAR_NO_MORE_STREAM to end the thread
+//   VePlayerAutoStop  - preview finished, nothing to do
+//   VePlayerBusy + mPrepareReqest - pre-prepare the next clip on the next
+//                       player instance, then block on mSemThreadWait
+//   otherwise         - block on mSemThreadWait until notify() posts it
+M4OSA_ERR VideoEditorPreviewController::threadProc(M4OSA_Void* param) {
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_Int32 index = 0;
+    VideoEditorPreviewController *pController =
+     (VideoEditorPreviewController *)param;
+
+    ALOGV("inside threadProc");
+    if(pController->mPlayerState == VePlayerIdle) {
+        (pController->mCurrentClipNumber)++;
+
+        ALOGD("threadProc: playing file index %d total clips %d",
+         pController->mCurrentClipNumber, pController->mNumberClipsToPreview);
+
+        if((M4OSA_UInt32)pController->mCurrentClipNumber >=
+         pController->mNumberClipsToPreview) {
+
+            ALOGD("All clips previewed");
+
+            pController->mCurrentPlayedDuration = 0;
+            pController->mCurrentClipDuration = 0;
+            pController->mCurrentPlayer = 0;
+
+            if(pController->mPreviewLooping == M4OSA_TRUE) {
+                pController->mCurrentClipNumber =
+                 pController->mStartingClipIndex;
+
+                ALOGD("Preview looping TRUE, restarting from clip index %d",
+                 pController->mCurrentClipNumber);
+
+                // Reset the story board timestamp inside the player
+                for (int playerInst=0; playerInst<kTotalNumPlayerInstances;
+                 playerInst++) {
+                    pController->mVePlayer[playerInst]->resetJniCallbackTimeStamp();
+                }
+            }
+            else {
+                M4OSA_UInt32 endArgs = 0;
+                if(pController->mJniCallback != NULL) {
+                    pController->mJniCallback(
+                     pController->mJniCookie, MSG_TYPE_PREVIEW_END, &endArgs);
+                }
+                pController->mPlayerState = VePlayerAutoStop;
+
+                // Reset original begin cuttime of first previewed clip
+                pController->mClipList[pController->mStartingClipIndex]->uiBeginCutTime =
+                 pController->mFirstPreviewClipBeginTime;
+                // Reset original end cuttime of last previewed clip
+                pController->mClipList[pController->mNumberClipsToPreview-1]->uiEndCutTime =
+                 pController->mLastPreviewClipEndTime;
+
+                // Return a warning to M4OSA thread handler
+                // so that thread is moved from executing state to open state
+                return M4WAR_NO_MORE_STREAM;
+            }
+        }
+
+        index=pController->mCurrentClipNumber;
+        // Accumulate the storyboard time played so far; the starting clip
+        // also accounts for the storyboard time before the preview window.
+        if((M4OSA_UInt32)pController->mCurrentClipNumber == pController->mStartingClipIndex) {
+            pController->mCurrentPlayedDuration +=
+             pController->mVideoStoryBoardTimeMsUptoFirstPreviewClip;
+
+            pController->mCurrentClipDuration =
+             pController->mClipList[pController->mCurrentClipNumber]->uiEndCutTime
+              - pController->mFirstPreviewClipBeginTime;
+
+            preparePlayer((void*)pController, pController->mCurrentPlayer, index);
+        }
+        else {
+            // Later clips were already prepared ahead of time (0xAAAAAAAA
+            // notification), so only the durations are updated here.
+            pController->mCurrentPlayedDuration +=
+             pController->mCurrentClipDuration;
+
+            pController->mCurrentClipDuration =
+             pController->mClipList[pController->mCurrentClipNumber]->uiEndCutTime -
+             pController->mClipList[pController->mCurrentClipNumber]->uiBeginCutTime;
+        }
+
+        pController->mVePlayer[pController->mCurrentPlayer]->setStoryboardStartTime(
+         pController->mCurrentPlayedDuration);
+        ALOGV("threadProc: setStoryboardStartTime");
+
+        // Set the next clip duration for Audio mix here
+        if((M4OSA_UInt32)pController->mCurrentClipNumber != pController->mStartingClipIndex) {
+
+            pController->mVePlayer[pController->mCurrentPlayer]->setAudioMixStoryBoardParam(
+             pController->mCurrentPlayedDuration,
+             pController->mClipList[index]->uiBeginCutTime,
+             pController->mClipList[index]->ClipProperties.uiClipAudioVolumePercentage);
+
+            ALOGV("threadProc: setAudioMixStoryBoardParam fromMS %d \
+             ClipBeginTime %d", pController->mCurrentPlayedDuration +
+             pController->mClipList[index]->uiBeginCutTime,
+             pController->mClipList[index]->uiBeginCutTime,
+             pController->mClipList[index]->ClipProperties.uiClipAudioVolumePercentage);
+        }
+        // Capture the active player being used
+        pController->mActivePlayerIndex = pController->mCurrentPlayer;
+
+        pController->mVePlayer[pController->mCurrentPlayer]->start();
+        ALOGV("threadProc: started");
+
+        pController->mPlayerState = VePlayerBusy;
+
+    } else if(pController->mPlayerState == VePlayerAutoStop) {
+        ALOGV("Preview completed..auto stop the player");
+    } else if ((pController->mPlayerState == VePlayerBusy) && (pController->mPrepareReqest)) {
+        // Prepare the player here
+        pController->mPrepareReqest = M4OSA_FALSE;
+        preparePlayer((void*)pController, pController->mCurrentPlayer,
+         pController->mCurrentClipNumber+1);
+        if (pController->mSemThreadWait != NULL) {
+            err = M4OSA_semaphoreWait(pController->mSemThreadWait,
+             M4OSA_WAIT_FOREVER);
+        }
+    } else {
+        if (!pController->bStopThreadInProgress) {
+            ALOGV("threadProc: state busy...wait for sem");
+            if (pController->mSemThreadWait != NULL) {
+                err = M4OSA_semaphoreWait(pController->mSemThreadWait,
+                 M4OSA_WAIT_FOREVER);
+            }
+        }
+        ALOGV("threadProc: sem wait returned err = 0x%x", err);
+    }
+
+    //Always return M4NO_ERROR to ensure the thread keeps running
+    return M4NO_ERROR;
+}
+
+// Callback invoked by the VideoEditorPlayer instances with media framework
+// messages. Drives the preview state machine: marks the player idle on
+// playback completion (waking the preview thread), forwards progress,
+// error and overlay events to the JNI layer via mJniCallback, and handles
+// the private 0xAAAAAAAA ("playback almost over, prepare next player") and
+// 0xBBBBBBBB ("update/clear overlay") notifications.
+//   cookie    - the VideoEditorPreviewController instance
+//   msg       - MEDIA_* message code (plus the private codes above)
+//   ext1/ext2 - message-specific arguments
+void VideoEditorPreviewController::notify(
+    void* cookie, int msg, int ext1, int ext2)
+{
+    VideoEditorPreviewController *pController =
+     (VideoEditorPreviewController *)cookie;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    uint32_t clipDuration = 0;
+    switch (msg) {
+        case MEDIA_NOP: // interface test message
+            ALOGV("MEDIA_NOP");
+            break;
+        case MEDIA_PREPARED:
+            ALOGV("MEDIA_PREPARED");
+            break;
+        case MEDIA_PLAYBACK_COMPLETE:
+        {
+            ALOGD("notify:MEDIA_PLAYBACK_COMPLETE, mCurrentClipNumber = %d",
+             pController->mCurrentClipNumber);
+            pController->mPlayerState = VePlayerIdle;
+
+            // Send progress callback with the last frame timestamp of the
+            // clip that just finished; the first previewed clip uses the
+            // saved original begin cut time.
+            if((M4OSA_UInt32)pController->mCurrentClipNumber ==
+             pController->mStartingClipIndex) {
+                clipDuration =
+                 pController->mClipList[pController->mCurrentClipNumber]->uiEndCutTime
+                  - pController->mFirstPreviewClipBeginTime;
+            }
+            else {
+                clipDuration =
+                 pController->mClipList[pController->mCurrentClipNumber]->uiEndCutTime
+                  - pController->mClipList[pController->mCurrentClipNumber]->uiBeginCutTime;
+            }
+
+            M4OSA_UInt32 playedDuration = clipDuration+pController->mCurrentPlayedDuration;
+            // Fix: guard against an unregistered callback, as every other
+            // case in this switch already does.
+            if (pController->mJniCallback != NULL) {
+                pController->mJniCallback(
+                 pController->mJniCookie, MSG_TYPE_PROGRESS_INDICATION,
+                 &playedDuration);
+            }
+
+            if ((pController->mOverlayState == OVERLAY_UPDATE) &&
+             ((M4OSA_UInt32)pController->mCurrentClipNumber !=
+              (pController->mNumberClipsToPreview-1))) {
+                VideoEditorCurretEditInfo *pEditInfo =
+                 (VideoEditorCurretEditInfo*)M4OSA_32bitAlignedMalloc(sizeof(VideoEditorCurretEditInfo),
+                  M4VS, (M4OSA_Char*)"Current Edit info");
+                // Fix: check the allocation before dereferencing it.
+                if (pEditInfo != NULL) {
+                    pEditInfo->overlaySettingsIndex = ext2;
+                    pEditInfo->clipIndex = pController->mCurrentClipNumber;
+                    // Fix: was 'mOverlayState == OVERLAY_CLEAR', a no-effect
+                    // comparison; the overlay state must be assigned so the
+                    // clear is not re-sent on the next completion.
+                    pController->mOverlayState = OVERLAY_CLEAR;
+                    if (pController->mJniCallback != NULL) {
+                        pController->mJniCallback(pController->mJniCookie,
+                         MSG_TYPE_OVERLAY_CLEAR, pEditInfo);
+                    }
+                    free(pEditInfo);
+                }
+            }
+            {
+                // Wake the preview thread so it can start the next clip.
+                Mutex::Autolock autoLock(pController->mLockSem);
+                if (pController->mSemThreadWait != NULL) {
+                    M4OSA_semaphorePost(pController->mSemThreadWait);
+                    return;
+                }
+            }
+
+            break;
+        }
+        case MEDIA_ERROR:
+        {
+            int err_val = ext1;
+          // Always log errors.
+          // ext1: Media framework error code.
+          // ext2: Implementation dependant error code.
+            ALOGE("MEDIA_ERROR; error (%d, %d)", ext1, ext2);
+            if(pController->mJniCallback != NULL) {
+                 pController->mJniCallback(pController->mJniCookie,
+                 MSG_TYPE_PLAYER_ERROR, &err_val);
+            }
+            break;
+        }
+        case MEDIA_INFO:
+        {
+            int info_val = ext2;
+            // ext1: Media framework error code.
+            // ext2: Implementation dependant error code.
+            //ALOGW("MEDIA_INFO; info/warning (%d, %d)", ext1, ext2);
+            if(pController->mJniCallback != NULL) {
+                 pController->mJniCallback(pController->mJniCookie,
+                 MSG_TYPE_PROGRESS_INDICATION, &info_val);
+            }
+            break;
+        }
+        case MEDIA_SEEK_COMPLETE:
+            ALOGV("MEDIA_SEEK_COMPLETE; Received seek complete");
+            break;
+        case MEDIA_BUFFERING_UPDATE:
+            ALOGV("MEDIA_BUFFERING_UPDATE; buffering %d", ext1);
+            break;
+        case MEDIA_SET_VIDEO_SIZE:
+            ALOGV("MEDIA_SET_VIDEO_SIZE; New video size %d x %d", ext1, ext2);
+            break;
+        case 0xAAAAAAAA:
+            ALOGV("VIDEO PLAYBACK ALMOST over, prepare next player");
+            // Select next player and prepare it
+            // If there is a clip after this one
+            if ((M4OSA_UInt32)(pController->mCurrentClipNumber+1) <
+             pController->mNumberClipsToPreview) {
+                pController->mPrepareReqest = M4OSA_TRUE;
+                pController->mCurrentPlayer++;
+                if (pController->mCurrentPlayer >= kTotalNumPlayerInstances) {
+                    pController->mCurrentPlayer = 0;
+                }
+                // Wake the preview thread so it performs the prepare.
+                {
+                    Mutex::Autolock autoLock(pController->mLockSem);
+                    if (pController->mSemThreadWait != NULL) {
+                        M4OSA_semaphorePost(pController->mSemThreadWait);
+                    }
+                }
+            }
+            break;
+        case 0xBBBBBBBB:
+        {
+            ALOGV("VIDEO PLAYBACK, Update Overlay");
+            int overlayIndex = ext2;
+            VideoEditorCurretEditInfo *pEditInfo =
+             (VideoEditorCurretEditInfo*)M4OSA_32bitAlignedMalloc(sizeof(VideoEditorCurretEditInfo),
+              M4VS, (M4OSA_Char*)"Current Edit info");
+            // Fix: check the allocation before dereferencing it.
+            if (pEditInfo == NULL) {
+                ALOGE("notify: cannot allocate edit info for overlay update");
+                break;
+            }
+            //ext1 = 1; start the overlay display
+            //     = 2; Clear the overlay.
+            pEditInfo->overlaySettingsIndex = ext2;
+            pEditInfo->clipIndex = pController->mCurrentClipNumber;
+            ALOGV("pController->mCurrentClipNumber = %d",pController->mCurrentClipNumber);
+            if (pController->mJniCallback != NULL) {
+                if (ext1 == 1) {
+                    pController->mOverlayState = OVERLAY_UPDATE;
+                    pController->mJniCallback(pController->mJniCookie,
+                     MSG_TYPE_OVERLAY_UPDATE, pEditInfo);
+                } else {
+                    pController->mOverlayState = OVERLAY_CLEAR;
+                    pController->mJniCallback(pController->mJniCookie,
+                     MSG_TYPE_OVERLAY_CLEAR, pEditInfo);
+                }
+            }
+            free(pEditInfo);
+            break;
+        }
+        default:
+            ALOGV("unrecognized message: (%d, %d, %d)", msg, ext1, ext2);
+            break;
+    }
+}
+
+// Translate an external M4VSS3GPP/M4xVSS effect enum into the controller's
+// internal VIDEO_EFFECT_* bit and set or clear it in mCurrentVideoEffect.
+// Enabling the Fifties effect additionally arms mIsFiftiesEffectStarted.
+void VideoEditorPreviewController::setVideoEffectType(
+    M4VSS3GPP_VideoEffectType type, M4OSA_Bool enable) {
+
+    M4OSA_UInt32 effect;
+
+    // Map the external effect enum onto the local bitmask value;
+    // unknown types map to VIDEO_EFFECT_NONE (no bit changes).
+    switch(type) {
+        case M4VSS3GPP_kVideoEffectType_FadeFromBlack:
+            effect = VIDEO_EFFECT_FADEFROMBLACK;
+            break;
+        case M4VSS3GPP_kVideoEffectType_FadeToBlack:
+            effect = VIDEO_EFFECT_FADETOBLACK;
+            break;
+        case M4xVSS_kVideoEffectType_BlackAndWhite:
+            effect = VIDEO_EFFECT_BLACKANDWHITE;
+            break;
+        case M4xVSS_kVideoEffectType_Pink:
+            effect = VIDEO_EFFECT_PINK;
+            break;
+        case M4xVSS_kVideoEffectType_Green:
+            effect = VIDEO_EFFECT_GREEN;
+            break;
+        case M4xVSS_kVideoEffectType_Sepia:
+            effect = VIDEO_EFFECT_SEPIA;
+            break;
+        case M4xVSS_kVideoEffectType_Negative:
+            effect = VIDEO_EFFECT_NEGATIVE;
+            break;
+        case M4xVSS_kVideoEffectType_Framing:
+            effect = VIDEO_EFFECT_FRAMING;
+            break;
+        case M4xVSS_kVideoEffectType_Fifties:
+            effect = VIDEO_EFFECT_FIFTIES;
+            break;
+        case M4xVSS_kVideoEffectType_ColorRGB16:
+            effect = VIDEO_EFFECT_COLOR_RGB16;
+            break;
+        case M4xVSS_kVideoEffectType_Gradient:
+            effect = VIDEO_EFFECT_GRADIENT;
+            break;
+        default:
+            effect = VIDEO_EFFECT_NONE;
+            break;
+    }
+
+    if(enable == M4OSA_TRUE) {
+        // Setting an already-set bit is a no-op, so no guard is needed.
+        mCurrentVideoEffect |= effect;
+        if(effect == VIDEO_EFFECT_FIFTIES) {
+            mIsFiftiesEffectStarted = true;
+        }
+    }
+    else {
+        // Clearing an already-clear bit is likewise a no-op.
+        mCurrentVideoEffect &= ~effect;
+    }
+
+    return;
+}
+
+
+// Applies the currently-enabled video effects (mCurrentVideoEffect) plus the
+// rendering mode to one decoded frame: fills a vePostProcessParams descriptor,
+// obtains the render target's YV12 output buffer from mTarget, and delegates
+// to applyEffectsAndRenderingMode().
+// NOTE(review): colorFormat and outPtr are not used here — output goes to the
+// buffer returned by mTarget->getBufferYV12(); confirm against callers.
+M4OSA_ERR VideoEditorPreviewController::applyVideoEffect(
+        M4OSA_Void * dataPtr, M4OSA_UInt32 colorFormat, M4OSA_UInt32 videoWidth,
+        M4OSA_UInt32 videoHeight, M4OSA_UInt32 timeMs, M4OSA_Void* outPtr) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    vePostProcessParams postProcessParams;
+
+    postProcessParams.vidBuffer = (M4VIFI_UInt8*)dataPtr;
+    postProcessParams.videoWidth = videoWidth;
+    postProcessParams.videoHeight = videoHeight;
+    postProcessParams.timeMs = timeMs;
+    postProcessParams.timeOffset = 0; //Since timeMS already takes care of offset in this case
+    postProcessParams.effectsSettings = mEffectsSettings;
+    postProcessParams.numberEffects = mNumberEffects;
+    postProcessParams.outVideoWidth = mOutputVideoWidth;
+    postProcessParams.outVideoHeight = mOutputVideoHeight;
+    postProcessParams.currentVideoEffect = mCurrentVideoEffect;
+    postProcessParams.renderingMode = mRenderingMode;
+    // Consume the one-shot "fifties started" flag set by setVideoEffectType().
+    if(mIsFiftiesEffectStarted == M4OSA_TRUE) {
+        postProcessParams.isFiftiesEffectStarted = M4OSA_TRUE;
+        mIsFiftiesEffectStarted = M4OSA_FALSE;
+    }
+    else {
+        postProcessParams.isFiftiesEffectStarted = M4OSA_FALSE;
+    }
+    //postProcessParams.renderer = mTarget;
+    postProcessParams.overlayFrameRGBBuffer = NULL;
+    postProcessParams.overlayFrameYUVBuffer = NULL;
+
+    // Destination: the preview renderer's YV12 buffer and its row stride.
+    mTarget->getBufferYV12(&(postProcessParams.pOutBuffer), &(postProcessParams.outBufferStride));
+
+    err = applyEffectsAndRenderingMode(&postProcessParams, videoWidth, videoHeight);
+    return err;
+}
+
+// Records the media rendering mode (black borders / cropping / resizing is
+// decided later by applyRenderingMode) and derives the output frame width and
+// height from the requested output resolution.
+status_t VideoEditorPreviewController::setPreviewFrameRenderingMode(
+        M4xVSS_MediaRendering mode, M4VIDEOEDITING_VideoFrameSize outputVideoSize) {
+
+    ALOGV("setMediaRenderingMode: outputVideoSize = %d", outputVideoSize);
+    mRenderingMode = mode;
+
+    status_t err = OK;
+    /* get the video width and height by resolution */
+    err = getVideoSizeByResolution(outputVideoSize,
+        &mOutputVideoWidth, &mOutputVideoHeight);
+
+    return err;
+}
+
+// Renders one YUV420-planar frame into the preview target using the stored
+// rendering mode only (no effects): builds the input image planes over
+// dataPtr, builds YV12 output planes over the renderer's buffer, and lets
+// applyRenderingMode() do the scaling/cropping/letterboxing.
+// NOTE(review): colorFormat and outPtr are unused — output always goes to
+// mTarget's YV12 buffer; confirm against callers.
+M4OSA_ERR VideoEditorPreviewController::doImageRenderingMode(
+        M4OSA_Void * dataPtr, M4OSA_UInt32 colorFormat, M4OSA_UInt32 videoWidth,
+        M4OSA_UInt32 videoHeight, M4OSA_Void* outPtr) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VIFI_ImagePlane planeIn[3], planeOut[3];
+    M4VIFI_UInt8 *inBuffer = M4OSA_NULL;
+    M4OSA_UInt32 outputBufferWidth =0, outputBufferHeight=0;
+
+    //frameSize = (videoWidth*videoHeight*3) >> 1;
+    inBuffer = (M4OSA_UInt8 *)dataPtr;
+
+    // In plane: YUV420 planar, tightly packed at the source dimensions.
+    prepareYUV420ImagePlane(planeIn, videoWidth,
+        videoHeight, (M4VIFI_UInt8 *)inBuffer, videoWidth, videoHeight);
+
+    outputBufferWidth = mOutputVideoWidth;
+    outputBufferHeight = mOutputVideoHeight;
+
+    // Out plane
+    uint8_t* outBuffer;
+    size_t outBufferStride = 0;
+
+    ALOGV("doMediaRendering CALL getBuffer()");
+    mTarget->getBufferYV12(&outBuffer, &outBufferStride);
+
+    // Set the output YUV420 plane to be compatible with YV12 format
+    // In YV12 format, sizes must be even, so round up to the next even value.
+    M4OSA_UInt32 yv12PlaneWidth = ((mOutputVideoWidth +1)>>1)<<1;
+    M4OSA_UInt32 yv12PlaneHeight = ((mOutputVideoHeight+1)>>1)<<1;
+
+    prepareYV12ImagePlane(planeOut, yv12PlaneWidth, yv12PlaneHeight,
+        (M4OSA_UInt32)outBufferStride, (M4VIFI_UInt8 *)outBuffer);
+
+    err = applyRenderingMode(planeIn, planeOut, mRenderingMode);
+    if(err != M4NO_ERROR) {
+        ALOGE("doImageRenderingMode: applyRenderingMode returned err=0x%x", (unsigned int)err);
+    }
+    return err;
+}
+
+} //namespace android
diff --git a/libvideoeditor/lvpp/VideoEditorPreviewController.h b/libvideoeditor/lvpp/VideoEditorPreviewController.h
new file mode 100755
index 0000000..1756f32
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorPreviewController.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_VE_PREVIEWCONTROLLER_H
+#define ANDROID_VE_PREVIEWCONTROLLER_H
+
+#include "VideoEditorPlayer.h"
+#include "VideoEditorTools.h"
+
+namespace android {
+
+// Callback mechanism from PreviewController to the JNI layer.
+typedef void (*jni_progress_callback_fct)(void* cookie, M4OSA_UInt32 msgType, void *argc);
+
+struct Surface;
+struct PreviewRenderer;
+
+// Drives preview playback of an edited storyboard. The .cpp shows it owning
+// two VideoEditorPlayer instances, applying per-frame video effects
+// (mCurrentVideoEffect is a VIDEO_EFFECT_* bit mask), rendering to a
+// PreviewRenderer target, and reporting progress/overlay events to the JNI
+// layer through mJniCallback.
+class VideoEditorPreviewController {
+
+public:
+    VideoEditorPreviewController();
+    ~VideoEditorPreviewController();
+
+    // Stores the storyboard clips, effects and background-music settings.
+    M4OSA_ERR loadEditSettings(
+            M4VSS3GPP_EditSettings* pSettings,
+            M4xVSS_AudioMixingSettings* bgmSettings);
+
+    M4OSA_ERR setSurface(const sp<Surface>& surface);
+
+    // Plays [fromMS, toMs]; callBackAfterFrameCount controls progress
+    // callback granularity, loop restarts playback at the end.
+    M4OSA_ERR startPreview(
+            M4OSA_UInt32 fromMS, M4OSA_Int32 toMs,
+            M4OSA_UInt16 callBackAfterFrameCount,
+            M4OSA_Bool loop) ;
+
+    M4OSA_UInt32 stopPreview();
+
+    // Renders a single (paused/seek) frame to the given surface.
+    M4OSA_ERR renderPreviewFrame(
+            const sp<Surface>& surface,
+            VideoEditor_renderPreviewFrameStr* pFrameInfo,
+            VideoEditorCurretEditInfo *pCurrEditInfo);
+
+    M4OSA_ERR clearSurface(
+            const sp<Surface>& surface,
+            VideoEditor_renderPreviewFrameStr* pFrameInfo);
+
+    // Registers the callback used to post MSG_TYPE_* events to Java.
+    M4OSA_Void setJniCallback(
+            void* cookie,
+            jni_progress_callback_fct callbackFct);
+
+    status_t setPreviewFrameRenderingMode(
+            M4xVSS_MediaRendering mode,
+            M4VIDEOEDITING_VideoFrameSize outputVideoSize);
+
+private:
+    enum {
+        kTotalNumPlayerInstances = 2,   // players are alternated per clip
+        kPreviewThreadStackSize = 65536,
+    };
+
+    typedef enum {
+        VePlayerIdle = 0,
+        VePlayerBusy,
+        VePlayerAutoStop
+    } PlayerState;
+
+    typedef enum {
+        OVERLAY_UPDATE = 0,
+        OVERLAY_CLEAR
+    } OverlayState;
+
+    sp<VideoEditorPlayer> mVePlayer[kTotalNumPlayerInstances];
+    int mCurrentPlayer;  // player instance currently being used
+    sp<Surface> mSurface;
+    mutable Mutex mLock;
+    M4OSA_Context mThreadContext;
+    PlayerState mPlayerState;
+    M4OSA_Bool mPrepareReqest;
+    M4VSS3GPP_ClipSettings **mClipList;
+    M4OSA_UInt32 mNumberClipsInStoryBoard;
+    M4OSA_UInt32 mNumberClipsToPreview;
+    M4OSA_UInt32 mStartingClipIndex;
+    M4OSA_Bool mPreviewLooping;
+    M4OSA_UInt32 mCallBackAfterFrameCnt;
+    M4VSS3GPP_EffectSettings* mEffectsSettings;
+    M4OSA_UInt32 mNumberEffects;
+    M4OSA_Int32 mCurrentClipNumber;
+    M4OSA_UInt32 mClipTotalDuration;
+    // Bit mask of VIDEO_EFFECT_* flags, maintained by setVideoEffectType().
+    M4OSA_UInt32 mCurrentVideoEffect;
+    M4xVSS_AudioMixingSettings* mBackgroundAudioSetting;
+    M4OSA_Context mAudioMixPCMFileHandle;
+    PreviewRenderer *mTarget;      // destination for rendered preview frames
+    M4OSA_Context mJniCookie;
+    jni_progress_callback_fct mJniCallback;
+    VideoEditor_renderPreviewFrameStr mFrameStr;
+    M4OSA_UInt32 mCurrentPlayedDuration;
+    M4OSA_UInt32 mCurrentClipDuration;
+    M4VIDEOEDITING_VideoFrameSize mOutputVideoSize;
+    M4OSA_UInt32 mFirstPreviewClipBeginTime;
+    M4OSA_UInt32 mLastPreviewClipEndTime;
+    M4OSA_UInt32 mVideoStoryBoardTimeMsUptoFirstPreviewClip;
+    OverlayState mOverlayState;
+    int mActivePlayerIndex;
+
+    M4xVSS_MediaRendering mRenderingMode;
+    uint32_t mOutputVideoWidth;
+    uint32_t mOutputVideoHeight;
+    bool bStopThreadInProgress;
+    // Semaphore used to hand control to the preview thread; guarded by mLockSem.
+    M4OSA_Context mSemThreadWait;
+    // One-shot flag set when the "fifties" effect is enabled; consumed by
+    // applyVideoEffect().
+    bool mIsFiftiesEffectStarted;
+
+    sp<VideoEditorPlayer::VeAudioOutput> mVEAudioSink;
+    VideoEditorAudioPlayer *mVEAudioPlayer;
+    NativeWindowRenderer* mNativeWindowRenderer;
+
+    M4VIFI_UInt8* mFrameRGBBuffer;
+    M4VIFI_UInt8* mFrameYUVBuffer;
+    mutable Mutex mLockSem;      // protects mSemThreadWait
+
+
+    static M4OSA_ERR preparePlayer(void* param, int playerInstance, int index);
+    static M4OSA_ERR threadProc(M4OSA_Void* param);
+    // Player event sink (progress, overlay update/clear, prepare requests).
+    static void notify(void* cookie, int msg, int ext1, int ext2);
+
+    void setVideoEffectType(M4VSS3GPP_VideoEffectType type, M4OSA_Bool enable);
+
+    M4OSA_ERR applyVideoEffect(
+            M4OSA_Void * dataPtr, M4OSA_UInt32 colorFormat,
+            M4OSA_UInt32 videoWidth, M4OSA_UInt32 videoHeight,
+            M4OSA_UInt32 timeMs, M4OSA_Void* outPtr);
+
+    M4OSA_ERR doImageRenderingMode(
+            M4OSA_Void * dataPtr,
+            M4OSA_UInt32 colorFormat, M4OSA_UInt32 videoWidth,
+            M4OSA_UInt32 videoHeight, M4OSA_Void* outPtr);
+
+    // Don't call me!
+    VideoEditorPreviewController(const VideoEditorPreviewController &);
+    VideoEditorPreviewController &operator=(
+            const VideoEditorPreviewController &);
+};
+
+}
+
+#endif // ANDROID_VE_PREVIEWCONTROLLER_H
diff --git a/libvideoeditor/lvpp/VideoEditorSRC.cpp b/libvideoeditor/lvpp/VideoEditorSRC.cpp
new file mode 100755
index 0000000..4753dd4
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorSRC.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoEditorSRC"
+
+#include <stdlib.h>
+#include <utils/Log.h>
+#include <audio_utils/primitives.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include "VideoEditorSRC.h"
+
+
+namespace android {
+
+// Wraps a raw-PCM MediaSource and (when needed) resamples it to the fixed
+// output format: 32 kHz (kFreq32000Hz), 2 channels, raw PCM. Aborts (CHECK)
+// if the source is not raw PCM audio.
+VideoEditorSRC::VideoEditorSRC(const sp<MediaSource> &source) {
+    ALOGV("VideoEditorSRC %p(%p)", this, source.get());
+    static const int32_t kDefaultSamplingFreqencyHz = kFreq32000Hz;
+    mSource = source;
+    mResampler = NULL;
+    mChannelCnt = 0;
+    mSampleRate = 0;
+    mOutputSampleRate = kDefaultSamplingFreqencyHz;
+    mStarted = false;
+    mInitialTimeStampUs = -1;      // set from the first source buffer
+    mAccuOutBufferSize = 0;
+    mSeekTimeUs = -1;              // -1 means no seek pending
+    mBuffer = NULL;
+    mLeftover = 0;
+    mFormatChanged = false;
+    mStopPending = false;
+    mSeekMode = ReadOptions::SEEK_PREVIOUS_SYNC;
+
+    // Input Source validation: only raw PCM audio is accepted.
+    sp<MetaData> format = mSource->getFormat();
+    const char *mime;
+    CHECK(format->findCString(kKeyMIMEType, &mime));
+    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+
+    // Set the metadata of the output after resampling.
+    mOutputFormat = new MetaData;
+    mOutputFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+    mOutputFormat->setInt32(kKeySampleRate, kDefaultSamplingFreqencyHz);
+    mOutputFormat->setInt32(kKeyChannelCount, 2);  // always stereo
+}
+
+// Destructor: stop() releases the pending buffer, stops the source and
+// deletes the resampler (it is a no-op when not started).
+VideoEditorSRC::~VideoEditorSRC() {
+    ALOGV("~VideoEditorSRC %p(%p)", this, mSource.get());
+    stop();
+}
+
+// Starts the wrapped source, first creating a resampler if the source format
+// differs from the fixed output format. Must not already be started (CHECK).
+// params is accepted for the MediaSource interface but not used here.
+status_t VideoEditorSRC::start(MetaData *params) {
+    ALOGV("start %p(%p)", this, mSource.get());
+    CHECK(!mStarted);
+
+    // Set resampler if required
+    checkAndSetResampler();
+
+    mSeekTimeUs = -1;
+    mSeekMode = ReadOptions::SEEK_PREVIOUS_SYNC;
+    mStarted = true;
+    mSource->start();
+
+    return OK;
+}
+
+// Stops the wrapped source and releases the pending MediaBuffer and the
+// resampler. Safe to call when not started (returns OK immediately).
+// Must NOT be called from inside the resampler callback (getNextBuffer);
+// that path defers via mStopPending instead.
+status_t VideoEditorSRC::stop() {
+    ALOGV("stop %p(%p)", this, mSource.get());
+    if (!mStarted) {
+        return OK;
+    }
+
+    if (mBuffer) {
+        mBuffer->release();
+        mBuffer = NULL;
+    }
+    mSource->stop();
+    if (mResampler != NULL) {
+        delete mResampler;
+        mResampler = NULL;
+    }
+
+    mStarted = false;
+    mInitialTimeStampUs = -1;
+    mAccuOutBufferSize = 0;
+    mLeftover = 0;
+
+    return OK;
+}
+
+// Returns the output format built in the constructor: raw PCM, 32 kHz, stereo.
+sp<MetaData> VideoEditorSRC::getFormat() {
+    ALOGV("getFormat");
+    return mOutputFormat;
+}
+
+/**
+ * Reads one buffer of audio.
+ *
+ * With a resampler active: asks it for 1024 stereo frames (the resampler
+ * pulls source data through getNextBuffer()), converts its 32-bit output
+ * back to 16-bit stereo with ditherAndClamp(), and stamps the buffer with
+ * mInitialTimeStampUs plus the duration of all output produced so far.
+ * Without a resampler: passes source buffers straight through.
+ * INFO_FORMAT_CHANGED from the source reconfigures the resampler and
+ * retries; EOS/other errors stop this stream and are returned.
+ */
+status_t VideoEditorSRC::read(
+        MediaBuffer **buffer_out, const ReadOptions *options) {
+    ALOGV("read %p(%p)", this, mSource.get());
+    *buffer_out = NULL;
+
+    if (!mStarted) {
+        return ERROR_END_OF_STREAM;
+    }
+
+    if (mResampler) {
+        // Store the seek parameters; getNextBuffer() applies them on the
+        // next source read.
+        int64_t seekTimeUs;
+        ReadOptions::SeekMode mode = ReadOptions::SEEK_PREVIOUS_SYNC;
+        if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+            ALOGV("read Seek %lld", seekTimeUs);
+            mSeekTimeUs = seekTimeUs;
+            mSeekMode = mode;
+        }
+
+        // We ask for 1024 frames in output
+        // resampler output is always 2 channels and 32 bits
+        const size_t kOutputFrameCount = 1024;
+        const size_t kBytes = kOutputFrameCount * 2 * sizeof(int32_t);
+        int32_t *pTmpBuffer = (int32_t *)calloc(1, kBytes);
+        if (!pTmpBuffer) {
+            // %zu: kBytes is a size_t (%d here was a format mismatch).
+            ALOGE("calloc failed to allocate memory: %zu bytes", kBytes);
+            return NO_MEMORY;
+        }
+
+        // Resample to target quality
+        mResampler->resample(pTmpBuffer, kOutputFrameCount, this);
+
+        // getNextBuffer() hit EOS/error inside the resampler callback and
+        // deferred the stop to us: stop() deletes the resampler, which must
+        // not happen re-entrantly.
+        if (mStopPending) {
+            stop();
+            mStopPending = false;
+        }
+
+        // Change resampler and retry if format change happened
+        if (mFormatChanged) {
+            mFormatChanged = false;
+            checkAndSetResampler();
+            free(pTmpBuffer);
+            return read(buffer_out, NULL);
+        }
+
+        // Create a new MediaBuffer
+        int32_t outBufferSize = kOutputFrameCount * 2 * sizeof(int16_t);
+        MediaBuffer* outBuffer = new MediaBuffer(outBufferSize);
+
+        // Convert back to 2 channels and 16 bits
+        ditherAndClamp(
+                (int32_t *)((uint8_t*)outBuffer->data() + outBuffer->range_offset()),
+                pTmpBuffer, kOutputFrameCount);
+        free(pTmpBuffer);
+
+        // Compute and set the new timestamp: accumulated output bytes over
+        // the output byte rate (2 channels * 2 bytes per sample).
+        sp<MetaData> to = outBuffer->meta_data();
+        int64_t totalOutDurationUs = (mAccuOutBufferSize * 1000000) / (mOutputSampleRate * 2 * 2);
+        int64_t timeUs = mInitialTimeStampUs + totalOutDurationUs;
+        to->setInt64(kKeyTime, timeUs);
+
+        // update the accumulated size
+        mAccuOutBufferSize += outBufferSize;
+        *buffer_out = outBuffer;
+    } else {
+        // Resampling not required. Read and pass-through.
+        MediaBuffer *aBuffer;
+        status_t err = mSource->read(&aBuffer, options);
+        if (err != OK) {
+            ALOGV("read returns err = %d", err);
+        }
+
+        if (err == INFO_FORMAT_CHANGED) {
+            checkAndSetResampler();
+            return read(buffer_out, NULL);
+        }
+
+        // EOS or some other error
+        if (err != OK) {
+            stop();
+            *buffer_out = NULL;
+            return err;
+        }
+        *buffer_out = aBuffer;
+    }
+
+    return OK;
+}
+
+/**
+ * AudioBufferProvider callback, invoked by the resampler from read().
+ * Allocates pBuffer->raw (freed by releaseBuffer()) and fills it with up to
+ * pBuffer->frameCount frames of source PCM, reading new MediaBuffers from
+ * mSource as needed and applying any pending seek. On EOS/error it defers
+ * stop() to read() via mStopPending (stop() must not run re-entrantly).
+ * On INFO_FORMAT_CHANGED it defers the resampler swap via mFormatChanged.
+ * The pts argument is unused.
+ */
+status_t VideoEditorSRC::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, int64_t pts) {
+    ALOGV("getNextBuffer %d, chan = %d", pBuffer->frameCount, mChannelCnt);
+    uint32_t done = 0;
+    uint32_t want = pBuffer->frameCount * mChannelCnt * 2;
+    pBuffer->raw = malloc(want);
+
+    while (mStarted && want > 0) {
+        // If we don't have any data left, read a new buffer.
+        if (!mBuffer) {
+            // if we seek, reset the initial time stamp and accumulated time
+            ReadOptions options;
+            if (mSeekTimeUs >= 0) {
+                ALOGV("%p cacheMore_l Seek requested = %lld", this, mSeekTimeUs);
+                ReadOptions::SeekMode mode = mSeekMode;
+                options.setSeekTo(mSeekTimeUs, mode);
+                mSeekTimeUs = -1;
+                mInitialTimeStampUs = -1;
+                mAccuOutBufferSize = 0;
+            }
+
+            status_t err = mSource->read(&mBuffer, &options);
+
+            if (err != OK) {
+                free(pBuffer->raw);
+                pBuffer->raw = NULL;
+                pBuffer->frameCount = 0;
+            }
+
+            if (err == INFO_FORMAT_CHANGED) {
+                ALOGV("getNextBuffer: source read returned INFO_FORMAT_CHANGED");
+                // At this point we cannot switch to a new AudioResampler because
+                // we are in a callback called by the AudioResampler itself. So
+                // just remember the fact that the format has changed, and let
+                // read() handle this.
+                mFormatChanged = true;
+                return err;
+            }
+
+            // EOS or some other error
+            if (err != OK) {
+                ALOGV("EOS or some err: %d", err);
+                // We cannot call stop() here because stop() will release the
+                // AudioResampler, and we are in a callback of the AudioResampler.
+                // So just remember the fact and let read() call stop().
+                mStopPending = true;
+                return err;
+            }
+
+            CHECK(mBuffer);
+            mLeftover = mBuffer->range_length();
+            if (mInitialTimeStampUs == -1) {
+                int64_t curTS;
+                sp<MetaData> from = mBuffer->meta_data();
+                from->findInt64(kKeyTime, &curTS);
+                // Log the value being set (the old code logged the still-
+                // unassigned mInitialTimeStampUs, always printing -1).
+                ALOGV("setting mInitialTimeStampUs to %lld", curTS);
+                mInitialTimeStampUs = curTS;
+            }
+        }
+
+        // Now copy data to the destination
+        uint32_t todo = mLeftover;
+        if (todo > want) {
+            todo = want;
+        }
+
+        // Copy from the tail of the buffer: the last mLeftover bytes are
+        // the ones not yet consumed.
+        uint8_t* end = (uint8_t*)mBuffer->data() + mBuffer->range_offset()
+                + mBuffer->range_length();
+        memcpy((uint8_t*)pBuffer->raw + done, end - mLeftover, todo);
+        done += todo;
+        want -= todo;
+        mLeftover -= todo;
+
+        // Release MediaBuffer as soon as possible.
+        if (mLeftover == 0) {
+            mBuffer->release();
+            mBuffer = NULL;
+        }
+    }
+
+    pBuffer->frameCount = done / (mChannelCnt * 2);
+    ALOGV("getNextBuffer done %d", pBuffer->frameCount);
+    return OK;
+}
+
+
+// AudioBufferProvider callback: frees the raw buffer that getNextBuffer()
+// allocated and resets the descriptor.
+void VideoEditorSRC::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
+    // Fixed: original logged the undeclared identifier 'pBuffers' (the
+    // parameter is 'pBuffer'), which does not compile.
+    ALOGV("releaseBuffer: %p", pBuffer);
+    free(pBuffer->raw);
+    pBuffer->raw = NULL;
+    pBuffer->frameCount = 0;
+}
+
+// (Re)configures resampling after start() or a source format change: reads
+// the source's sample rate and channel count, deletes any existing resampler
+// and pending buffer, and creates a new AudioResampler at unity gain only if
+// the source is not already 32 kHz stereo.
+void VideoEditorSRC::checkAndSetResampler() {
+    ALOGV("checkAndSetResampler");
+
+    static const uint16_t kUnityGain = 0x1000;   // 4.12 fixed point 1.0
+    sp<MetaData> format = mSource->getFormat();
+    const char *mime;
+    CHECK(format->findCString(kKeyMIMEType, &mime));
+    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+
+    CHECK(format->findInt32(kKeySampleRate, &mSampleRate));
+    CHECK(format->findInt32(kKeyChannelCount, &mChannelCnt));
+
+    // If a resampler exists, delete it first
+    if (mResampler != NULL) {
+        delete mResampler;
+        mResampler = NULL;
+    }
+
+    // Clear previous buffer
+    if (mBuffer) {
+        mBuffer->release();
+        mBuffer = NULL;
+    }
+
+    if (mSampleRate != mOutputSampleRate || mChannelCnt != 2) {
+        ALOGV("Resampling required (%d => %d Hz, # channels = %d)",
+            mSampleRate, mOutputSampleRate, mChannelCnt);
+
+        mResampler = AudioResampler::create(
+                16 /* bit depth */,
+                mChannelCnt,
+                mOutputSampleRate,
+                AudioResampler::DEFAULT);
+        CHECK(mResampler);
+        mResampler->setSampleRate(mSampleRate);
+        mResampler->setVolume(kUnityGain, kUnityGain);
+    } else {
+        ALOGV("Resampling not required (%d => %d Hz, # channels = %d)",
+            mSampleRate, mOutputSampleRate, mChannelCnt);
+    }
+}
+
+} // namespace android
diff --git a/libvideoeditor/lvpp/VideoEditorSRC.h b/libvideoeditor/lvpp/VideoEditorSRC.h
new file mode 100755
index 0000000..2b7e9b6
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorSRC.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <stdint.h>
+#include <media/stagefright/MediaSource.h>
+#include "AudioBufferProvider.h"
+#include "AudioResampler.h"
+
+namespace android {
+
+struct MediaBuffer;
+
+// MediaSource wrapper that resamples a raw-PCM source to a fixed output
+// format (32 kHz, stereo, 16-bit) when needed. It is also the
+// AudioBufferProvider that feeds the AudioResampler with source data.
+class VideoEditorSRC : public MediaSource , public AudioBufferProvider {
+
+public:
+    VideoEditorSRC(const sp<MediaSource> &source);
+
+    virtual status_t start (MetaData *params = NULL);
+    virtual status_t stop();
+    virtual sp<MetaData> getFormat();
+    // Returns resampled (or passed-through) PCM buffers.
+    virtual status_t read (
+            MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+    // AudioBufferProvider interface, called back by the resampler.
+    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
+    virtual void releaseBuffer(Buffer* buffer);
+
+    // Sampling frequencies
+    enum {
+        kFreq8000Hz  = 8000,
+        kFreq11025Hz = 11025,
+        kFreq12000Hz = 12000,
+        kFreq16000Hz = 16000,
+        kFreq22050Hz = 22050,
+        kFreq24000Hz = 24000,
+        kFreq32000Hz = 32000,
+        kFreq44100Hz = 44100,
+        kFreq48000Hz = 48000,
+    };
+
+protected :
+    virtual ~VideoEditorSRC();
+
+private:
+    AudioResampler *mResampler;   // NULL when no resampling is needed
+    sp<MediaSource> mSource;
+    int mChannelCnt;              // source channel count
+    int mSampleRate;              // source sample rate
+    int32_t mOutputSampleRate;    // fixed at kFreq32000Hz
+    bool mStarted;
+    sp<MetaData> mOutputFormat;
+
+    MediaBuffer* mBuffer;         // partially-consumed source buffer
+    int32_t mLeftover;            // unconsumed bytes remaining in mBuffer
+    bool mFormatChanged;          // deferred resampler reconfiguration
+    bool mStopPending;            // deferred stop() (set inside callback)
+
+    int64_t mInitialTimeStampUs;  // timestamp of first source buffer, or -1
+    int64_t mAccuOutBufferSize;   // total output bytes since start/seek
+
+    int64_t mSeekTimeUs;          // pending seek position, or -1
+    ReadOptions::SeekMode mSeekMode;
+
+    VideoEditorSRC();
+    void checkAndSetResampler();
+
+    // Don't call me
+    VideoEditorSRC(const VideoEditorSRC&);
+    VideoEditorSRC &operator=(const VideoEditorSRC &);
+
+};
+
+} // namespace android
+
diff --git a/libvideoeditor/lvpp/VideoEditorTools.cpp b/libvideoeditor/lvpp/VideoEditorTools.cpp
new file mode 100755
index 0000000..2b9fd60
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorTools.cpp
@@ -0,0 +1,3883 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "VideoEditorTools.h"
+#include "PreviewRenderer.h"
+/*+ Handle the image files here */
+#include <utils/Log.h>
+/*- Handle the image files here */
+
+const M4VIFI_UInt8 M4VIFI_ClipTable[1256]
+= {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03,
+0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
+0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
+0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33,
+0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
+0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43,
+0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
+0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
+0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b,
+0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63,
+0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
+0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
+0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b,
+0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
+0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b,
+0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93,
+0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
+0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3,
+0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab,
+0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
+0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb,
+0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3,
+0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb,
+0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3,
+0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb,
+0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3,
+0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb,
+0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3,
+0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb,
+0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+/* Division table for ( 65535/x ); x = 0 to 512 */
+const M4VIFI_UInt16 M4VIFI_DivTable[512]
+= {
+0, 65535, 32768, 21845, 16384, 13107, 10922, 9362,
+8192, 7281, 6553, 5957, 5461, 5041, 4681, 4369,
+4096, 3855, 3640, 3449, 3276, 3120, 2978, 2849,
+2730, 2621, 2520, 2427, 2340, 2259, 2184, 2114,
+2048, 1985, 1927, 1872, 1820, 1771, 1724, 1680,
+1638, 1598, 1560, 1524, 1489, 1456, 1424, 1394,
+1365, 1337, 1310, 1285, 1260, 1236, 1213, 1191,
+1170, 1149, 1129, 1110, 1092, 1074, 1057, 1040,
+1024, 1008, 992, 978, 963, 949, 936, 923,
+910, 897, 885, 873, 862, 851, 840, 829,
+819, 809, 799, 789, 780, 771, 762, 753,
+744, 736, 728, 720, 712, 704, 697, 689,
+682, 675, 668, 661, 655, 648, 642, 636,
+630, 624, 618, 612, 606, 601, 595, 590,
+585, 579, 574, 569, 564, 560, 555, 550,
+546, 541, 537, 532, 528, 524, 520, 516,
+512, 508, 504, 500, 496, 492, 489, 485,
+481, 478, 474, 471, 468, 464, 461, 458,
+455, 451, 448, 445, 442, 439, 436, 434,
+431, 428, 425, 422, 420, 417, 414, 412,
+409, 407, 404, 402, 399, 397, 394, 392,
+390, 387, 385, 383, 381, 378, 376, 374,
+372, 370, 368, 366, 364, 362, 360, 358,
+356, 354, 352, 350, 348, 346, 344, 343,
+341, 339, 337, 336, 334, 332, 330, 329,
+327, 326, 324, 322, 321, 319, 318, 316,
+315, 313, 312, 310, 309, 307, 306, 304,
+303, 302, 300, 299, 297, 296, 295, 293,
+292, 291, 289, 288, 287, 286, 284, 283,
+282, 281, 280, 278, 277, 276, 275, 274,
+273, 271, 270, 269, 268, 267, 266, 265,
+264, 263, 262, 261, 260, 259, 258, 257,
+256, 255, 254, 253, 252, 251, 250, 249,
+248, 247, 246, 245, 244, 243, 242, 241,
+240, 240, 239, 238, 237, 236, 235, 234,
+234, 233, 232, 231, 230, 229, 229, 228,
+227, 226, 225, 225, 224, 223, 222, 222,
+221, 220, 219, 219, 218, 217, 217, 216,
+215, 214, 214, 213, 212, 212, 211, 210,
+210, 209, 208, 208, 207, 206, 206, 205,
+204, 204, 203, 202, 202, 201, 201, 200,
+199, 199, 198, 197, 197, 196, 196, 195,
+195, 194, 193, 193, 192, 192, 191, 191,
+190, 189, 189, 188, 188, 187, 187, 186,
+186, 185, 185, 184, 184, 183, 183, 182,
+182, 181, 181, 180, 180, 179, 179, 178,
+178, 177, 177, 176, 176, 175, 175, 174,
+174, 173, 173, 172, 172, 172, 171, 171,
+170, 170, 169, 169, 168, 168, 168, 167,
+167, 166, 166, 165, 165, 165, 164, 164,
+163, 163, 163, 162, 162, 161, 161, 161,
+160, 160, 159, 159, 159, 158, 158, 157,
+157, 157, 156, 156, 156, 155, 155, 154,
+154, 154, 153, 153, 153, 152, 152, 152,
+151, 151, 151, 150, 150, 149, 149, 149,
+148, 148, 148, 147, 147, 147, 146, 146,
+146, 145, 145, 145, 144, 144, 144, 144,
+143, 143, 143, 142, 142, 142, 141, 141,
+141, 140, 140, 140, 140, 139, 139, 139,
+138, 138, 138, 137, 137, 137, 137, 136,
+136, 136, 135, 135, 135, 135, 134, 134,
+134, 134, 133, 133, 133, 132, 132, 132,
+132, 131, 131, 131, 131, 130, 130, 130,
+130, 129, 129, 129, 129, 128, 128, 128
+};
+
+/* Fixed-point coefficient table used by the colour-conversion routines.
+ * NOTE(review): the meaning of each entry is not visible in this file —
+ * presumably YUV<->RGB conversion constants; confirm against the
+ * consumers of const_storage1 before renaming or reordering. */
+const M4VIFI_Int32 const_storage1[8] = {
+    0x00002568, 0x00003343, 0x00000649, 0x00000d0f,
+    0x0000D86C, 0x0000D83B, 0x00010000, 0x00010000
+};
+
+/* Second fixed-point coefficient table for the colour-conversion
+ * routines.  NOTE(review): entry semantics are not visible here —
+ * confirm against the consumers of const_storage before changing. */
+const M4VIFI_Int32 const_storage[8] = {
+    0x00002568, 0x00003343, 0x1BF800, 0x00000649,
+    0x00000d0f, 0x110180,   0x40cf,   0x22BE00
+};
+
+
+/* Alias for the start of the division look-up table (array decays to a
+ * pointer to its first element). */
+const M4VIFI_UInt16 *M4VIFI_DivTable_zero = M4VIFI_DivTable;
+
+/* Zero point of the clipping table: indexing relative to entry 500 lets
+ * callers clip negative intermediate values without an explicit branch.
+ * NOTE(review): the 500 offset must match the layout of M4VIFI_ClipTable
+ * (defined elsewhere) — confirm before changing either side. */
+const M4VIFI_UInt8 *M4VIFI_ClipTable_zero
+ = &M4VIFI_ClipTable[500];
+
+/**
+ * Convert a YUV420 planar frame to YUV420 semi-planar.
+ *
+ * The luma plane is copied unchanged; the separate U and V planes are
+ * interleaved (U sample first, then V) into the single chroma plane of
+ * the output.  The filter assumes the luma plane is tightly packed
+ * (width == stride) and rejects the call otherwise.
+ *
+ * @param user_data (IN)     Unused.
+ * @param PlaneIn   (IN)     Input planes [Y, U, V].
+ * @param PlaneOut  (IN/OUT) Output planes [Y, interleaved UV].
+ * @return M4VIFI_OK on success, M4VIFI_INVALID_PARAM if width != stride.
+ */
+M4VIFI_UInt8 M4VIFI_YUV420PlanarToYUV420Semiplanar(void *user_data,
+    M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut ) {
+
+    M4VIFI_UInt32 chroma_count, idx;
+    M4VIFI_UInt8 *src_y, *src_u, *src_v, *dst;
+
+    /* The filter only supports tightly packed luma (width == stride). */
+    if (PlaneIn[0].u_width != PlaneIn[0].u_stride)
+        return M4VIFI_INVALID_PARAM;
+
+    /* Luma layout is identical in both formats: one bulk copy. */
+    src_y = &(PlaneIn[0].pac_data[PlaneIn[0].u_topleft]);
+    dst   = &(PlaneOut[0].pac_data[PlaneOut[0].u_topleft]);
+    memcpy((void *)dst, (void *)src_y,
+           PlaneOut[0].u_width * PlaneOut[0].u_height);
+
+    /* Interleave the planar U and V samples into the output UV plane. */
+    src_u = &(PlaneIn[1].pac_data[PlaneIn[1].u_topleft]);
+    src_v = &(PlaneIn[2].pac_data[PlaneIn[2].u_topleft]);
+    dst   = &(PlaneOut[1].pac_data[PlaneOut[1].u_topleft]);
+
+    chroma_count = PlaneOut[1].u_width * PlaneOut[1].u_height;
+    for (idx = 0; idx < chroma_count; idx++) {
+        dst[2 * idx]     = src_u[idx];
+        dst[2 * idx + 1] = src_v[idx];
+    }
+    return M4VIFI_OK;
+}
+
+/**
+ * Convert a YUV420 semi-planar frame (interleaved UV chroma) to YUV420
+ * planar (separate U and V planes).
+ *
+ * The luma plane is copied unchanged; the interleaved UV samples of the
+ * input are de-interleaved into the two output chroma planes.  The
+ * filter assumes the luma plane is tightly packed (width == stride) and
+ * rejects the call otherwise.
+ *
+ * @param user_data (IN)     Unused.
+ * @param PlaneIn   (IN)     Input planes [Y, interleaved UV].
+ * @param PlaneOut  (IN/OUT) Output planes [Y, U, V].
+ * @return M4VIFI_OK on success, M4VIFI_INVALID_PARAM if width != stride.
+ */
+M4VIFI_UInt8 M4VIFI_SemiplanarYUV420toYUV420(void *user_data,
+    M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut ) {
+
+    M4VIFI_UInt32 i;
+    /* Unused locals (p_buf_src_u / p_buf_src_v) from the original
+     * implementation have been removed. */
+    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
+    M4VIFI_UInt8 *p_buf_dest_u, *p_buf_dest_v, *p_buf_src_uv;
+    M4VIFI_UInt8 return_code = M4VIFI_OK;
+
+    /* the filter is implemented with the assumption that the width is
+     * equal to stride */
+    if (PlaneIn[0].u_width != PlaneIn[0].u_stride)
+        return M4VIFI_INVALID_PARAM;
+
+    /* The input Y plane is the same as the output Y plane: bulk copy. */
+    p_buf_src = &(PlaneIn[0].pac_data[PlaneIn[0].u_topleft]);
+    p_buf_dest = &(PlaneOut[0].pac_data[PlaneOut[0].u_topleft]);
+    memcpy((void *)p_buf_dest, (void *)p_buf_src,
+           PlaneOut[0].u_width * PlaneOut[0].u_height);
+
+    /* De-interleave the UV plane into the planar U and V outputs. */
+    p_buf_src_uv = &(PlaneIn[1].pac_data[PlaneIn[1].u_topleft]);
+    p_buf_dest_u = &(PlaneOut[1].pac_data[PlaneOut[1].u_topleft]);
+    p_buf_dest_v = &(PlaneOut[2].pac_data[PlaneOut[2].u_topleft]);
+
+    for (i = 0; i < PlaneOut[1].u_width * PlaneOut[1].u_height; i++)
+    {
+        *p_buf_dest_u++ = *p_buf_src_uv++;
+        *p_buf_dest_v++ = *p_buf_src_uv++;
+    }
+    return return_code;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function apply a color effect on an input YUV420 planar frame
+ * @note
+ * @param pFunctionContext(IN) Contains which color to apply (not very clean ...)
+ * @param PlaneIn (IN) Input YUV420 planar
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+*/
+/**
+ * Apply a colour effect (black & white, pink, green, sepia, negative,
+ * fixed RGB565 colour, or vertical colour gradient) to a YUV420 planar
+ * frame, one row at a time.
+ *
+ * For all effects except Negative, the luma plane is copied unchanged
+ * and only the chroma planes are rewritten; Negative inverts the luma
+ * and copies the chroma.
+ *
+ * @param pFunctionContext (IN)     M4xVSS_ColorStruct holding the effect
+ *                                  type and, for ColorRGB16/Gradient,
+ *                                  the RGB565 colour to apply
+ * @param PlaneIn          (IN)     Input YUV420 planar frame
+ * @param PlaneOut         (IN/OUT) Output YUV420 planar frame
+ * @param pProgress        (IN/OUT) Progress indication (unused here)
+ * @param uiEffectKind     (IN)     Unused
+ * @return M4VIFI_OK, or M4VIFI_INVALID_PARAM for an unknown effect type
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut,
+        M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiEffectKind) {
+
+    M4VIFI_Int32 plane_number;
+    M4VIFI_UInt32 i,j;
+    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
+    M4xVSS_ColorStruct* ColorContext = (M4xVSS_ColorStruct*)pFunctionContext;
+
+    for (plane_number = 0; plane_number < 3; plane_number++)
+    {
+        p_buf_src =
+            &(PlaneIn[plane_number].pac_data[PlaneIn[plane_number].u_topleft]);
+
+        p_buf_dest =
+            &(PlaneOut[plane_number].pac_data[PlaneOut[plane_number].u_topleft]);
+        for (i = 0; i < PlaneOut[plane_number].u_height; i++)
+        {
+            /**
+             * Chrominance */
+            if (plane_number==1 || plane_number==2)
+            {
+                switch (ColorContext->colorEffectType)
+                {
+                case M4xVSS_kVideoEffectType_BlackAndWhite:
+                    /* 128 is the neutral chroma value: greyscale. */
+                    memset((void *)p_buf_dest,128,
+                        PlaneIn[plane_number].u_width);
+                    break;
+                case M4xVSS_kVideoEffectType_Pink:
+                    memset((void *)p_buf_dest,255,
+                        PlaneIn[plane_number].u_width);
+                    break;
+                case M4xVSS_kVideoEffectType_Green:
+                    memset((void *)p_buf_dest,0,
+                        PlaneIn[plane_number].u_width);
+                    break;
+                case M4xVSS_kVideoEffectType_Sepia:
+                    /* Fixed sepia chroma pair: U=117, V=139. */
+                    if (plane_number==1)
+                    {
+                        memset((void *)p_buf_dest,117,
+                            PlaneIn[plane_number].u_width);
+                    }
+                    else
+                    {
+                        memset((void *)p_buf_dest,139,
+                            PlaneIn[plane_number].u_width);
+                    }
+                    break;
+                case M4xVSS_kVideoEffectType_Negative:
+                    /* Chroma is kept; only luma is inverted below. */
+                    memcpy((void *)p_buf_dest,
+                        (void *)p_buf_src ,PlaneOut[plane_number].u_width);
+                    break;
+
+                case M4xVSS_kVideoEffectType_ColorRGB16:
+                {
+                    /* (unused local 'y' removed) */
+                    M4OSA_UInt16 r = 0,g = 0,b = 0,u = 0,v = 0;
+
+                    /* first get the r, g, b from the RGB565 colour */
+                    b = (ColorContext->rgb16ColorData & 0x001f);
+                    g = (ColorContext->rgb16ColorData & 0x07e0)>>5;
+                    r = (ColorContext->rgb16ColorData & 0xf800)>>11;
+
+                    /* keep y, but replace u and v */
+                    if (plane_number==1)
+                    {
+                        /* then convert to u */
+                        u = U16(r, g, b);
+                        memset((void *)p_buf_dest,(M4OSA_UInt8)u,
+                            PlaneIn[plane_number].u_width);
+                    }
+                    if (plane_number==2)
+                    {
+                        /* then convert to v */
+                        v = V16(r, g, b);
+                        memset((void *)p_buf_dest,(M4OSA_UInt8)v,
+                            PlaneIn[plane_number].u_width);
+                    }
+                }
+                    break;
+                case M4xVSS_kVideoEffectType_Gradient:
+                {
+                    /* (unused local 'y' removed) */
+                    M4OSA_UInt16 r = 0,g = 0,b = 0,u = 0,v = 0;
+
+                    /* first get the r, g, b from the RGB565 colour */
+                    b = (ColorContext->rgb16ColorData & 0x001f);
+                    g = (ColorContext->rgb16ColorData & 0x07e0)>>5;
+                    r = (ColorContext->rgb16ColorData & 0xf800)>>11;
+
+                    /* for color gradation: fade each component towards 0
+                     * as the row index i grows down the plane */
+                    b = (M4OSA_UInt16)( b - ((b*i)/PlaneIn[plane_number].u_height));
+                    g = (M4OSA_UInt16)(g - ((g*i)/PlaneIn[plane_number].u_height));
+                    r = (M4OSA_UInt16)(r - ((r*i)/PlaneIn[plane_number].u_height));
+
+                    /* keep y, but replace u and v */
+                    if (plane_number==1)
+                    {
+                        /* then convert to u */
+                        u = U16(r, g, b);
+                        memset((void *)p_buf_dest,(M4OSA_UInt8)u,
+                            PlaneIn[plane_number].u_width);
+                    }
+                    if (plane_number==2)
+                    {
+                        /* then convert to v */
+                        v = V16(r, g, b);
+                        memset((void *)p_buf_dest,(M4OSA_UInt8)v,
+                            PlaneIn[plane_number].u_width);
+                    }
+                }
+                    break;
+                default:
+                    return M4VIFI_INVALID_PARAM;
+                }
+            }
+            /**
+             * Luminance */
+            else
+            {
+                switch (ColorContext->colorEffectType)
+                {
+                case M4xVSS_kVideoEffectType_Negative:
+                    for (j=0;j<PlaneOut[plane_number].u_width;j++)
+                    {
+                        p_buf_dest[j] = 255 - p_buf_src[j];
+                    }
+                    break;
+                default:
+                    /* every other effect keeps the luma untouched */
+                    memcpy((void *)p_buf_dest,
+                        (void *)p_buf_src ,PlaneOut[plane_number].u_width);
+                    break;
+                }
+            }
+            p_buf_src += PlaneIn[plane_number].u_stride;
+            p_buf_dest += PlaneOut[plane_number].u_stride;
+        }
+    }
+
+    return M4VIFI_OK;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function adds a fixed or animated image onto an input YUV420 planar frame
+ * @note
+ * @param pFunctionContext(IN) Contains which color to apply (not very clean ...)
+ * @param PlaneIn (IN) Input YUV420 planar
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+*/
+/* Overlays a fixed or animated (GIF) framing picture onto a YUV420 planar
+ * frame, with optional fade-in/fade-out alpha blending.  Pixels of the
+ * framing picture equal to TRANSPARENT_COLOR pass the video through
+ * unchanged.  Compiled in two variants via DECODE_GIF_ON_SAVING: with the
+ * flag, frames are decoded on the fly from a framing context; without it,
+ * a pre-decoded chained list of framing structs is walked. */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming(
+        M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn[3],
+        M4VIFI_ImagePlane *PlaneOut, M4VSS3GPP_ExternalProgress *pProgress,
+        M4OSA_UInt32 uiEffectKind ) {
+
+    M4VIFI_UInt32 x,y;
+
+    M4VIFI_UInt8 *p_in_Y = PlaneIn[0].pac_data;
+    M4VIFI_UInt8 *p_in_U = PlaneIn[1].pac_data;
+    M4VIFI_UInt8 *p_in_V = PlaneIn[2].pac_data;
+
+    M4xVSS_FramingStruct* Framing = M4OSA_NULL;
+    M4xVSS_FramingStruct* currentFraming = M4OSA_NULL;
+    M4VIFI_UInt8 *FramingRGB = M4OSA_NULL;
+
+    M4VIFI_UInt8 *p_out0;
+    M4VIFI_UInt8 *p_out1;
+    M4VIFI_UInt8 *p_out2;
+
+    /* topleft[0] = x offset, topleft[1] = y offset of the framing picture
+     * inside the video frame */
+    M4VIFI_UInt32 topleft[2];
+
+    /* TRANSPARENT_COLOR is an RGB565 value; split into its two bytes for
+     * comparison against the byte stream of the framing RGB buffer. */
+    M4OSA_UInt8 transparent1 =
+        (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+
+#ifndef DECODE_GIF_ON_SAVING
+    Framing = (M4xVSS_FramingStruct *)userData;
+    currentFraming = (M4xVSS_FramingStruct *)Framing->pCurrent;
+    FramingRGB = Framing->FramingRgb->pac_data;
+#endif /*DECODE_GIF_ON_SAVING*/
+
+#ifdef DECODE_GIF_ON_SAVING
+    M4OSA_ERR err;
+    Framing =
+        (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+    /* Lazily decode the first GIF frame on first call. */
+    if(Framing == M4OSA_NULL)
+    {
+        ((M4xVSS_FramingContext*)userData)->clipTime = pProgress->uiOutputTime;
+        err = M4xVSS_internalDecodeGIF(userData);
+        if(M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_externalVideoEffectFraming: \
+                Error in M4xVSS_internalDecodeGIF: 0x%x", err);
+            return err;
+        }
+        Framing =
+            (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+        /* Initializes first GIF time */
+        ((M4xVSS_FramingContext*)userData)->current_gif_time =
+            pProgress->uiOutputTime;
+    }
+    currentFraming = (M4xVSS_FramingStruct *)Framing;
+    FramingRGB = Framing->FramingRgb->pac_data;
+#endif /*DECODE_GIF_ON_SAVING*/
+
+    /**
+     * Initialize input / output plane pointers */
+    p_in_Y += PlaneIn[0].u_topleft;
+    p_in_U += PlaneIn[1].u_topleft;
+    p_in_V += PlaneIn[2].u_topleft;
+
+    p_out0 = PlaneOut[0].pac_data;
+    p_out1 = PlaneOut[1].pac_data;
+    p_out2 = PlaneOut[2].pac_data;
+
+    /**
+     * Depending on time, initialize Framing frame to use */
+    if(Framing->previousClipTime == -1)
+    {
+        Framing->previousClipTime = pProgress->uiOutputTime;
+    }
+
+    /**
+     * If the current clip time has reached the duration of one frame of the
+     * framing picture we need to step to the next framing picture */
+#ifdef DECODE_GIF_ON_SAVING
+    if(((M4xVSS_FramingContext*)userData)->b_animated == M4OSA_TRUE)
+    {
+        /* Catch up: decode as many GIF frames as the elapsed time covers. */
+        while((((M4xVSS_FramingContext*)userData)->current_gif_time + currentFraming->duration) < pProgress->uiOutputTime)
+        {
+            ((M4xVSS_FramingContext*)userData)->clipTime =
+                pProgress->uiOutputTime;
+
+            err = M4xVSS_internalDecodeGIF(userData);
+            if(M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_externalVideoEffectFraming: Error in M4xVSS_internalDecodeGIF: 0x%x", err);
+                return err;
+            }
+            if(currentFraming->duration != 0)
+            {
+                ((M4xVSS_FramingContext*)userData)->current_gif_time += currentFraming->duration;
+            }
+            else
+            {
+                /* zero-duration frame: advance by the wall-clock delta */
+                ((M4xVSS_FramingContext*)userData)->current_gif_time +=
+                    pProgress->uiOutputTime - Framing->previousClipTime;
+            }
+            Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+            currentFraming = (M4xVSS_FramingStruct *)Framing;
+            FramingRGB = Framing->FramingRgb->pac_data;
+        }
+    }
+#else
+    /* Pre-decoded animation: step the circular chained list. */
+    Framing->pCurrent = currentFraming->pNext;
+    currentFraming = (M4xVSS_FramingStruct*)Framing->pCurrent;
+#endif /*DECODE_GIF_ON_SAVING*/
+
+    Framing->previousClipTime = pProgress->uiOutputTime;
+    FramingRGB = currentFraming->FramingRgb->pac_data;
+    topleft[0] = currentFraming->topleft_x;
+    topleft[1] = currentFraming->topleft_y;
+
+    /* NOTE(review): here x iterates over rows (height) and y over columns
+     * (width) — the reverse of the usual convention; topleft[0] is
+     * compared against the column index y.  Keep this in mind when
+     * reading the blending arithmetic below. */
+    for( x=0 ;x < PlaneIn[0].u_height ; x++)
+    {
+        for( y=0 ;y < PlaneIn[0].u_width ; y++)
+        {
+            /**
+             * To handle framing with input size != output size
+             * Framing is applied if coordinates match between
+             * framing/topleft and input plane */
+            if( y < (topleft[0] + currentFraming->FramingYuv[0].u_width) &&
+                y >= topleft[0] &&
+                x < (topleft[1] + currentFraming->FramingYuv[0].u_height) &&
+                x >= topleft[1])
+            {
+
+                /*Alpha blending support*/
+                M4OSA_Float alphaBlending = 1;
+#ifdef DECODE_GIF_ON_SAVING
+                M4xVSS_internalEffectsAlphaBlending* alphaBlendingStruct =
+                    (M4xVSS_internalEffectsAlphaBlending*)((M4xVSS_FramingContext*)userData)->alphaBlendingStruct;
+#else
+                M4xVSS_internalEffectsAlphaBlending* alphaBlendingStruct =
+                    (M4xVSS_internalEffectsAlphaBlending*)((M4xVSS_FramingStruct*)userData)->alphaBlendingStruct;
+#endif //#ifdef DECODE_GIF_ON_SAVING
+
+                /* Ramp alpha from m_start to m_middle during fade-in, hold
+                 * at m_middle, then ramp to m_end during fade-out.
+                 * uiProgress is in [0, 1000]; m_fadeInTime/m_fadeOutTime
+                 * are scaled by 10 to the same range. */
+                if(alphaBlendingStruct != M4OSA_NULL)
+                {
+                    if(pProgress->uiProgress < (M4OSA_UInt32)(alphaBlendingStruct->m_fadeInTime*10))
+                    {
+                        alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle - alphaBlendingStruct->m_start)*pProgress->uiProgress/(alphaBlendingStruct->m_fadeInTime*10));
+                        alphaBlending += alphaBlendingStruct->m_start;
+                        alphaBlending /= 100;
+                    }
+                    else if(pProgress->uiProgress >= (M4OSA_UInt32)(alphaBlendingStruct->m_fadeInTime*10) && pProgress->uiProgress < 1000 - (M4OSA_UInt32)(alphaBlendingStruct->m_fadeOutTime*10))
+                    {
+                        alphaBlending = (M4OSA_Float)((M4OSA_Float)alphaBlendingStruct->m_middle/100);
+                    }
+                    else if(pProgress->uiProgress >= 1000 - (M4OSA_UInt32)(alphaBlendingStruct->m_fadeOutTime*10))
+                    {
+                        alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle - alphaBlendingStruct->m_end))*(1000 - pProgress->uiProgress)/(alphaBlendingStruct->m_fadeOutTime*10);
+                        alphaBlending += alphaBlendingStruct->m_end;
+                        alphaBlending /= 100;
+                    }
+                }
+
+                /**/
+
+                /* Transparent framing pixel: pass the video through. */
+                if((*(FramingRGB)==transparent1) && (*(FramingRGB+1)==transparent2))
+                {
+                    *( p_out0+y+x*PlaneOut[0].u_stride)=(*(p_in_Y+y+x*PlaneIn[0].u_stride));
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=(*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride));
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=(*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride));
+                }
+                else
+                {
+                    /* out = framing * alpha + video * (1 - alpha), per plane
+                     * (chroma at half resolution via the >>1 shifts). */
+                    *( p_out0+y+x*PlaneOut[0].u_stride)=(*(currentFraming->FramingYuv[0].pac_data+(y-topleft[0])+(x-topleft[1])*currentFraming->FramingYuv[0].u_stride))*alphaBlending;
+                    *( p_out0+y+x*PlaneOut[0].u_stride)+=(*(p_in_Y+y+x*PlaneIn[0].u_stride))*(1-alphaBlending);
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=(*(currentFraming->FramingYuv[1].pac_data+((y-topleft[0])>>1)+((x-topleft[1])>>1)*currentFraming->FramingYuv[1].u_stride))*alphaBlending;
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)+=(*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride))*(1-alphaBlending);
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=(*(currentFraming->FramingYuv[2].pac_data+((y-topleft[0])>>1)+((x-topleft[1])>>1)*currentFraming->FramingYuv[2].u_stride))*alphaBlending;
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)+=(*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride))*(1-alphaBlending);
+                }
+                /* Advance the RGB565 cursor; when the framing picture is
+                 * wider than the video, skip its clipped right edge. */
+                if( PlaneIn[0].u_width < (topleft[0] + currentFraming->FramingYuv[0].u_width) &&
+                    y == PlaneIn[0].u_width-1)
+                {
+                    FramingRGB = FramingRGB + 2 * (topleft[0] + currentFraming->FramingYuv[0].u_width - PlaneIn[0].u_width + 1);
+                }
+                else
+                {
+                    FramingRGB = FramingRGB + 2;
+                }
+            }
+            /**
+             * Just copy input plane to output plane */
+            else
+            {
+                *( p_out0+y+x*PlaneOut[0].u_stride)=*(p_in_Y+y+x*PlaneIn[0].u_stride);
+                *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride);
+                *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride);
+            }
+        }
+    }
+
+#ifdef DECODE_GIF_ON_SAVING
+    /* Release GIF decoding resources after the last frame. */
+    if(pProgress->bIsLast == M4OSA_TRUE
+        && (M4OSA_Bool)((M4xVSS_FramingContext*)userData)->b_IsFileGif == M4OSA_TRUE)
+    {
+        M4xVSS_internalDecodeGIF_Cleaning((M4xVSS_FramingContext*)userData);
+    }
+#endif /*DECODE_GIF_ON_SAVING*/
+    return M4VIFI_OK;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function makes a video look as if it had been shot in the fifties
+ * @note
+ * @param pUserData (IN) Context
+ * @param pPlaneIn (IN) Input YUV420 planar
+ * @param pPlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ * @return M4ERR_PARAMETER: pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
+ ******************************************************************************
+*/
+/* "Fifties" effect: renders the frame like old film stock — sepia-toned
+ * chroma, a random vertical row shift refreshed every
+ * fiftiesEffectDuration ms, horizontal black separator lines, and a
+ * random bright vertical stripe. */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties(
+        M4OSA_Void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+        M4VIFI_ImagePlane *pPlaneOut, M4VSS3GPP_ExternalProgress *pProgress,
+        M4OSA_UInt32 uiEffectKind )
+{
+    M4VIFI_UInt32 x, y, xShift;
+    M4VIFI_UInt8 *pInY = pPlaneIn[0].pac_data;
+    M4VIFI_UInt8 *pOutY, *pInYbegin;
+    M4VIFI_UInt8 *pInCr,* pOutCr;
+    M4VIFI_Int32 plane_number;
+
+    /* Internal context*/
+    M4xVSS_FiftiesStruct* p_FiftiesData = (M4xVSS_FiftiesStruct *)pUserData;
+
+    /* Initialize input / output plane pointers */
+    pInY += pPlaneIn[0].u_topleft;
+    pOutY = pPlaneOut[0].pac_data;
+    pInYbegin = pInY;
+
+    /* Initialize the random generator and draw the first shift/stripe
+     * values on the very first call (previousClipTime sentinel < 0). */
+    if(p_FiftiesData->previousClipTime < 0)
+    {
+        M4OSA_randInit();
+        M4OSA_rand((M4OSA_Int32*)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
+        M4OSA_rand((M4OSA_Int32*)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
+        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
+    }
+
+    /* Choose random values if we have reached the duration of a partial effect */
+    else if( (pProgress->uiOutputTime - p_FiftiesData->previousClipTime) > p_FiftiesData->fiftiesEffectDuration)
+    {
+        M4OSA_rand((M4OSA_Int32*)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
+        M4OSA_rand((M4OSA_Int32*)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
+        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
+    }
+
+    /* Put the chrominance in Sepia (fixed U=117, V=139 pair). */
+    for (plane_number = 1; plane_number < 3; plane_number++)
+    {
+        /* NOTE(review): pInCr is advanced each row but never read —
+         * appears vestigial; confirm before removing. */
+        pInCr = pPlaneIn[plane_number].pac_data + pPlaneIn[plane_number].u_topleft;
+        pOutCr = pPlaneOut[plane_number].pac_data + pPlaneOut[plane_number].u_topleft;
+
+        for (x = 0; x < pPlaneOut[plane_number].u_height; x++)
+        {
+            if (1 == plane_number)
+                memset((void *)pOutCr, 117,pPlaneIn[plane_number].u_width); /* U value */
+            else
+                memset((void *)pOutCr, 139,pPlaneIn[plane_number].u_width); /* V value */
+
+            pInCr += pPlaneIn[plane_number].u_stride;
+            pOutCr += pPlaneOut[plane_number].u_stride;
+        }
+    }
+
+    /* Compute the new luma pixel values row by row. */
+    for( x = 0 ; x < pPlaneIn[0].u_height ; x++)
+    {
+        M4VIFI_UInt8 *p_outYtmp, *p_inYtmp;
+
+        /* Compute the xShift (random value) */
+        if (0 == (p_FiftiesData->shiftRandomValue % 5 ))
+            xShift = (x + p_FiftiesData->shiftRandomValue ) % (pPlaneIn[0].u_height - 1);
+        else
+            xShift = (x + (pPlaneIn[0].u_height - p_FiftiesData->shiftRandomValue) ) % (pPlaneIn[0].u_height - 1);
+
+        /* Initialize the pointers */
+        p_outYtmp = pOutY + 1; /* yShift of 1 pixel */
+        p_inYtmp = pInYbegin + (xShift * pPlaneIn[0].u_stride); /* Apply the xShift */
+
+        for( y = 0 ; y < pPlaneIn[0].u_width ; y++)
+        {
+            /* Set Y value */
+            if (xShift > (pPlaneIn[0].u_height - 4))
+                *p_outYtmp = 40; /* Add some horizontal black lines between the two parts of the image */
+            else if ( y == p_FiftiesData->stripeRandomValue)
+                *p_outYtmp = 90; /* Add a random vertical line for the bulk */
+            else
+                *p_outYtmp = *p_inYtmp;
+
+
+            /* Go to the next pixel */
+            p_outYtmp++;
+            p_inYtmp++;
+
+            /* Restart at the beginning of the line for the last pixel
+             * (compensates the initial 1-pixel yShift above). */
+            if (y == (pPlaneIn[0].u_width - 2))
+                p_outYtmp = pOutY;
+        }
+
+        /* Go to the next line */
+        pOutY += pPlaneOut[0].u_stride;
+    }
+
+    return M4VIFI_OK;
+}
+
+/**
+ * Scale the luminance of a YUV420 frame by lum_factor (fixed-point,
+ * shifted down by LUM_FACTOR_MAX) and adjust the chrominance to match:
+ * for lum_factor > 256 the chroma planes are copied as-is, otherwise
+ * they are blended towards a constant (pix term) by the same factor.
+ *
+ * @param plane_in   (IN)  Input planes [Y, U, V]
+ * @param plane_out  (OUT) Output planes [Y, U, V]
+ * @param lum_factor (IN)  Fixed-point luminance gain
+ * @param user_data  (IN)  Unused
+ * @return 0 always
+ */
+unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in,
+                                        M4ViComImagePlane *plane_out,
+                                        unsigned long lum_factor,
+                                        void *user_data)
+{
+    unsigned short *p_src, *p_dest, *p_src_line, *p_dest_line;
+    unsigned char *p_csrc, *p_cdest, *p_csrc_line, *p_cdest_line;
+    unsigned long pix_src;
+    unsigned long u_outpx, u_outpx2;
+    unsigned long u_width, u_stride, u_stride_out, u_height, pix;
+    long i, j;
+
+    /* copy or filter chroma */
+    u_width = plane_in[1].u_width;
+    u_height = plane_in[1].u_height;
+    u_stride = plane_in[1].u_stride;
+    u_stride_out = plane_out[1].u_stride;
+    p_cdest_line = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
+    p_csrc_line = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft];
+
+    if (lum_factor > 256)
+    {
+        p_cdest = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+        p_csrc = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
+        /* Straight copy of both chroma planes, one memcpy per row.
+         * (The original repeated the identical row copy u_width times
+         * inside a per-pixel loop; the result is unchanged, the O(w^2)
+         * redundant work is removed.) */
+        for (j = u_height; j != 0; j--)
+        {
+            memcpy((void *)p_cdest_line, (void *)p_csrc_line, u_width);
+            memcpy((void *)p_cdest, (void *)p_csrc, u_width);
+            p_cdest_line += u_stride_out;
+            p_cdest += u_stride_out;
+            p_csrc_line += u_stride;
+            p_csrc += u_stride;
+        }
+    }
+    else
+    {
+        /* filter chroma: out = (pix + in * lum_factor) >> LUM_FACTOR_MAX */
+        pix = (1024 - lum_factor) << 7;
+        /* U plane */
+        for (j = u_height; j != 0; j--)
+        {
+            p_cdest = p_cdest_line;
+            p_csrc = p_csrc_line;
+            for (i = u_width; i != 0; i--)
+            {
+                *p_cdest++ = ((pix + (*p_csrc++ & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
+            }
+            p_cdest_line += u_stride_out;
+            p_csrc_line += u_stride;
+        }
+        /* V plane */
+        p_cdest_line = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+        p_csrc_line = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
+        for (j = u_height; j != 0; j--)
+        {
+            p_cdest = p_cdest_line;
+            p_csrc = p_csrc_line;
+            for (i = u_width; i != 0; i--)
+            {
+                /* BUGFIX: the source pointer was never advanced in this
+                 * loop (unlike the U loop above), so every V row was
+                 * computed from its first pixel only. */
+                *p_cdest++ = ((pix + (*p_csrc++ & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
+            }
+            p_cdest_line += u_stride_out;
+            p_csrc_line += u_stride;
+        }
+    }
+    /* apply luma factor, two packed 8-bit pixels per 16-bit read */
+    u_width = plane_in[0].u_width;
+    u_height = plane_in[0].u_height;
+    u_stride = (plane_in[0].u_stride >> 1);
+    u_stride_out = (plane_out[0].u_stride >> 1);
+    p_dest = (unsigned short *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+    p_src = (unsigned short *) &plane_in[0].pac_data[plane_in[0].u_topleft];
+    p_dest_line = p_dest;
+    p_src_line = p_src;
+
+    for (j = u_height; j != 0; j--)
+    {
+        p_dest = p_dest_line;
+        p_src = p_src_line;
+        for (i = (u_width >> 1); i != 0; i--)
+        {
+            pix_src = (unsigned long) *p_src++;
+            pix = pix_src & 0xFF;
+            u_outpx = ((pix * lum_factor) >> LUM_FACTOR_MAX);
+            pix = ((pix_src & 0xFF00) >> 8);
+            u_outpx2 = (((pix * lum_factor) >> LUM_FACTOR_MAX)<< 8) ;
+            *p_dest++ = (unsigned short) (u_outpx2 | u_outpx);
+        }
+        p_dest_line += u_stride_out;
+        p_src_line += u_stride;
+    }
+
+    return 0;
+}
+
+/******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
+ * @brief This function converts an RGB565 plane to YUV420 planar
+ * @note It is used only for framing effect
+ * It allocates output YUV planes
+ * @param framingCtx (IN) The framing struct containing input RGB565 plane
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+*/
+/**
+ * Convert the RGB565 framing plane of framingCtx into freshly allocated
+ * YUV420 planar buffers (framingCtx->FramingYuv) so the framing picture
+ * can be blended with the video in the framing effect.
+ *
+ * Allocates the three plane descriptors plus one contiguous pixel buffer
+ * (Y full size, U and V quarter size, laid out back to back).
+ *
+ * @param framingCtx (IN/OUT) Framing struct with input FramingRgb plane;
+ *                   receives the output FramingYuv planes.
+ * @return M4NO_ERROR on success, M4ERR_ALLOC on allocation failure.
+ */
+M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
+{
+    M4OSA_ERR err;
+
+    /**
+     * Allocate output YUV plane descriptors */
+    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+    if(framingCtx->FramingYuv == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+        return M4ERR_ALLOC;
+    }
+    /* Y plane: full resolution, tightly packed. */
+    framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
+    framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
+    framingCtx->FramingYuv[0].u_topleft = 0;
+    framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
+    /* One buffer of w*h*3/2 bytes holds Y followed by U then V. */
+    framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height*3)>>1, M4VS, (M4OSA_Char*)"Alloc for the Convertion output YUV");
+    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
+    {
+        /* NOTE(review): framingCtx->FramingYuv itself leaks on this path;
+         * the matching M4OSA deallocator should release it here — confirm
+         * which free routine pairs with M4OSA_32bitAlignedMalloc before
+         * adding the call. */
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+        return M4ERR_ALLOC;
+    }
+    /* U and V planes: quarter resolution, carved out of the Y buffer. */
+    framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
+    framingCtx->FramingYuv[1].u_topleft = 0;
+    framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data + framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
+    framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
+    framingCtx->FramingYuv[2].u_topleft = 0;
+    framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data + framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
+
+    /**
+     * Convert input RGB 565 to YUV 420 to be able to merge it with output
+     * video in framing effect.  A conversion failure is traced but
+     * deliberately not propagated (best-effort, preserved behaviour). */
+    err = M4VIFI_xVSS_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalConvertRGBtoYUV: error when converting from RGB to YUV: 0x%x\n", err);
+    }
+
+    framingCtx->duration = 0;
+    framingCtx->previousClipTime = -1;
+    framingCtx->previewOffsetClipTime = -1;
+
+    /**
+     * Only one element in the chained list (no animated image with RGB buffer...) */
+    framingCtx->pCurrent = framingCtx;
+    framingCtx->pNext = framingCtx;
+
+    return M4NO_ERROR;
+}
+
+/******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalConvertRGB888toYUV(M4xVSS_FramingStruct* framingCtx)
+ * @brief This function converts an RGB888 plane to YUV420 planar
+ * @note It is used only for framing effect
+ * It allocates output YUV planes
+ * @param framingCtx (IN) The framing struct containing input RGB888 plane
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+*/
+/**
+ * Convert the RGB888 framing plane of framingCtx into freshly allocated
+ * YUV420 planar buffers (framingCtx->FramingYuv) so the framing picture
+ * can be blended with the video in the framing effect.
+ *
+ * Mirrors M4xVSS_internalConvertRGBtoYUV but uses the RGB888 converter.
+ *
+ * @param framingCtx (IN/OUT) Framing struct with input FramingRgb plane;
+ *                   receives the output FramingYuv planes.
+ * @return M4NO_ERROR on success, M4ERR_ALLOC on allocation failure.
+ */
+M4OSA_ERR M4xVSS_internalConvertRGB888toYUV(M4xVSS_FramingStruct* framingCtx)
+{
+    M4OSA_ERR err;
+
+    /**
+     * Allocate output YUV plane descriptors */
+    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+    if(framingCtx->FramingYuv == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+        return M4ERR_ALLOC;
+    }
+    /* Y plane: full resolution, tightly packed. */
+    framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
+    framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
+    framingCtx->FramingYuv[0].u_topleft = 0;
+    framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
+    /* One buffer of w*h*3/2 bytes holds Y followed by U then V. */
+    framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height*3)>>1, M4VS, (M4OSA_Char*)"Alloc for the Convertion output YUV");
+    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
+    {
+        /* NOTE(review): framingCtx->FramingYuv itself leaks on this path;
+         * the matching M4OSA deallocator should release it here — confirm
+         * which free routine pairs with M4OSA_32bitAlignedMalloc before
+         * adding the call. */
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+        return M4ERR_ALLOC;
+    }
+    /* U and V planes: quarter resolution, carved out of the Y buffer. */
+    framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
+    framingCtx->FramingYuv[1].u_topleft = 0;
+    framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data + framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
+    framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
+    framingCtx->FramingYuv[2].u_topleft = 0;
+    framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data + framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
+
+    /**
+     * Convert input RGB888 to YUV 420 to be able to merge it with output
+     * video in framing effect.  A conversion failure is traced but
+     * deliberately not propagated (best-effort, preserved behaviour). */
+    err = M4VIFI_RGB888toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalConvertRGBtoYUV: error when converting from RGB to YUV: 0x%x\n", err);
+    }
+
+    framingCtx->duration = 0;
+    framingCtx->previousClipTime = -1;
+    framingCtx->previewOffsetClipTime = -1;
+
+    /**
+     * Only one element in the chained list (no animated image with RGB buffer...) */
+    framingCtx->pCurrent = framingCtx;
+    framingCtx->pNext = framingCtx;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4VIFI_UInt8 M4VIFI_RGB565toYUV420 (void *pUserData,
+ * M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @author Patrice Martinez / Philips Digital Networks - MP4Net
+ * @brief transform RGB565 image to a YUV420 image.
+ * @note Convert RGB565 to YUV420,
+ * Loop on each row ( 2 rows by 2 rows )
+ * Loop on each column ( 2 col by 2 col )
+ * Get 4 RGB samples from input data and build 4 output Y samples
+ * and each single U & V data
+ * end loop on col
+ * end loop on row
+ * @param pUserData: (IN) User Specific Data
+ * @param pPlaneIn: (IN) Pointer to RGB565 Plane
+ * @param pPlaneOut: (OUT) Pointer to YUV420 buffer Plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: YUV Plane width is ODD
+ ******************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut)
+{
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
+ M4VIFI_UInt32 u32_stride_rgb, u32_stride_2rgb;
+ M4VIFI_UInt32 u32_col, u32_row;
+
+ M4VIFI_Int32 i32_r00, i32_r01, i32_r10, i32_r11;
+ M4VIFI_Int32 i32_g00, i32_g01, i32_g10, i32_g11;
+ M4VIFI_Int32 i32_b00, i32_b01, i32_b10, i32_b11;
+ M4VIFI_Int32 i32_y00, i32_y01, i32_y10, i32_y11;
+ M4VIFI_Int32 i32_u00, i32_u01, i32_u10, i32_u11;
+ M4VIFI_Int32 i32_v00, i32_v01, i32_v10, i32_v11;
+ M4VIFI_UInt8 *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+ M4VIFI_UInt8 *pu8_y_data, *pu8_u_data, *pu8_v_data;
+ M4VIFI_UInt8 *pu8_rgbn_data, *pu8_rgbn;
+ M4VIFI_UInt16 u16_pix1, u16_pix2, u16_pix3, u16_pix4;
+ /* NOTE(review): count_null is declared and initialized but never used below. */
+ M4VIFI_UInt8 count_null=0;
+
+ /* Check planes height are appropriate */
+ if( (pPlaneIn->u_height != pPlaneOut[0].u_height) ||
+ (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1)) ||
+ (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
+ {
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+ }
+
+ /* Check planes width are appropriate */
+ if( (pPlaneIn->u_width != pPlaneOut[0].u_width) ||
+ (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
+ (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
+ {
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+ }
+
+ /* Set the pointer to the beginning of the output data buffers */
+ pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+ pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+ pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+
+ /* Set the pointer to the beginning of the input data buffers */
+ pu8_rgbn_data = pPlaneIn->pac_data + pPlaneIn->u_topleft;
+
+ /* Get the size of the output image */
+ u32_width = pPlaneOut[0].u_width;
+ u32_height = pPlaneOut[0].u_height;
+
+ /* Set the size of the memory jumps corresponding to row jump in each output plane */
+ u32_stride_Y = pPlaneOut[0].u_stride;
+ u32_stride2_Y = u32_stride_Y << 1;
+ u32_stride_U = pPlaneOut[1].u_stride;
+ u32_stride_V = pPlaneOut[2].u_stride;
+
+ /* Set the size of the memory jumps corresponding to row jump in input plane */
+ u32_stride_rgb = pPlaneIn->u_stride;
+ u32_stride_2rgb = u32_stride_rgb << 1;
+
+
+ /* Loop on each row of the output image, input coordinates are estimated from output ones */
+ /* Two YUV rows are computed at each pass */
+ for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+ {
+ /* Current Y plane row pointers */
+ pu8_yn = pu8_y_data;
+ /* Next Y plane row pointers */
+ pu8_ys = pu8_yn + u32_stride_Y;
+ /* Current U plane row pointer */
+ pu8_u = pu8_u_data;
+ /* Current V plane row pointer */
+ pu8_v = pu8_v_data;
+
+ pu8_rgbn = pu8_rgbn_data;
+
+ /* Loop on each column of the output image */
+ for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+ {
+ /* Get four RGB 565 samples from input data (a 2x2 pixel block) */
+ u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
+ u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
+ u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
+ u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
+
+ /* Unpack RGB565 to 8bit R, G, B */
+ /* (x,y) */
+ GET_RGB565(i32_b00,i32_g00,i32_r00,u16_pix1);
+ /* (x+1,y) */
+ GET_RGB565(i32_b10,i32_g10,i32_r10,u16_pix2);
+ /* (x,y+1) */
+ GET_RGB565(i32_b01,i32_g01,i32_r01,u16_pix3);
+ /* (x+1,y+1) */
+ GET_RGB565(i32_b11,i32_g11,i32_r11,u16_pix4);
+ /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
+ /* (5-bit R/B max is 31, 6-bit G max is 63, so (31,63,31) is full white) */
+ if(i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
+ {
+ i32_b00 = 31;
+ i32_r00 = 31;
+ }
+ if(i32_b10 == 0 && i32_g10 == 63 && i32_r10 == 0)
+ {
+ i32_b10 = 31;
+ i32_r10 = 31;
+ }
+ if(i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
+ {
+ i32_b01 = 31;
+ i32_r01 = 31;
+ }
+ if(i32_b11 == 0 && i32_g11 == 63 && i32_r11 == 0)
+ {
+ i32_b11 = 31;
+ i32_r11 = 31;
+ }
+ /* Convert RGB value to YUV */
+ i32_u00 = U16(i32_r00, i32_g00, i32_b00);
+ i32_v00 = V16(i32_r00, i32_g00, i32_b00);
+ /* luminance value */
+ i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
+
+ i32_u10 = U16(i32_r10, i32_g10, i32_b10);
+ i32_v10 = V16(i32_r10, i32_g10, i32_b10);
+ /* luminance value */
+ i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
+
+ i32_u01 = U16(i32_r01, i32_g01, i32_b01);
+ i32_v01 = V16(i32_r01, i32_g01, i32_b01);
+ /* luminance value */
+ i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
+
+ i32_u11 = U16(i32_r11, i32_g11, i32_b11);
+ i32_v11 = V16(i32_r11, i32_g11, i32_b11);
+ /* luminance value */
+ i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
+
+ /* Store luminance data */
+ pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
+ pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
+ pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
+ pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+ /* 4:2:0 chroma: rounded average of the four co-sited U and V samples */
+ *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+ *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+ /* Prepare for next column */
+ pu8_rgbn += (CST_RGB_16_SIZE<<1);
+ /* Update current Y plane line pointer*/
+ pu8_yn += 2;
+ /* Update next Y plane line pointer*/
+ pu8_ys += 2;
+ /* Update U plane line pointer*/
+ pu8_u ++;
+ /* Update V plane line pointer*/
+ pu8_v ++;
+ } /* End of horizontal scanning */
+
+ /* Prepare pointers for the next row */
+ pu8_y_data += u32_stride2_Y;
+ pu8_u_data += u32_stride_U;
+ pu8_v_data += u32_stride_V;
+ pu8_rgbn_data += u32_stride_2rgb;
+
+
+ } /* End of vertical scanning */
+
+ return M4VIFI_OK;
+}
+
+/***************************************************************************
+Proto:
+M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
+Author: Patrice Martinez / Philips Digital Networks - MP4Net
+Purpose: filling of the YUV420 plane from a BGR24 plane
+Abstract: Loop on each row ( 2 rows by 2 rows )
+ Loop on each column ( 2 col by 2 col )
+ Get 4 BGR samples from input data and build 4 output Y samples and each single U & V data
+ end loop on col
+ end loop on row
+
+In: RGB24 plane
+InOut: none
+Out: array of 3 M4VIFI_ImagePlane structures
+Modified: ML: RGB function modified to BGR.
+***************************************************************************/
+M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3])
+{
+ /* NOTE(review): per the "Modified" note in the header above, the GET_RGB24
+ fetches here are interpreted in BGR byte order; pUserData is unused. */
+
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V, u32_stride_rgb, u32_stride_2rgb;
+ M4VIFI_UInt32 u32_col, u32_row;
+
+ M4VIFI_Int32 i32_r00, i32_r01, i32_r10, i32_r11;
+ M4VIFI_Int32 i32_g00, i32_g01, i32_g10, i32_g11;
+ M4VIFI_Int32 i32_b00, i32_b01, i32_b10, i32_b11;
+ M4VIFI_Int32 i32_y00, i32_y01, i32_y10, i32_y11;
+ M4VIFI_Int32 i32_u00, i32_u01, i32_u10, i32_u11;
+ M4VIFI_Int32 i32_v00, i32_v01, i32_v10, i32_v11;
+ M4VIFI_UInt8 *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+ M4VIFI_UInt8 *pu8_y_data, *pu8_u_data, *pu8_v_data;
+ M4VIFI_UInt8 *pu8_rgbn_data, *pu8_rgbn;
+
+ /* check sizes: output Y matches input, chroma planes are half-size (4:2:0) */
+ if( (PlaneIn->u_height != PlaneOut[0].u_height) ||
+ (PlaneOut[0].u_height != (PlaneOut[1].u_height<<1)) ||
+ (PlaneOut[0].u_height != (PlaneOut[2].u_height<<1)))
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+
+ if( (PlaneIn->u_width != PlaneOut[0].u_width) ||
+ (PlaneOut[0].u_width != (PlaneOut[1].u_width<<1)) ||
+ (PlaneOut[0].u_width != (PlaneOut[2].u_width<<1)))
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+
+
+ /* set the pointer to the beginning of the output data buffers */
+ pu8_y_data = PlaneOut[0].pac_data + PlaneOut[0].u_topleft;
+ pu8_u_data = PlaneOut[1].pac_data + PlaneOut[1].u_topleft;
+ pu8_v_data = PlaneOut[2].pac_data + PlaneOut[2].u_topleft;
+
+ /* idem for input buffer */
+ pu8_rgbn_data = PlaneIn->pac_data + PlaneIn->u_topleft;
+
+ /* get the size of the output image */
+ u32_width = PlaneOut[0].u_width;
+ u32_height = PlaneOut[0].u_height;
+
+ /* set the size of the memory jumps corresponding to row jump in each output plane */
+ u32_stride_Y = PlaneOut[0].u_stride;
+ u32_stride2_Y= u32_stride_Y << 1;
+ u32_stride_U = PlaneOut[1].u_stride;
+ u32_stride_V = PlaneOut[2].u_stride;
+
+ /* idem for input plane */
+ u32_stride_rgb = PlaneIn->u_stride;
+ u32_stride_2rgb = u32_stride_rgb << 1;
+
+ /* loop on each row of the output image, input coordinates are estimated from output ones */
+ /* two YUV rows are computed at each pass */
+ for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+ {
+ /* update working pointers */
+ pu8_yn = pu8_y_data;
+ pu8_ys = pu8_yn + u32_stride_Y;
+
+ pu8_u = pu8_u_data;
+ pu8_v = pu8_v_data;
+
+ pu8_rgbn= pu8_rgbn_data;
+
+ /* loop on each column of the output image*/
+ for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+ {
+ /* get RGB samples of 4 pixels (a 2x2 block) */
+ GET_RGB24(i32_r00, i32_g00, i32_b00, pu8_rgbn, 0);
+ GET_RGB24(i32_r10, i32_g10, i32_b10, pu8_rgbn, CST_RGB_24_SIZE);
+ GET_RGB24(i32_r01, i32_g01, i32_b01, pu8_rgbn, u32_stride_rgb);
+ GET_RGB24(i32_r11, i32_g11, i32_b11, pu8_rgbn, u32_stride_rgb + CST_RGB_24_SIZE);
+
+ i32_u00 = U24(i32_r00, i32_g00, i32_b00);
+ i32_v00 = V24(i32_r00, i32_g00, i32_b00);
+ i32_y00 = Y24(i32_r00, i32_g00, i32_b00); /* matrix luminance */
+ pu8_yn[0]= (M4VIFI_UInt8)i32_y00;
+
+ i32_u10 = U24(i32_r10, i32_g10, i32_b10);
+ i32_v10 = V24(i32_r10, i32_g10, i32_b10);
+ i32_y10 = Y24(i32_r10, i32_g10, i32_b10);
+ pu8_yn[1]= (M4VIFI_UInt8)i32_y10;
+
+ i32_u01 = U24(i32_r01, i32_g01, i32_b01);
+ i32_v01 = V24(i32_r01, i32_g01, i32_b01);
+ i32_y01 = Y24(i32_r01, i32_g01, i32_b01);
+ pu8_ys[0]= (M4VIFI_UInt8)i32_y01;
+
+ i32_u11 = U24(i32_r11, i32_g11, i32_b11);
+ i32_v11 = V24(i32_r11, i32_g11, i32_b11);
+ i32_y11 = Y24(i32_r11, i32_g11, i32_b11);
+ pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+
+ /* 4:2:0 chroma: rounded average of the four co-sited U and V samples */
+ *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+ *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+
+ pu8_rgbn += (CST_RGB_24_SIZE<<1);
+ pu8_yn += 2;
+ pu8_ys += 2;
+
+ pu8_u ++;
+ pu8_v ++;
+ } /* end of horizontal scanning */
+
+ pu8_y_data += u32_stride2_Y;
+ pu8_u_data += u32_stride_U;
+ pu8_v_data += u32_stride_V;
+ pu8_rgbn_data += u32_stride_2rgb;
+
+
+ } /* End of vertical scanning */
+
+ return M4VIFI_OK;
+}
+
+/** YUV420 to YUV420 */
+/**
+ *******************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_YUV420toYUV420 (void *pUserData,
+ * M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @brief Transform YUV420 image to a YUV420 image.
+ * @param pUserData: (IN) User Specific Data (Unused - could be NULL)
+ * @param pPlaneIn: (IN) Pointer to YUV plane buffer
+ * @param pPlaneOut: (OUT) Pointer to YUV Plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in plane height
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: Error in plane width
+ *******************************************************************************************
+ */
+
+M4VIFI_UInt8 M4VIFI_YUV420toYUV420(void *user_data, M4VIFI_ImagePlane PlaneIn[3], M4VIFI_ImagePlane *PlaneOut )
+{
+ /* Straight copy of the three YUV420 planes, row by row, honoring each
+ plane's own stride and top-left offset. user_data is unused.
+ NOTE(review): plane dimensions are assumed equal between input and
+ output; no size check is performed here (unlike the converters above). */
+ M4VIFI_Int32 plane_number;
+ M4VIFI_UInt32 i;
+ M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
+
+ for (plane_number = 0; plane_number < 3; plane_number++)
+ {
+ /* Start of the active area of each plane */
+ p_buf_src = &(PlaneIn[plane_number].pac_data[PlaneIn[plane_number].u_topleft]);
+ p_buf_dest = &(PlaneOut[plane_number].pac_data[PlaneOut[plane_number].u_topleft]);
+ for (i = 0; i < PlaneOut[plane_number].u_height; i++)
+ {
+ /* Copy one row of active pixels; strides may exceed u_width */
+ memcpy((void *)p_buf_dest, (void *)p_buf_src ,PlaneOut[plane_number].u_width);
+ p_buf_src += PlaneIn[plane_number].u_stride;
+ p_buf_dest += PlaneOut[plane_number].u_stride;
+ }
+ }
+ return M4VIFI_OK;
+}
+
+/**
+ ***********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @author David Dana (PHILIPS Software)
+ * @brief Resizes YUV420 Planar plane.
+ * @note Basic structure of the function
+ * Loop on each row (step 2)
+ * Loop on each column (step 2)
+ * Get four Y samples and 1 U & V sample
+ * Resize the Y with corresponding U and V samples
+ * Place the YUV in the output plane
+ * end loop column
+ * end loop row
+ * For resizing bilinear interpolation linearly interpolates along
+ * each row, and then uses that result in a linear interpolation down each column.
+ * Each estimated pixel in the output image is a weighted
+ * combination of its four neighbours. The ratio of compression
+ * or dilatation is estimated using input and output sizes.
+ * @param pUserData: (IN) User Data
+ * @param pPlaneIn: (IN) Pointer to YUV420 (Planar) plane buffer
+ * @param pPlaneOut: (OUT) Pointer to YUV420 (Planar) plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: Error in width
+ ***********************************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut)
+{
+ M4VIFI_UInt8 *pu8_data_in, *pu8_data_out, *pu8dum;
+ M4VIFI_UInt32 u32_plane;
+ M4VIFI_UInt32 u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+ M4VIFI_UInt32 u32_stride_in, u32_stride_out;
+ /* Horizontal/vertical step ratios in 16.16 fixed point (MAX_SHORT = 1.0) */
+ M4VIFI_UInt32 u32_x_inc, u32_y_inc;
+ M4VIFI_UInt32 u32_x_accum, u32_y_accum, u32_x_accum_start;
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_y_frac;
+ M4VIFI_UInt32 u32_x_frac;
+ M4VIFI_UInt32 u32_temp_value;
+ M4VIFI_UInt8 *pu8_src_top;
+ M4VIFI_UInt8 *pu8_src_bottom;
+
+ M4VIFI_UInt8 u8Wflag = 0;
+ M4VIFI_UInt8 u8Hflag = 0;
+ M4VIFI_UInt32 loop = 0;
+
+
+ /*
+ If input width is equal to output width and input height equal to
+ output height then M4VIFI_YUV420toYUV420 is called.
+ */
+ if ((pPlaneIn[0].u_height == pPlaneOut[0].u_height) &&
+ (pPlaneIn[0].u_width == pPlaneOut[0].u_width))
+ {
+ return M4VIFI_YUV420toYUV420(pUserData, pPlaneIn, pPlaneOut);
+ }
+
+ /* Check for the YUV width and height are even */
+ if ((IS_EVEN(pPlaneIn[0].u_height) == FALSE) ||
+ (IS_EVEN(pPlaneOut[0].u_height) == FALSE))
+ {
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+ }
+
+ if ((IS_EVEN(pPlaneIn[0].u_width) == FALSE) ||
+ (IS_EVEN(pPlaneOut[0].u_width) == FALSE))
+ {
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+ }
+
+ /* Loop on planes (Y, U, V) */
+ for(u32_plane = 0;u32_plane < PLANES;u32_plane++)
+ {
+ /* Set the working pointers at the beginning of the input/output data field */
+ pu8_data_in = pPlaneIn[u32_plane].pac_data + pPlaneIn[u32_plane].u_topleft;
+ pu8_data_out = pPlaneOut[u32_plane].pac_data + pPlaneOut[u32_plane].u_topleft;
+
+ /* Get the memory jump corresponding to a row jump */
+ u32_stride_in = pPlaneIn[u32_plane].u_stride;
+ u32_stride_out = pPlaneOut[u32_plane].u_stride;
+
+ /* Set the bounds of the active image */
+ u32_width_in = pPlaneIn[u32_plane].u_width;
+ u32_height_in = pPlaneIn[u32_plane].u_height;
+
+ u32_width_out = pPlaneOut[u32_plane].u_width;
+ u32_height_out = pPlaneOut[u32_plane].u_height;
+
+ /*
+ For the case , width_out = width_in , set the flag to avoid
+ accessing one column beyond the input width. In this case the last
+ column is replicated for processing
+ */
+ if (u32_width_out == u32_width_in) {
+ u32_width_out = u32_width_out-1;
+ u8Wflag = 1;
+ }
+
+ /* Compute horizontal ratio between src and destination width.*/
+ if (u32_width_out >= u32_width_in)
+ {
+ u32_x_inc = ((u32_width_in-1) * MAX_SHORT) / (u32_width_out-1);
+ }
+ else
+ {
+ u32_x_inc = (u32_width_in * MAX_SHORT) / (u32_width_out);
+ }
+
+ /*
+ For the case , height_out = height_in , set the flag to avoid
+ accessing one row beyond the input height. In this case the last
+ row is replicated for processing
+ */
+ if (u32_height_out == u32_height_in) {
+ u32_height_out = u32_height_out-1;
+ u8Hflag = 1;
+ }
+
+ /* Compute vertical ratio between src and destination height.*/
+ if (u32_height_out >= u32_height_in)
+ {
+ u32_y_inc = ((u32_height_in - 1) * MAX_SHORT) / (u32_height_out-1);
+ }
+ else
+ {
+ u32_y_inc = (u32_height_in * MAX_SHORT) / (u32_height_out);
+ }
+
+ /*
+ Calculate initial accumulator value : u32_y_accum_start.
+ u32_y_accum_start is coded on 15 bits, and represents a value
+ between 0 and 0.5
+ */
+ if (u32_y_inc >= MAX_SHORT)
+ {
+ /*
+ Keep the fractional part, assuming that integer part is coded
+ on the 16 high bits and the fractional on the 16 low bits
+ */
+ u32_y_accum = u32_y_inc & 0xffff;
+
+ if (!u32_y_accum)
+ {
+ u32_y_accum = MAX_SHORT;
+ }
+
+ u32_y_accum >>= 1;
+ }
+ else
+ {
+ u32_y_accum = 0;
+ }
+
+
+ /*
+ Calculate initial accumulator value : u32_x_accum_start.
+ u32_x_accum_start is coded on 15 bits, and represents a value
+ between 0 and 0.5
+ */
+ if (u32_x_inc >= MAX_SHORT)
+ {
+ u32_x_accum_start = u32_x_inc & 0xffff;
+
+ if (!u32_x_accum_start)
+ {
+ u32_x_accum_start = MAX_SHORT;
+ }
+
+ u32_x_accum_start >>= 1;
+ }
+ else
+ {
+ u32_x_accum_start = 0;
+ }
+
+ u32_height = u32_height_out;
+
+ /*
+ Bilinear interpolation linearly interpolates along each row, and
+ then uses that result in a linear interpolation down each column.
+ Each estimated pixel in the output image is a weighted combination
+ of its four neighbours according to the formula:
+ F(p',q') = f(p,q)R(-a)R(b) + f(p,q+1)R(-a)R(b-1)
+ + f(p+1,q)R(1-a)R(b) + f(p+1,q+1)R(1-a)R(b-1)
+ with R(x) = x+1 for -1 <= x <= 0, R(x) = 1-x for 0 <= x <= 1,
+ and a (resp. b) weighting coefficient is the distance
+ from the nearest neighbor in the p (resp. q) direction
+ */
+
+ do { /* Scan all the row */
+
+ /* Vertical weight factor (top 4 fractional bits, range 0..15) */
+ u32_y_frac = (u32_y_accum>>12)&15;
+
+ /* Reinit accumulator */
+ u32_x_accum = u32_x_accum_start;
+
+ u32_width = u32_width_out;
+
+ do { /* Scan along each row */
+ pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
+ pu8_src_bottom = pu8_src_top + u32_stride_in;
+ u32_x_frac = (u32_x_accum >> 12)&15; /* Horizontal weight factor */
+
+ /* Weighted combination (weights sum to 16*16 = 256, hence >>8) */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+ pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[0]*(16-u32_x_frac) +
+ pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update horizontal accumulator */
+ u32_x_accum += u32_x_inc;
+ } while(--u32_width);
+
+ /*
+ This u8Wflag flag gets in to effect if input and output
+ width is same, and height may be different. So previous
+ pixel is replicated here
+ */
+ if (u8Wflag) {
+ *pu8_data_out = (M4VIFI_UInt8)u32_temp_value;
+ }
+
+ /* Remember the start of the row just written (used for u8Hflag below) */
+ pu8dum = (pu8_data_out-u32_width_out);
+ pu8_data_out = pu8_data_out + u32_stride_out - u32_width_out;
+
+ /* Update vertical accumulator; integer carry advances the input rows */
+ u32_y_accum += u32_y_inc;
+ if (u32_y_accum>>16) {
+ pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * u32_stride_in;
+ u32_y_accum &= 0xffff;
+ }
+ } while(--u32_height);
+
+ /*
+ This u8Hflag flag gets in to effect if input and output height
+ is same, and width may be different. So previous pixel row is
+ replicated here
+ */
+ if (u8Hflag) {
+ for(loop =0; loop < (u32_width_out+u8Wflag); loop++) {
+ *pu8_data_out++ = (M4VIFI_UInt8)*pu8dum++;
+ }
+ }
+ }
+
+ return M4VIFI_OK;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR applyRenderingMode(M4VIFI_ImagePlane* pPlaneIn,
+ *             M4VIFI_ImagePlane* pPlaneOut, M4xVSS_MediaRendering mediaRendering)
+ * @brief   Convert the input YUV420 planes into the output planes according to
+ *          the requested rendering mode: plain resizing, resizing with black
+ *          borders (aspect ratio preserved), or cropping (aspect ratio
+ *          preserved, excess input discarded).
+ * @param   pPlaneIn:       (IN)  Input YUV420 planar image (array of 3 planes)
+ * @param   pPlaneOut:      (OUT) Output YUV420 planar image (array of 3 planes)
+ * @param   mediaRendering: (IN)  Rendering mode to apply
+ * @return  M4NO_ERROR: no error
+ * @return  M4ERR_ALLOC: temporary plane allocation failed
+ * @return  Any error returned by the resize filter or the AIR component
+ ******************************************************************************
+*/
+M4OSA_ERR applyRenderingMode(M4VIFI_ImagePlane* pPlaneIn, M4VIFI_ImagePlane* pPlaneOut, M4xVSS_MediaRendering mediaRendering)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if(mediaRendering == M4xVSS_kResizing)
+    {
+        /**
+         * Call the resize filter. From the intermediate frame to the encoder image plane */
+        err = M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL, pPlaneIn, pPlaneOut);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("applyRenderingMode: M4ViFilResizeBilinearYUV420toYUV420 returns 0x%x!", err);
+            return err;
+        }
+    }
+    else
+    {
+        M4AIR_Params Params;
+        /* Initialized so the final cleanup check is always well-defined */
+        M4OSA_Context m_air_context = M4OSA_NULL;
+        M4VIFI_ImagePlane pImagePlanesTemp[3];
+        /* Default to the output planes; the black-borders branch redirects
+           this to the temporary planes. Initializing here avoids passing an
+           indeterminate pointer to M4AIR_get for any unexpected mode value. */
+        M4VIFI_ImagePlane* pPlaneTemp = pPlaneOut;
+        M4OSA_UInt8* pOutPlaneY = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+        M4OSA_UInt8* pOutPlaneU = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+        M4OSA_UInt8* pOutPlaneV = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+        M4OSA_UInt8* pInPlaneY = NULL;
+        M4OSA_UInt8* pInPlaneU = NULL;
+        M4OSA_UInt8* pInPlaneV = NULL;
+        M4OSA_UInt32 i;
+
+        /* BUGFIX: the AIR error paths below free pImagePlanesTemp[i].pac_data
+           unconditionally, but those buffers are only allocated in the
+           black-borders mode. NULL-initialize them so that freeing them in
+           cropping mode does not free indeterminate pointers (undefined
+           behavior). free(NULL)/checking NULL is then always safe. */
+        for(i=0; i<3; i++)
+        {
+            pImagePlanesTemp[i].pac_data = M4OSA_NULL;
+        }
+
+        /*to keep media aspect ratio*/
+        /*Initialize AIR Params*/
+        Params.m_inputCoord.m_x = 0;
+        Params.m_inputCoord.m_y = 0;
+        Params.m_inputSize.m_height = pPlaneIn->u_height;
+        Params.m_inputSize.m_width = pPlaneIn->u_width;
+        Params.m_outputSize.m_width = pPlaneOut->u_width;
+        Params.m_outputSize.m_height = pPlaneOut->u_height;
+        Params.m_bOutputStripe = M4OSA_FALSE;
+        Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+
+        /**
+        Media rendering: Black borders*/
+        if(mediaRendering == M4xVSS_kBlackBorders)
+        {
+            /* Pre-fill the whole output with the border color */
+            memset((void *)pPlaneOut[0].pac_data,Y_PLANE_BORDER_VALUE,(pPlaneOut[0].u_height*pPlaneOut[0].u_stride));
+            memset((void *)pPlaneOut[1].pac_data,U_PLANE_BORDER_VALUE,(pPlaneOut[1].u_height*pPlaneOut[1].u_stride));
+            memset((void *)pPlaneOut[2].pac_data,V_PLANE_BORDER_VALUE,(pPlaneOut[2].u_height*pPlaneOut[2].u_stride));
+
+            /* Describe temporary planes matching the output active size
+               (stride == width: the temp buffers are tightly packed) */
+            pImagePlanesTemp[0].u_width = pPlaneOut[0].u_width;
+            pImagePlanesTemp[0].u_height = pPlaneOut[0].u_height;
+            pImagePlanesTemp[0].u_stride = pPlaneOut[0].u_width;
+            pImagePlanesTemp[0].u_topleft = 0;
+
+            pImagePlanesTemp[1].u_width = pPlaneOut[1].u_width;
+            pImagePlanesTemp[1].u_height = pPlaneOut[1].u_height;
+            pImagePlanesTemp[1].u_stride = pPlaneOut[1].u_width;
+            pImagePlanesTemp[1].u_topleft = 0;
+
+            pImagePlanesTemp[2].u_width = pPlaneOut[2].u_width;
+            pImagePlanesTemp[2].u_height = pPlaneOut[2].u_height;
+            pImagePlanesTemp[2].u_stride = pPlaneOut[2].u_width;
+            pImagePlanesTemp[2].u_topleft = 0;
+
+            /* Allocates plan in local image plane structure */
+            pImagePlanesTemp[0].pac_data = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[0].u_width * pImagePlanesTemp[0].u_height, M4VS, (M4OSA_Char*)"applyRenderingMode: temporary plane bufferY") ;
+            if(pImagePlanesTemp[0].pac_data == M4OSA_NULL)
+            {
+                M4OSA_TRACE1_0("Error alloc in applyRenderingMode");
+                return M4ERR_ALLOC;
+            }
+            pImagePlanesTemp[1].pac_data = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[1].u_width * pImagePlanesTemp[1].u_height, M4VS, (M4OSA_Char*)"applyRenderingMode: temporary plane bufferU") ;
+            if(pImagePlanesTemp[1].pac_data == M4OSA_NULL)
+            {
+                M4OSA_TRACE1_0("Error alloc in applyRenderingMode");
+                /* BUGFIX: release the Y plane allocated just above (was leaked) */
+                free(pImagePlanesTemp[0].pac_data);
+                pImagePlanesTemp[0].pac_data = M4OSA_NULL;
+                return M4ERR_ALLOC;
+            }
+            pImagePlanesTemp[2].pac_data = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[2].u_width * pImagePlanesTemp[2].u_height, M4VS, (M4OSA_Char*)"applyRenderingMode: temporary plane bufferV") ;
+            if(pImagePlanesTemp[2].pac_data == M4OSA_NULL)
+            {
+                M4OSA_TRACE1_0("Error alloc in applyRenderingMode");
+                /* BUGFIX: release the Y and U planes allocated above (were leaked) */
+                free(pImagePlanesTemp[0].pac_data);
+                pImagePlanesTemp[0].pac_data = M4OSA_NULL;
+                free(pImagePlanesTemp[1].pac_data);
+                pImagePlanesTemp[1].pac_data = M4OSA_NULL;
+                return M4ERR_ALLOC;
+            }
+
+            pInPlaneY = pImagePlanesTemp[0].pac_data ;
+            pInPlaneU = pImagePlanesTemp[1].pac_data ;
+            pInPlaneV = pImagePlanesTemp[2].pac_data ;
+
+            /* Pre-fill the temporary planes with the border color too */
+            memset((void *)pImagePlanesTemp[0].pac_data,Y_PLANE_BORDER_VALUE,(pImagePlanesTemp[0].u_height*pImagePlanesTemp[0].u_stride));
+            memset((void *)pImagePlanesTemp[1].pac_data,U_PLANE_BORDER_VALUE,(pImagePlanesTemp[1].u_height*pImagePlanesTemp[1].u_stride));
+            memset((void *)pImagePlanesTemp[2].pac_data,V_PLANE_BORDER_VALUE,(pImagePlanesTemp[2].u_height*pImagePlanesTemp[2].u_stride));
+
+            /* Decide which dimension is limiting: scale to fit, borders on the other axis */
+            if((M4OSA_UInt32)((pPlaneIn->u_height * pPlaneOut->u_width) /pPlaneIn->u_width) <= pPlaneOut->u_height)//Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+            {
+                /*it is height so black borders will be on the top and on the bottom side*/
+                Params.m_outputSize.m_width = pPlaneOut->u_width;
+                Params.m_outputSize.m_height = (M4OSA_UInt32)((pPlaneIn->u_height * pPlaneOut->u_width) /pPlaneIn->u_width);
+                /*number of lines at the top*/
+                pImagePlanesTemp[0].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_height-Params.m_outputSize.m_height)>>1))*pImagePlanesTemp[0].u_stride;
+                pImagePlanesTemp[0].u_height = Params.m_outputSize.m_height;
+                pImagePlanesTemp[1].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_height-(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanesTemp[1].u_stride;
+                pImagePlanesTemp[1].u_height = Params.m_outputSize.m_height>>1;
+                pImagePlanesTemp[2].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_height-(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanesTemp[2].u_stride;
+                pImagePlanesTemp[2].u_height = Params.m_outputSize.m_height>>1;
+            }
+            else
+            {
+                /*it is width so black borders will be on the left and right side*/
+                Params.m_outputSize.m_height = pPlaneOut->u_height;
+                Params.m_outputSize.m_width = (M4OSA_UInt32)((pPlaneIn->u_width * pPlaneOut->u_height) /pPlaneIn->u_height);
+
+                pImagePlanesTemp[0].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_width-Params.m_outputSize.m_width)>>1));
+                pImagePlanesTemp[0].u_width = Params.m_outputSize.m_width;
+                pImagePlanesTemp[1].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_width-(Params.m_outputSize.m_width>>1)))>>1);
+                pImagePlanesTemp[1].u_width = Params.m_outputSize.m_width>>1;
+                pImagePlanesTemp[2].u_topleft = (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_width-(Params.m_outputSize.m_width>>1)))>>1);
+                pImagePlanesTemp[2].u_width = Params.m_outputSize.m_width>>1;
+            }
+
+            /*Width and height have to be even*/
+            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+            Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+            Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+            pImagePlanesTemp[0].u_width = (pImagePlanesTemp[0].u_width>>1)<<1;
+            pImagePlanesTemp[1].u_width = (pImagePlanesTemp[1].u_width>>1)<<1;
+            pImagePlanesTemp[2].u_width = (pImagePlanesTemp[2].u_width>>1)<<1;
+            pImagePlanesTemp[0].u_height = (pImagePlanesTemp[0].u_height>>1)<<1;
+            pImagePlanesTemp[1].u_height = (pImagePlanesTemp[1].u_height>>1)<<1;
+            pImagePlanesTemp[2].u_height = (pImagePlanesTemp[2].u_height>>1)<<1;
+
+            /*Check that values are coherent*/
+            if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
+            {
+                Params.m_inputSize.m_width = Params.m_outputSize.m_width;
+            }
+            else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
+            {
+                Params.m_inputSize.m_height = Params.m_outputSize.m_height;
+            }
+            pPlaneTemp = pImagePlanesTemp;
+        }
+
+        /**
+        Media rendering: Cropping*/
+        if(mediaRendering == M4xVSS_kCropping)
+        {
+            Params.m_outputSize.m_height = pPlaneOut->u_height;
+            Params.m_outputSize.m_width = pPlaneOut->u_width;
+            if((Params.m_outputSize.m_height * Params.m_inputSize.m_width) /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
+            {
+                /*height will be cropped*/
+                Params.m_inputSize.m_height = (M4OSA_UInt32)((Params.m_outputSize.m_height * Params.m_inputSize.m_width) /Params.m_outputSize.m_width);
+                Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+                Params.m_inputCoord.m_y = (M4OSA_Int32)((M4OSA_Int32)((pPlaneIn->u_height - Params.m_inputSize.m_height))>>1);
+            }
+            else
+            {
+                /*width will be cropped*/
+                Params.m_inputSize.m_width = (M4OSA_UInt32)((Params.m_outputSize.m_width * Params.m_inputSize.m_height) /Params.m_outputSize.m_height);
+                Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+                Params.m_inputCoord.m_x = (M4OSA_Int32)((M4OSA_Int32)((pPlaneIn->u_width - Params.m_inputSize.m_width))>>1);
+            }
+            pPlaneTemp = pPlaneOut;
+        }
+
+        /**
+         * Call AIR functions */
+        err = M4AIR_create(&m_air_context, M4AIR_kYUV420P);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("applyRenderingMode: Error when initializing AIR: 0x%x", err);
+            for(i=0; i<3; i++)
+            {
+                if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
+                {
+                    free(pImagePlanesTemp[i].pac_data);
+                    pImagePlanesTemp[i].pac_data = M4OSA_NULL;
+                }
+            }
+            return err;
+        }
+
+
+        err = M4AIR_configure(m_air_context, &Params);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("applyRenderingMode: Error when configuring AIR: 0x%x", err);
+            M4AIR_cleanUp(m_air_context);
+            for(i=0; i<3; i++)
+            {
+                if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
+                {
+                    free(pImagePlanesTemp[i].pac_data);
+                    pImagePlanesTemp[i].pac_data = M4OSA_NULL;
+                }
+            }
+            return err;
+        }
+
+        err = M4AIR_get(m_air_context, pPlaneIn, pPlaneTemp);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("applyRenderingMode: Error when getting AIR plane: 0x%x", err);
+            M4AIR_cleanUp(m_air_context);
+            for(i=0; i<3; i++)
+            {
+                if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
+                {
+                    free(pImagePlanesTemp[i].pac_data);
+                    pImagePlanesTemp[i].pac_data = M4OSA_NULL;
+                }
+            }
+            return err;
+        }
+
+        if(mediaRendering == M4xVSS_kBlackBorders)
+        {
+            /* Copy the resized image from the packed temporary planes into the
+               (pre-blackened, possibly larger-stride) output planes */
+            for(i=0; i<pPlaneOut[0].u_height; i++)
+            {
+                memcpy((void *)pOutPlaneY, (void *)pInPlaneY, pPlaneOut[0].u_width);
+                pInPlaneY += pPlaneOut[0].u_width;
+                pOutPlaneY += pPlaneOut[0].u_stride;
+            }
+            for(i=0; i<pPlaneOut[1].u_height; i++)
+            {
+                memcpy((void *)pOutPlaneU, (void *)pInPlaneU, pPlaneOut[1].u_width);
+                pInPlaneU += pPlaneOut[1].u_width;
+                pOutPlaneU += pPlaneOut[1].u_stride;
+            }
+            for(i=0; i<pPlaneOut[2].u_height; i++)
+            {
+                memcpy((void *)pOutPlaneV, (void *)pInPlaneV, pPlaneOut[2].u_width);
+                pInPlaneV += pPlaneOut[2].u_width;
+                pOutPlaneV += pPlaneOut[2].u_stride;
+            }
+
+            for(i=0; i<3; i++)
+            {
+                if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
+                {
+                    free(pImagePlanesTemp[i].pac_data);
+                    pImagePlanesTemp[i].pac_data = M4OSA_NULL;
+                }
+            }
+        }
+
+        if (m_air_context != M4OSA_NULL) {
+            M4AIR_cleanUp(m_air_context);
+            m_air_context = M4OSA_NULL;
+        }
+    }
+
+    return err;
+}
+
+//TODO: remove this code after link with videoartist lib
+/* M4AIR code*/
+#define M4AIR_YUV420_FORMAT_SUPPORTED
+#define M4AIR_YUV420A_FORMAT_SUPPORTED
+
+/************************* COMPILATION CHECKS ***************************/
+#ifndef M4AIR_YUV420_FORMAT_SUPPORTED
+#ifndef M4AIR_BGR565_FORMAT_SUPPORTED
+#ifndef M4AIR_RGB565_FORMAT_SUPPORTED
+#ifndef M4AIR_BGR888_FORMAT_SUPPORTED
+#ifndef M4AIR_RGB888_FORMAT_SUPPORTED
+#ifndef M4AIR_JPG_FORMAT_SUPPORTED
+
+#error "Please define at least one input format for the AIR component"
+
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+/************************ M4AIR INTERNAL TYPES DEFINITIONS ***********************/
+
+/**
+ ******************************************************************************
+ * enum M4AIR_States
+ * @brief The following enumeration defines the internal states of the AIR.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4AIR_kCreated, /**< State after M4AIR_create has been called */
+ M4AIR_kConfigured /**< State after M4AIR_configure has been called */
+}M4AIR_States; /**< Two-state lifecycle of an AIR instance */
+
+
+/**
+ ******************************************************************************
+ * struct M4AIR_InternalContext
+ * @brief The following structure is the internal context of the AIR.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4AIR_States m_state; /**< Internal state */
+ M4AIR_InputFormatType m_inputFormat; /**< Input format like YUV420Planar, RGB565, JPG, etc ... */
+ M4AIR_Params m_params; /**< Current input Parameter of the processing */
+ M4OSA_UInt32 u32_x_inc[4]; /**< ratio between input and output width for YUV */
+ M4OSA_UInt32 u32_y_inc[4]; /**< ratio between input and output height for YUV */
+ M4OSA_UInt32 u32_x_accum_start[4]; /**< horizontal initial accumulator value */
+ M4OSA_UInt32 u32_y_accum_start[4]; /**< Vertical initial accumulator value */
+ M4OSA_UInt32 u32_x_accum[4]; /**< save of horizontal accumulator value */
+ M4OSA_UInt32 u32_y_accum[4]; /**< save of vertical accumulator value */
+ M4OSA_UInt8* pu8_data_in[4]; /**< Save of input plane pointers in case of stripe mode */
+ M4OSA_UInt32 m_procRows; /**< Number of processed rows, used in stripe mode only */
+ M4OSA_Bool m_bOnlyCopy; /**< Flag to know if we just perform a copy or a bilinear interpolation */
+ M4OSA_Bool m_bFlipX; /**< Depend on output orientation, used during processing to revert processing order in X coordinates */
+ M4OSA_Bool m_bFlipY; /**< Depend on output orientation, used during processing to revert processing order in Y coordinates */
+ M4OSA_Bool m_bRevertXY; /**< Depend on output orientation, used during processing to revert X and Y processing order (+-90° rotation) */
+}M4AIR_InternalContext;
+
+/********************************* MACROS *******************************/
+/* Parameter-check helper: returns retval from the enclosing function when pointer is M4OSA_NULL. */
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
+
+
+/********************** M4AIR PUBLIC API IMPLEMENTATION ********************/
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
+ * @author Arnaud Collard
+ * @brief This function initialize an instance of the AIR.
+ * @param pContext: (IN/OUT) Address of the context to create
+ * @param inputFormat: (IN) input format type.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
+ * @return M4ERR_ALLOC: No more memory is available
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
+{
+ M4OSA_ERR err = M4NO_ERROR ;
+ M4AIR_InternalContext* pC = M4OSA_NULL ;
+ /* Check that the address on the context is not NULL */
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+ *pContext = M4OSA_NULL ;
+
+ /* Internal Context creation */
+ pC = (M4AIR_InternalContext*)M4OSA_32bitAlignedMalloc(sizeof(M4AIR_InternalContext), M4AIR, (M4OSA_Char*)"AIR internal context") ;
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, pC) ;
+
+
+ /* Check if the input format is supported */
+ /* Only formats enabled at compile time are accepted; anything else falls
+ through to the default case and frees the context. */
+ switch(inputFormat)
+ {
+#ifdef M4AIR_YUV420_FORMAT_SUPPORTED
+ case M4AIR_kYUV420P:
+ break ;
+#endif
+#ifdef M4AIR_YUV420A_FORMAT_SUPPORTED
+ case M4AIR_kYUV420AP:
+ break ;
+#endif
+ default:
+ err = M4ERR_AIR_FORMAT_NOT_SUPPORTED;
+ goto M4AIR_create_cleanup ;
+ }
+
+ /**< Save input format and update state */
+ pC->m_inputFormat = inputFormat;
+ pC->m_state = M4AIR_kCreated;
+
+ /* Return the context to the caller */
+ *pContext = pC ;
+
+ return M4NO_ERROR ;
+
+M4AIR_create_cleanup:
+ /* Error management : we destroy the context if needed */
+ if(M4OSA_NULL != pC)
+ {
+ free(pC) ;
+ }
+
+ *pContext = M4OSA_NULL ;
+
+ return err ;
+}
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+ * @author Arnaud Collard
+ * @brief This function destroys an instance of the AIR component
+ * @param pContext: (IN) Context identifying the instance to destroy
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return M4ERR_STATE: Internal state is incompatible with this function call.
+******************************************************************************
+*/
+M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+{
+ M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+ /**< Check state */
+ if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
+ {
+ return M4ERR_STATE;
+ }
+ free(pC) ;
+
+ return M4NO_ERROR ;
+
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+ * @brief This function will configure the AIR.
+ * @note It will set the input and output coordinates and sizes,
+ * and indicates if we will proceed in stripe or not.
+ * In case a M4AIR_get in stripe mode was on going, it will cancel this previous processing
+ * and reset the get process.
+ * @param pContext: (IN) Context identifying the instance
+ * @param pParams->m_bOutputStripe:(IN) Stripe mode.
+ * @param pParams->m_inputCoord: (IN) X,Y coordinates of the first valid pixel in input.
+ * @param pParams->m_inputSize: (IN) input ROI size.
+ * @param pParams->m_outputSize: (IN) output size.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+{
+ M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+ M4OSA_UInt32 i,u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+ M4OSA_UInt32 nb_planes;
+
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+ if(M4AIR_kYUV420AP == pC->m_inputFormat)
+ {
+ nb_planes = 4;
+ }
+ else
+ {
+ nb_planes = 3;
+ }
+
+ /**< Check state */
+ if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
+ {
+ return M4ERR_STATE;
+ }
+
+ /** Save parameters */
+ pC->m_params = *pParams;
+
+ /* Check for the input&output width and height are even */
+ if( ((pC->m_params.m_inputSize.m_height)&0x1) ||
+ ((pC->m_params.m_inputSize.m_height)&0x1))
+ {
+ return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
+ }
+
+ if( ((pC->m_params.m_inputSize.m_width)&0x1) ||
+ ((pC->m_params.m_inputSize.m_width)&0x1))
+ {
+ return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
+ }
+ if(((pC->m_params.m_inputSize.m_width) == (pC->m_params.m_outputSize.m_width))
+ &&((pC->m_params.m_inputSize.m_height) == (pC->m_params.m_outputSize.m_height)))
+ {
+ /**< No resize in this case, we will just copy input in output */
+ pC->m_bOnlyCopy = M4OSA_TRUE;
+ }
+ else
+ {
+ pC->m_bOnlyCopy = M4OSA_FALSE;
+
+ /**< Initialize internal variables used for resize filter */
+ for(i=0;i<nb_planes;i++)
+ {
+
+ u32_width_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_width:(pC->m_params.m_inputSize.m_width+1)>>1;
+ u32_height_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_height:(pC->m_params.m_inputSize.m_height+1)>>1;
+ u32_width_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_width:(pC->m_params.m_outputSize.m_width+1)>>1;
+ u32_height_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_height:(pC->m_params.m_outputSize.m_height+1)>>1;
+
+ /* Compute horizontal ratio between src and destination width.*/
+ if (u32_width_out >= u32_width_in)
+ {
+ pC->u32_x_inc[i] = ((u32_width_in-1) * 0x10000) / (u32_width_out-1);
+ }
+ else
+ {
+ pC->u32_x_inc[i] = (u32_width_in * 0x10000) / (u32_width_out);
+ }
+
+ /* Compute vertical ratio between src and destination height.*/
+ if (u32_height_out >= u32_height_in)
+ {
+ pC->u32_y_inc[i] = ((u32_height_in - 1) * 0x10000) / (u32_height_out-1);
+ }
+ else
+ {
+ pC->u32_y_inc[i] = (u32_height_in * 0x10000) / (u32_height_out);
+ }
+
+ /*
+ Calculate initial accumulator value : u32_y_accum_start.
+ u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+ */
+ if (pC->u32_y_inc[i] >= 0x10000)
+ {
+ /*
+ Keep the fractionnal part, assimung that integer part is coded
+ on the 16 high bits and the fractionnal on the 15 low bits
+ */
+ pC->u32_y_accum_start[i] = pC->u32_y_inc[i] & 0xffff;
+
+ if (!pC->u32_y_accum_start[i])
+ {
+ pC->u32_y_accum_start[i] = 0x10000;
+ }
+
+ pC->u32_y_accum_start[i] >>= 1;
+ }
+ else
+ {
+ pC->u32_y_accum_start[i] = 0;
+ }
+ /**< Take into account that Y coordinate can be odd
+ in this case we have to put a 0.5 offset
+ for U and V plane as there a 2 times sub-sampled vs Y*/
+ if((pC->m_params.m_inputCoord.m_y&0x1)&&((i==1)||(i==2)))
+ {
+ pC->u32_y_accum_start[i] += 0x8000;
+ }
+
+ /*
+ Calculate initial accumulator value : u32_x_accum_start.
+ u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+ */
+
+ if (pC->u32_x_inc[i] >= 0x10000)
+ {
+ pC->u32_x_accum_start[i] = pC->u32_x_inc[i] & 0xffff;
+
+ if (!pC->u32_x_accum_start[i])
+ {
+ pC->u32_x_accum_start[i] = 0x10000;
+ }
+
+ pC->u32_x_accum_start[i] >>= 1;
+ }
+ else
+ {
+ pC->u32_x_accum_start[i] = 0;
+ }
+ /**< Take into account that X coordinate can be odd
+ in this case we have to put a 0.5 offset
+ for U and V plane as there a 2 times sub-sampled vs Y*/
+ if((pC->m_params.m_inputCoord.m_x&0x1)&&((i==1)||(i==2)))
+ {
+ pC->u32_x_accum_start[i] += 0x8000;
+ }
+ }
+ }
+
+ /**< Reset variable used for stripe mode */
+ pC->m_procRows = 0;
+
+ /**< Initialize var for X/Y processing order according to orientation */
+ pC->m_bFlipX = M4OSA_FALSE;
+ pC->m_bFlipY = M4OSA_FALSE;
+ pC->m_bRevertXY = M4OSA_FALSE;
+ switch(pParams->m_outputOrientation)
+ {
+ case M4COMMON_kOrientationTopLeft:
+ break;
+ case M4COMMON_kOrientationTopRight:
+ pC->m_bFlipX = M4OSA_TRUE;
+ break;
+ case M4COMMON_kOrientationBottomRight:
+ pC->m_bFlipX = M4OSA_TRUE;
+ pC->m_bFlipY = M4OSA_TRUE;
+ break;
+ case M4COMMON_kOrientationBottomLeft:
+ pC->m_bFlipY = M4OSA_TRUE;
+ break;
+ case M4COMMON_kOrientationLeftTop:
+ pC->m_bRevertXY = M4OSA_TRUE;
+ break;
+ case M4COMMON_kOrientationRightTop:
+ pC->m_bRevertXY = M4OSA_TRUE;
+ pC->m_bFlipY = M4OSA_TRUE;
+ break;
+ case M4COMMON_kOrientationRightBottom:
+ pC->m_bRevertXY = M4OSA_TRUE;
+ pC->m_bFlipX = M4OSA_TRUE;
+ pC->m_bFlipY = M4OSA_TRUE;
+ break;
+ case M4COMMON_kOrientationLeftBottom:
+ pC->m_bRevertXY = M4OSA_TRUE;
+ pC->m_bFlipX = M4OSA_TRUE;
+ break;
+ default:
+ return M4ERR_PARAMETER;
+ }
+ /**< Update state */
+ pC->m_state = M4AIR_kConfigured;
+
+ return M4NO_ERROR ;
+}
+
+
/**
 ******************************************************************************
 * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
 * @brief Provides the requested resized area of interest according to the
 *        settings given to M4AIR_configure.
 * @note  Implementation: per plane, either a straight copy (when input and
 *        output sizes match) or a bilinear resample, both with optional X/Y
 *        flip and +-90 degree rotation. Positions are tracked with 16.16
 *        fixed-point accumulators; the 4 MSBs of the fraction give a 0..15
 *        interpolation weight. In normal mode, dimensions in the output
 *        plane(s) must equal those given to M4AIR_configure. In stripe mode,
 *        only the width matches; the height is the stripe height (typically
 *        16), the function is called once per stripe until the whole picture
 *        has been produced, and the input position is saved in the context
 *        between calls. Any call to M4AIR_configure during a stripe process
 *        resets it to the beginning of the output picture.
 * @param pContext: (IN) Context identifying the instance
 * @param pIn:  (IN) Plane structure containing input Plane(s).
 * @param pOut: (IN/OUT) Plane structure containing output Plane(s).
 * @return M4NO_ERROR: there is no error
 * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
 * @return M4ERR_STATE: the instance has not been configured.
 ******************************************************************************
*/
M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
{
    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
    M4OSA_UInt32 i,j,k,u32_x_frac,u32_y_frac,u32_x_accum,u32_y_accum,u32_shift;
    M4OSA_UInt8 *pu8_data_in, *pu8_data_in_org, *pu8_data_in_tmp, *pu8_data_out;
    M4OSA_UInt8 *pu8_src_top;
    M4OSA_UInt8 *pu8_src_bottom;
    M4OSA_UInt32 u32_temp_value;
    M4OSA_Int32 i32_tmp_offset;
    M4OSA_UInt32 nb_planes;



    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;

    /**< Check state */
    if(M4AIR_kConfigured != pC->m_state)
    {
        return M4ERR_STATE;
    }

    /* YUV420 with an alpha plane has 4 planes, plain YUV420 has 3 */
    if(M4AIR_kYUV420AP == pC->m_inputFormat)
    {
        nb_planes = 4;
    }
    else
    {
        nb_planes = 3;
    }

    /**< Loop on each Plane */
    for(i=0;i<nb_planes;i++)
    {

        /* Set the working pointers at the beginning of the input/output data field */

        u32_shift = ((i==0)||(i==3))?0:1; /**< Depend on Luma or Chroma */

        if((M4OSA_FALSE == pC->m_params.m_bOutputStripe)||((M4OSA_TRUE == pC->m_params.m_bOutputStripe)&&(0 == pC->m_procRows)))
        {
            /**< For input, take care about ROI */
            pu8_data_in = pIn[i].pac_data + pIn[i].u_topleft + (pC->m_params.m_inputCoord.m_x>>u32_shift)
                        + (pC->m_params.m_inputCoord.m_y >> u32_shift) * pIn[i].u_stride;

            /** Go at end of line/column in case X/Y scanning is flipped */
            if(M4OSA_TRUE == pC->m_bFlipX)
            {
                pu8_data_in += ((pC->m_params.m_inputSize.m_width)>>u32_shift) -1 ;
            }
            if(M4OSA_TRUE == pC->m_bFlipY)
            {
                pu8_data_in += ((pC->m_params.m_inputSize.m_height>>u32_shift) -1) * pIn[i].u_stride;
            }

            /**< Initialize accumulators in case we are using it (bilinear interpolation) */
            if( M4OSA_FALSE == pC->m_bOnlyCopy)
            {
                pC->u32_x_accum[i] = pC->u32_x_accum_start[i];
                pC->u32_y_accum[i] = pC->u32_y_accum_start[i];
            }

        }
        else
        {
            /**< In case of stripe mode for other than first stripe, we need to recover input pointer from internal context */
            pu8_data_in = pC->pu8_data_in[i];
        }

        /**< In every mode, output data are at the beginning of the output plane */
        pu8_data_out = pOut[i].pac_data + pOut[i].u_topleft;

        /**< Initialize input offset applied after each pixel: one stride down,
             or one stride up when the Y scan direction is flipped */
        if(M4OSA_FALSE == pC->m_bFlipY)
        {
            i32_tmp_offset = pIn[i].u_stride;
        }
        else
        {
            i32_tmp_offset = -pIn[i].u_stride;
        }

        /**< In this case, no bilinear interpolation is needed as input and output dimensions are the same */
        if( M4OSA_TRUE == pC->m_bOnlyCopy)
        {
            /**< No +-90 degree rotation */
            if(M4OSA_FALSE == pC->m_bRevertXY)
            {
                /**< No flip on X abscissa */
                if(M4OSA_FALSE == pC->m_bFlipX)
                {
                    M4OSA_UInt32 loc_height = pOut[i].u_height;
                    M4OSA_UInt32 loc_width = pOut[i].u_width;
                    M4OSA_UInt32 loc_stride = pIn[i].u_stride;
                    /**< Loop on each row */
                    for (j=0; j<loc_height; j++)
                    {
                        /**< Copy one whole line */
                        memcpy((void *)pu8_data_out, (void *)pu8_data_in, loc_width);

                        /**< Update pointers */
                        pu8_data_out += pOut[i].u_stride;
                        if(M4OSA_FALSE == pC->m_bFlipY)
                        {
                            pu8_data_in += loc_stride;
                        }
                        else
                        {
                            pu8_data_in -= loc_stride;
                        }
                    }
                }
                else
                {
                    /**< X flip: copy each row pixel by pixel, walking the input backwards */
                    /**< Loop on each row */
                    for(j=0;j<pOut[i].u_height;j++)
                    {
                        /**< Loop on each pixel of 1 row */
                        for(k=0;k<pOut[i].u_width;k++)
                        {
                            *pu8_data_out++ = *pu8_data_in--;
                        }

                        /**< Update pointers */
                        pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);

                        /* Undo the per-pixel decrements, then step one line */
                        pu8_data_in += pOut[i].u_width + i32_tmp_offset;

                    }
                }
            }
            /**< Here we have a +-90 degree rotation */
            else
            {
                /**< Rotation: each output row is an input column, walked via i32_tmp_offset */
                /**< Loop on each row */
                for(j=0;j<pOut[i].u_height;j++)
                {
                    pu8_data_in_tmp = pu8_data_in;

                    /**< Loop on each pixel of 1 row */
                    for(k=0;k<pOut[i].u_width;k++)
                    {
                        *pu8_data_out++ = *pu8_data_in_tmp;

                        /**< Update input pointer in order to go to next/past line */
                        pu8_data_in_tmp += i32_tmp_offset;
                    }

                    /**< Update pointers */
                    pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
                    if(M4OSA_FALSE == pC->m_bFlipX)
                    {
                        pu8_data_in ++;
                    }
                    else
                    {
                        pu8_data_in --;
                    }
                }
            }
        }
        /**< Bilinear interpolation */
        else
        {

            if(3 != i) /**< other than alpha plane */
            {
                /**No +-90 degree rotation */
                if(M4OSA_FALSE == pC->m_bRevertXY)
                {

                    /**< Loop on each row */
                    for(j=0;j<pOut[i].u_height;j++)
                    {
                        /* Vertical weight factor (top 4 bits of the 16-bit fraction) */
                        u32_y_frac = (pC->u32_y_accum[i]>>12)&15;

                        /* Reinit horizontal weight factor */
                        u32_x_accum = pC->u32_x_accum_start[i];



                        if(M4OSA_TRUE == pC->m_bFlipX)
                        {

                            /**< Loop on each output pixel in a row */
                            for(k=0;k<pOut[i].u_width;k++)
                            {

                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal weight factor */

                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;

                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;

                                /* Weighted combination of the 2x2 neighborhood
                                   (indices [1]/[0] swapped vs the non-flipped
                                   case because the scan runs right-to-left) */
                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
                                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
                                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
                                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);

                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;

                                /* Update horizontal accumulator */
                                u32_x_accum += pC->u32_x_inc[i];
                            }
                        }

                        else
                        {
                            /**< Loop on each output pixel in a row */
                            for(k=0;k<pOut[i].u_width;k++)
                            {
                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal weight factor */

                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);

                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;

                                /* Weighted combination */
                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
                                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
                                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
                                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);

                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;

                                /* Update horizontal accumulator */
                                u32_x_accum += pC->u32_x_inc[i];
                            }

                        }

                        pu8_data_out += pOut[i].u_stride - pOut[i].u_width;

                        /* Update vertical accumulator; advance the input row by
                           the integer part and keep only the fraction */
                        pC->u32_y_accum[i] += pC->u32_y_inc[i];
                        if (pC->u32_y_accum[i]>>16)
                        {
                            pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
                            pC->u32_y_accum[i] &= 0xffff;
                        }
                    }
                }
                /** +-90 degree rotation */
                else
                {
                    pu8_data_in_org = pu8_data_in;

                    /**< Loop on each output row */
                    for(j=0;j<pOut[i].u_height;j++)
                    {
                        /* horizontal weight factor */
                        u32_x_frac = (pC->u32_x_accum[i]>>12)&15;

                        /* Reinit accumulator */
                        u32_y_accum = pC->u32_y_accum_start[i];

                        if(M4OSA_TRUE == pC->m_bFlipX)
                        {

                            /**< Loop on each output pixel in a row */
                            for(k=0;k<pOut[i].u_width;k++)
                            {

                                u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */


                                pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;

                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;

                                /* Weighted combination */
                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
                                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
                                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
                                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);

                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;

                                /* Update vertical accumulator */
                                u32_y_accum += pC->u32_y_inc[i];
                                if (u32_y_accum>>16)
                                {
                                    pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
                                    u32_y_accum &= 0xffff;
                                }

                            }
                        }
                        else
                        {
                            /**< Loop on each output pixel in a row */
                            for(k=0;k<pOut[i].u_width;k++)
                            {

                                u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */

                                pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);

                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;

                                /* Weighted combination */
                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
                                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
                                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
                                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);

                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;

                                /* Update vertical accumulator */
                                u32_y_accum += pC->u32_y_inc[i];
                                if (u32_y_accum>>16)
                                {
                                    pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
                                    u32_y_accum &= 0xffff;
                                }
                            }
                        }
                        pu8_data_out += pOut[i].u_stride - pOut[i].u_width;

                        /* Update horizontal accumulator */
                        pC->u32_x_accum[i] += pC->u32_x_inc[i];

                        /* Restart each output row from the saved column origin */
                        pu8_data_in = pu8_data_in_org;
                    }

                }
            }/** 3 != i */
            else
            {
                /**< Alpha plane: same resampling as above, but the interpolated
                     value is binarized (see the >>7 step below) */
                /**No +-90 degree rotation */
                if(M4OSA_FALSE == pC->m_bRevertXY)
                {

                    /**< Loop on each row */
                    for(j=0;j<pOut[i].u_height;j++)
                    {
                        /* Vertical weight factor */
                        u32_y_frac = (pC->u32_y_accum[i]>>12)&15;

                        /* Reinit horizontal weight factor */
                        u32_x_accum = pC->u32_x_accum_start[i];



                        if(M4OSA_TRUE == pC->m_bFlipX)
                        {

                            /**< Loop on each output pixel in a row */
                            for(k=0;k<pOut[i].u_width;k++)
                            {

                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal weight factor */

                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;

                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;

                                /* Weighted combination */
                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
                                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
                                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
                                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);

                                /* Binarize alpha: values >= 128 become 255, others 0 */
                                u32_temp_value= (u32_temp_value >> 7)*0xff;

                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;

                                /* Update horizontal accumulator */
                                u32_x_accum += pC->u32_x_inc[i];
                            }
                        }

                        else
                        {
                            /**< Loop on each output pixel in a row */
                            for(k=0;k<pOut[i].u_width;k++)
                            {
                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal weight factor */

                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);

                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;

                                /* Weighted combination */
                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
                                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
                                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
                                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);

                                /* Binarize alpha: values >= 128 become 255, others 0 */
                                u32_temp_value= (u32_temp_value >> 7)*0xff;

                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;

                                /* Update horizontal accumulator */
                                u32_x_accum += pC->u32_x_inc[i];
                            }

                        }

                        pu8_data_out += pOut[i].u_stride - pOut[i].u_width;

                        /* Update vertical accumulator */
                        pC->u32_y_accum[i] += pC->u32_y_inc[i];
                        if (pC->u32_y_accum[i]>>16)
                        {
                            pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
                            pC->u32_y_accum[i] &= 0xffff;
                        }
                    }

                } /**< M4OSA_FALSE == pC->m_bRevertXY */
                /** +-90 degree rotation */
                else
                {
                    pu8_data_in_org = pu8_data_in;

                    /**< Loop on each output row */
                    for(j=0;j<pOut[i].u_height;j++)
                    {
                        /* horizontal weight factor */
                        u32_x_frac = (pC->u32_x_accum[i]>>12)&15;

                        /* Reinit accumulator */
                        u32_y_accum = pC->u32_y_accum_start[i];

                        if(M4OSA_TRUE == pC->m_bFlipX)
                        {

                            /**< Loop on each output pixel in a row */
                            for(k=0;k<pOut[i].u_width;k++)
                            {

                                u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */


                                pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;

                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;

                                /* Weighted combination */
                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
                                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
                                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
                                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);

                                /* Binarize alpha: values >= 128 become 255, others 0 */
                                u32_temp_value= (u32_temp_value >> 7)*0xff;

                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;

                                /* Update vertical accumulator */
                                u32_y_accum += pC->u32_y_inc[i];
                                if (u32_y_accum>>16)
                                {
                                    pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
                                    u32_y_accum &= 0xffff;
                                }

                            }
                        }
                        else
                        {
                            /**< Loop on each output pixel in a row */
                            for(k=0;k<pOut[i].u_width;k++)
                            {

                                u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */

                                pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);

                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;

                                /* Weighted combination */
                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
                                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
                                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
                                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);

                                /* Binarize alpha: values >= 128 become 255, others 0 */
                                u32_temp_value= (u32_temp_value >> 7)*0xff;

                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;

                                /* Update vertical accumulator */
                                u32_y_accum += pC->u32_y_inc[i];
                                if (u32_y_accum>>16)
                                {
                                    pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
                                    u32_y_accum &= 0xffff;
                                }
                            }
                        }
                        pu8_data_out += pOut[i].u_stride - pOut[i].u_width;

                        /* Update horizontal accumulator */
                        pC->u32_x_accum[i] += pC->u32_x_inc[i];

                        pu8_data_in = pu8_data_in_org;

                    }
                } /**< M4OSA_TRUE == pC->m_bRevertXY */
            }/** 3 == i */
        }
        /**< In case of stripe mode, save current input pointer */
        if(M4OSA_TRUE == pC->m_params.m_bOutputStripe)
        {
            pC->pu8_data_in[i] = pu8_data_in;
        }
    }

    /**< Update number of processed rows, reset it if we have finished with the whole processing */
    pC->m_procRows += pOut[0].u_height;
    if(M4OSA_FALSE == pC->m_bRevertXY)
    {
        if(pC->m_params.m_outputSize.m_height <= pC->m_procRows) pC->m_procRows = 0;
    }
    else
    {
        if(pC->m_params.m_outputSize.m_width <= pC->m_procRows) pC->m_procRows = 0;
    }

    return M4NO_ERROR ;

}
+/*+ Handle the image files here */
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR LvGetImageThumbNail(M4OSA_UChar *fileName, M4OSA_Void **pBuffer)
+ * @brief This function gives YUV420 buffer of a given image file (in argb888 format)
+ * @Note: The caller of the function is responsible to free the yuv buffer allocated
+ * @param fileName: (IN) Path to the filename of the image argb data
+ * @param height: (IN) Height of the image
+ * @param width: (OUT) pBuffer pointer to the address where the yuv data address needs to be returned.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return M4ERR_FILE_NOT_FOUND: if the file passed does not exists.
+ ******************************************************************************
+*/
+M4OSA_ERR LvGetImageThumbNail(const char *fileName, M4OSA_UInt32 height, M4OSA_UInt32 width, M4OSA_Void **pBuffer) {
+
+ M4VIFI_ImagePlane rgbPlane, *yuvPlane;
+ M4OSA_UInt32 frameSize_argb = (width * height * 4); // argb data
+ M4OSA_Context lImageFileFp = M4OSA_NULL;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_32bitAlignedMalloc(frameSize_argb, M4VS, (M4OSA_Char*)"Image argb data");
+ if(pTmpData == M4OSA_NULL) {
+ ALOGE("Failed to allocate memory for Image clip");
+ return M4ERR_ALLOC;
+ }
+
+ /** Read the argb data from the passed file. */
+ M4OSA_ERR lerr = M4OSA_fileReadOpen(&lImageFileFp, (M4OSA_Void *) fileName, M4OSA_kFileRead);
+
+ if((lerr != M4NO_ERROR) || (lImageFileFp == M4OSA_NULL))
+ {
+ ALOGE("LVPreviewController: Can not open the file ");
+ free(pTmpData);
+ return M4ERR_FILE_NOT_FOUND;
+ }
+ lerr = M4OSA_fileReadData(lImageFileFp, (M4OSA_MemAddr8)pTmpData, &frameSize_argb);
+ if(lerr != M4NO_ERROR)
+ {
+ ALOGE("LVPreviewController: can not read the data ");
+ M4OSA_fileReadClose(lImageFileFp);
+ free(pTmpData);
+ return lerr;
+ }
+ M4OSA_fileReadClose(lImageFileFp);
+
+ M4OSA_UInt32 frameSize = (width * height * 3); //Size of YUV420 data.
+ rgbPlane.pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize, M4VS, (M4OSA_Char*)"Image clip RGB888 data");
+ if(rgbPlane.pac_data == M4OSA_NULL)
+ {
+ ALOGE("Failed to allocate memory for Image clip");
+ free(pTmpData);
+ return M4ERR_ALLOC;
+ }
+
+ /** Remove the alpha channel */
+ for (M4OSA_UInt32 i=0, j = 0; i < frameSize_argb; i++) {
+ if ((i % 4) == 0) continue;
+ rgbPlane.pac_data[j] = pTmpData[i];
+ j++;
+ }
+ free(pTmpData);
+
+#ifdef FILE_DUMP
+ FILE *fp = fopen("/sdcard/Input/test_rgb.raw", "wb");
+ if(fp == NULL)
+ ALOGE("Errors file can not be created");
+ else {
+ fwrite(rgbPlane.pac_data, frameSize, 1, fp);
+ fclose(fp);
+ }
+#endif
+ rgbPlane.u_height = height;
+ rgbPlane.u_width = width;
+ rgbPlane.u_stride = width*3;
+ rgbPlane.u_topleft = 0;
+
+ yuvPlane = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane),
+ M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+ yuvPlane[0].u_height = height;
+ yuvPlane[0].u_width = width;
+ yuvPlane[0].u_stride = width;
+ yuvPlane[0].u_topleft = 0;
+ yuvPlane[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(yuvPlane[0].u_height * yuvPlane[0].u_width * 1.5, M4VS, (M4OSA_Char*)"imageClip YUV data");
+
+ yuvPlane[1].u_height = yuvPlane[0].u_height >>1;
+ yuvPlane[1].u_width = yuvPlane[0].u_width >> 1;
+ yuvPlane[1].u_stride = yuvPlane[1].u_width;
+ yuvPlane[1].u_topleft = 0;
+ yuvPlane[1].pac_data = (M4VIFI_UInt8*)(yuvPlane[0].pac_data + yuvPlane[0].u_height * yuvPlane[0].u_width);
+
+ yuvPlane[2].u_height = yuvPlane[0].u_height >>1;
+ yuvPlane[2].u_width = yuvPlane[0].u_width >> 1;
+ yuvPlane[2].u_stride = yuvPlane[2].u_width;
+ yuvPlane[2].u_topleft = 0;
+ yuvPlane[2].pac_data = (M4VIFI_UInt8*)(yuvPlane[1].pac_data + yuvPlane[1].u_height * yuvPlane[1].u_width);
+
+
+ err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane, yuvPlane);
+ //err = M4VIFI_BGR888toYUV420(M4OSA_NULL, &rgbPlane, yuvPlane);
+ if(err != M4NO_ERROR)
+ {
+ ALOGE("error when converting from RGB to YUV: 0x%x\n", (unsigned int)err);
+ }
+ free(rgbPlane.pac_data);
+
+ //ALOGE("RGB to YUV done");
+#ifdef FILE_DUMP
+ FILE *fp1 = fopen("/sdcard/Input/test_yuv.raw", "wb");
+ if(fp1 == NULL)
+ ALOGE("Errors file can not be created");
+ else {
+ fwrite(yuvPlane[0].pac_data, yuvPlane[0].u_height * yuvPlane[0].u_width * 1.5, 1, fp1);
+ fclose(fp1);
+ }
+#endif
+ *pBuffer = yuvPlane[0].pac_data;
+ free(yuvPlane);
+ return M4NO_ERROR;
+
+}
+M4OSA_Void prepareYUV420ImagePlane(M4VIFI_ImagePlane *plane,
+ M4OSA_UInt32 width, M4OSA_UInt32 height, M4VIFI_UInt8 *buffer,
+ M4OSA_UInt32 reportedWidth, M4OSA_UInt32 reportedHeight) {
+
+ //Y plane
+ plane[0].u_width = width;
+ plane[0].u_height = height;
+ plane[0].u_stride = reportedWidth;
+ plane[0].u_topleft = 0;
+ plane[0].pac_data = buffer;
+
+ // U plane
+ plane[1].u_width = width/2;
+ plane[1].u_height = height/2;
+ plane[1].u_stride = reportedWidth >> 1;
+ plane[1].u_topleft = 0;
+ plane[1].pac_data = buffer+(reportedWidth*reportedHeight);
+
+ // V Plane
+ plane[2].u_width = width/2;
+ plane[2].u_height = height/2;
+ plane[2].u_stride = reportedWidth >> 1;
+ plane[2].u_topleft = 0;
+ plane[2].pac_data = plane[1].pac_data + ((reportedWidth/2)*(reportedHeight/2));
+}
+
+M4OSA_Void prepareYV12ImagePlane(M4VIFI_ImagePlane *plane,
+ M4OSA_UInt32 width, M4OSA_UInt32 height, M4OSA_UInt32 stride,
+ M4VIFI_UInt8 *buffer) {
+
+ //Y plane
+ plane[0].u_width = width;
+ plane[0].u_height = height;
+ plane[0].u_stride = stride;
+ plane[0].u_topleft = 0;
+ plane[0].pac_data = buffer;
+
+ // U plane
+ plane[1].u_width = width/2;
+ plane[1].u_height = height/2;
+ plane[1].u_stride = android::PreviewRenderer::ALIGN(plane[0].u_stride/2, 16);
+ plane[1].u_topleft = 0;
+ plane[1].pac_data = (buffer
+ + plane[0].u_height * plane[0].u_stride
+ + (plane[0].u_height/2) * android::PreviewRenderer::ALIGN((
+ plane[0].u_stride / 2), 16));
+
+ // V Plane
+ plane[2].u_width = width/2;
+ plane[2].u_height = height/2;
+ plane[2].u_stride = android::PreviewRenderer::ALIGN(plane[0].u_stride/2, 16);
+ plane[2].u_topleft = 0;
+ plane[2].pac_data = (buffer +
+ plane[0].u_height * android::PreviewRenderer::ALIGN(plane[0].u_stride, 16));
+
+
+}
+
+M4OSA_Void swapImagePlanes(
+ M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
+ M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2) {
+
+ planeIn[0].u_height = planeOut[0].u_height;
+ planeIn[0].u_width = planeOut[0].u_width;
+ planeIn[0].u_stride = planeOut[0].u_stride;
+ planeIn[0].u_topleft = planeOut[0].u_topleft;
+ planeIn[0].pac_data = planeOut[0].pac_data;
+
+ /**
+ * U plane */
+ planeIn[1].u_width = planeOut[1].u_width;
+ planeIn[1].u_height = planeOut[1].u_height;
+ planeIn[1].u_stride = planeOut[1].u_stride;
+ planeIn[1].u_topleft = planeOut[1].u_topleft;
+ planeIn[1].pac_data = planeOut[1].pac_data;
+ /**
+ * V Plane */
+ planeIn[2].u_width = planeOut[2].u_width;
+ planeIn[2].u_height = planeOut[2].u_height;
+ planeIn[2].u_stride = planeOut[2].u_stride;
+ planeIn[2].u_topleft = planeOut[2].u_topleft;
+ planeIn[2].pac_data = planeOut[2].pac_data;
+
+ if(planeOut[0].pac_data == (M4VIFI_UInt8*)buffer1)
+ {
+ planeOut[0].pac_data = (M4VIFI_UInt8*)buffer2;
+ planeOut[1].pac_data = (M4VIFI_UInt8*)(buffer2 +
+ planeOut[0].u_width*planeOut[0].u_height);
+
+ planeOut[2].pac_data = (M4VIFI_UInt8*)(buffer2 +
+ planeOut[0].u_width*planeOut[0].u_height +
+ planeOut[1].u_width*planeOut[1].u_height);
+ }
+ else
+ {
+ planeOut[0].pac_data = (M4VIFI_UInt8*)buffer1;
+ planeOut[1].pac_data = (M4VIFI_UInt8*)(buffer1 +
+ planeOut[0].u_width*planeOut[0].u_height);
+
+ planeOut[2].pac_data = (M4VIFI_UInt8*)(buffer1 +
+ planeOut[0].u_width*planeOut[0].u_height +
+ planeOut[1].u_width*planeOut[1].u_height);
+ }
+
+}
+
+M4OSA_Void computePercentageDone(
+ M4OSA_UInt32 ctsMs, M4OSA_UInt32 effectStartTimeMs,
+ M4OSA_UInt32 effectDuration, M4OSA_Double *percentageDone) {
+
+ M4OSA_Double videoEffectTime =0;
+
+ // Compute how far from the beginning of the effect we are, in clip-base time.
+ videoEffectTime =
+ (M4OSA_Int32)(ctsMs+ 0.5) - effectStartTimeMs;
+
+ // To calculate %, substract timeIncrement
+ // because effect should finish on the last frame
+ // which is from CTS = (eof-timeIncrement) till CTS = eof
+ *percentageDone =
+ videoEffectTime / ((M4OSA_Float)effectDuration);
+
+ if(*percentageDone < 0.0) *percentageDone = 0.0;
+ if(*percentageDone > 1.0) *percentageDone = 1.0;
+
+}
+
+
+M4OSA_Void computeProgressForVideoEffect(
+ M4OSA_UInt32 ctsMs, M4OSA_UInt32 effectStartTimeMs,
+ M4OSA_UInt32 effectDuration, M4VSS3GPP_ExternalProgress* extProgress) {
+
+ M4OSA_Double percentageDone =0;
+
+ computePercentageDone(ctsMs, effectStartTimeMs, effectDuration, &percentageDone);
+
+ extProgress->uiProgress = (M4OSA_UInt32)( percentageDone * 1000 );
+ extProgress->uiOutputTime = (M4OSA_UInt32)(ctsMs + 0.5);
+ extProgress->uiClipTime = extProgress->uiOutputTime;
+ extProgress->bIsLast = M4OSA_FALSE;
+}
+
+M4OSA_ERR prepareFramingStructure(
+ M4xVSS_FramingStruct* framingCtx,
+ M4VSS3GPP_EffectSettings* effectsSettings, M4OSA_UInt32 index,
+ M4VIFI_UInt8* overlayRGB, M4VIFI_UInt8* overlayYUV) {
+
+ M4OSA_ERR err = M4NO_ERROR;
+
+ // Force input RGB buffer to even size to avoid errors in YUV conversion
+ framingCtx->FramingRgb = effectsSettings[index].xVSS.pFramingBuffer;
+ framingCtx->FramingRgb->u_width = framingCtx->FramingRgb->u_width & ~1;
+ framingCtx->FramingRgb->u_height = framingCtx->FramingRgb->u_height & ~1;
+ framingCtx->FramingYuv = NULL;
+
+ framingCtx->duration = effectsSettings[index].uiDuration;
+ framingCtx->topleft_x = effectsSettings[index].xVSS.topleft_x;
+ framingCtx->topleft_y = effectsSettings[index].xVSS.topleft_y;
+ framingCtx->pCurrent = framingCtx;
+ framingCtx->pNext = framingCtx;
+ framingCtx->previousClipTime = -1;
+
+ framingCtx->alphaBlendingStruct =
+ (M4xVSS_internalEffectsAlphaBlending*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4xVSS_internalEffectsAlphaBlending), M4VS,
+ (M4OSA_Char*)"alpha blending struct");
+
+ framingCtx->alphaBlendingStruct->m_fadeInTime =
+ effectsSettings[index].xVSS.uialphaBlendingFadeInTime;
+
+ framingCtx->alphaBlendingStruct->m_fadeOutTime =
+ effectsSettings[index].xVSS.uialphaBlendingFadeOutTime;
+
+ framingCtx->alphaBlendingStruct->m_end =
+ effectsSettings[index].xVSS.uialphaBlendingEnd;
+
+ framingCtx->alphaBlendingStruct->m_middle =
+ effectsSettings[index].xVSS.uialphaBlendingMiddle;
+
+ framingCtx->alphaBlendingStruct->m_start =
+ effectsSettings[index].xVSS.uialphaBlendingStart;
+
+ // If new Overlay buffer, convert from RGB to YUV
+ if((overlayRGB != framingCtx->FramingRgb->pac_data) || (overlayYUV == NULL) ) {
+
+ // If YUV buffer exists, delete it
+ if(overlayYUV != NULL) {
+ free(overlayYUV);
+ overlayYUV = NULL;
+ }
+ if(effectsSettings[index].xVSS.rgbType == M4VSS3GPP_kRGB565) {
+ // Input RGB565 plane is provided,
+ // let's convert it to YUV420, and update framing structure
+ err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
+ }
+ else if(effectsSettings[index].xVSS.rgbType == M4VSS3GPP_kRGB888) {
+ // Input RGB888 plane is provided,
+ // let's convert it to YUV420, and update framing structure
+ err = M4xVSS_internalConvertRGB888toYUV(framingCtx);
+ }
+ else {
+ err = M4ERR_PARAMETER;
+ }
+ overlayYUV = framingCtx->FramingYuv[0].pac_data;
+ overlayRGB = framingCtx->FramingRgb->pac_data;
+
+ }
+ else {
+ ALOGV(" YUV buffer reuse");
+ framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(
+ 3*sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char*)"YUV");
+
+ if(framingCtx->FramingYuv == M4OSA_NULL) {
+ return M4ERR_ALLOC;
+ }
+
+ framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
+ framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
+ framingCtx->FramingYuv[0].u_topleft = 0;
+ framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
+ framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)overlayYUV;
+
+ framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
+ framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
+ framingCtx->FramingYuv[1].u_topleft = 0;
+ framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+ framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data +
+ framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
+
+ framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
+ framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
+ framingCtx->FramingYuv[2].u_topleft = 0;
+ framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+ framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data +
+ framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
+
+ framingCtx->duration = 0;
+ framingCtx->previousClipTime = -1;
+ framingCtx->previewOffsetClipTime = -1;
+
+ }
+ return err;
+}
+
+M4OSA_ERR applyColorEffect(M4xVSS_VideoEffectType colorEffect,
+ M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
+ M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2, M4OSA_UInt16 rgbColorData) {
+
+ M4xVSS_ColorStruct colorContext;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ colorContext.colorEffectType = colorEffect;
+ colorContext.rgb16ColorData = rgbColorData;
+
+ err = M4VSS3GPP_externalVideoEffectColor(
+ (M4OSA_Void *)&colorContext, planeIn, planeOut, NULL,
+ colorEffect);
+
+ if(err != M4NO_ERROR) {
+ ALOGV("M4VSS3GPP_externalVideoEffectColor(%d) error %d",
+ colorEffect, err);
+
+ if(NULL != buffer1) {
+ free(buffer1);
+ buffer1 = NULL;
+ }
+ if(NULL != buffer2) {
+ free(buffer2);
+ buffer2 = NULL;
+ }
+ return err;
+ }
+
+ // The out plane now becomes the in plane for adding other effects
+ swapImagePlanes(planeIn, planeOut, buffer1, buffer2);
+
+ return err;
+}
+
+M4OSA_ERR applyLumaEffect(M4VSS3GPP_VideoEffectType videoEffect,
+ M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
+ M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2, M4OSA_Int32 lum_factor) {
+
+ M4OSA_ERR err = M4NO_ERROR;
+
+ err = M4VFL_modifyLumaWithScale(
+ (M4ViComImagePlane*)planeIn,(M4ViComImagePlane*)planeOut,
+ lum_factor, NULL);
+
+ if(err != M4NO_ERROR) {
+ ALOGE("M4VFL_modifyLumaWithScale(%d) error %d", videoEffect, (int)err);
+
+ if(NULL != buffer1) {
+ free(buffer1);
+ buffer1= NULL;
+ }
+ if(NULL != buffer2) {
+ free(buffer2);
+ buffer2= NULL;
+ }
+ return err;
+ }
+
+ // The out plane now becomes the in plane for adding other effects
+ swapImagePlanes(planeIn, planeOut,(M4VIFI_UInt8 *)buffer1,
+ (M4VIFI_UInt8 *)buffer2);
+
+ return err;
+}
+
+M4OSA_ERR applyEffectsAndRenderingMode(vePostProcessParams *params,
+ M4OSA_UInt32 reportedWidth, M4OSA_UInt32 reportedHeight) {
+
+ M4OSA_ERR err = M4NO_ERROR;
+ M4VIFI_ImagePlane planeIn[3], planeOut[3];
+ M4VIFI_UInt8 *finalOutputBuffer = NULL, *tempOutputBuffer= NULL;
+ M4OSA_Double percentageDone =0;
+ M4OSA_Int32 lum_factor;
+ M4VSS3GPP_ExternalProgress extProgress;
+ M4xVSS_FiftiesStruct fiftiesCtx;
+ M4OSA_UInt32 frameSize = 0, i=0;
+
+ frameSize = (params->videoWidth*params->videoHeight*3) >> 1;
+
+ finalOutputBuffer = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize, M4VS,
+ (M4OSA_Char*)("lvpp finalOutputBuffer"));
+
+ if(finalOutputBuffer == NULL) {
+ ALOGE("applyEffectsAndRenderingMode: malloc error");
+ return M4ERR_ALLOC;
+ }
+
+ // allocate the tempOutputBuffer
+ tempOutputBuffer = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
+ ((params->videoHeight*params->videoWidth*3)>>1), M4VS, (M4OSA_Char*)("lvpp colorBuffer"));
+
+ if(tempOutputBuffer == NULL) {
+ ALOGE("applyEffectsAndRenderingMode: malloc error tempOutputBuffer");
+ if(NULL != finalOutputBuffer) {
+ free(finalOutputBuffer);
+ finalOutputBuffer = NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+
+ // Initialize the In plane
+ prepareYUV420ImagePlane(planeIn, params->videoWidth, params->videoHeight,
+ params->vidBuffer, reportedWidth, reportedHeight);
+
+ // Initialize the Out plane
+ prepareYUV420ImagePlane(planeOut, params->videoWidth, params->videoHeight,
+ (M4VIFI_UInt8 *)tempOutputBuffer, params->videoWidth, params->videoHeight);
+
+ // The planeIn contains the YUV420 input data to postprocessing node
+ // and planeOut will contain the YUV420 data with effect
+ // In each successive if condition, apply filter to successive
+ // output YUV frame so that concurrent effects are both applied
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_BLACKANDWHITE) {
+ err = applyColorEffect(M4xVSS_kVideoEffectType_BlackAndWhite,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer, 0);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_PINK) {
+ err = applyColorEffect(M4xVSS_kVideoEffectType_Pink,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer, 0);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_GREEN) {
+ err = applyColorEffect(M4xVSS_kVideoEffectType_Green,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer, 0);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_SEPIA) {
+ err = applyColorEffect(M4xVSS_kVideoEffectType_Sepia,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer, 0);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_NEGATIVE) {
+ err = applyColorEffect(M4xVSS_kVideoEffectType_Negative,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer, 0);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_GRADIENT) {
+ // find the effect in effectSettings array
+ for(i=0;i<params->numberEffects;i++) {
+ if(params->effectsSettings[i].VideoEffectType ==
+ (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Gradient)
+ break;
+ }
+ err = applyColorEffect(M4xVSS_kVideoEffectType_Gradient,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer,
+ params->effectsSettings[i].xVSS.uiRgb16InputColor);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_COLOR_RGB16) {
+ // Find the effect in effectSettings array
+ for(i=0;i<params->numberEffects;i++) {
+ if(params->effectsSettings[i].VideoEffectType ==
+ (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_ColorRGB16)
+ break;
+ }
+ err = applyColorEffect(M4xVSS_kVideoEffectType_ColorRGB16,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer,
+ params->effectsSettings[i].xVSS.uiRgb16InputColor);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_FIFTIES) {
+ // Find the effect in effectSettings array
+ for(i=0;i<params->numberEffects;i++) {
+ if(params->effectsSettings[i].VideoEffectType ==
+ (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Fifties)
+ break;
+ }
+ if(i < params->numberEffects) {
+ computeProgressForVideoEffect(params->timeMs,
+ params->effectsSettings[i].uiStartTime,
+ params->effectsSettings[i].uiDuration, &extProgress);
+
+ if(params->isFiftiesEffectStarted) {
+ fiftiesCtx.previousClipTime = -1;
+ }
+ fiftiesCtx.fiftiesEffectDuration =
+ 1000/params->effectsSettings[i].xVSS.uiFiftiesOutFrameRate;
+
+ fiftiesCtx.shiftRandomValue = 0;
+ fiftiesCtx.stripeRandomValue = 0;
+
+ err = M4VSS3GPP_externalVideoEffectFifties(
+ (M4OSA_Void *)&fiftiesCtx, planeIn, planeOut, &extProgress,
+ M4xVSS_kVideoEffectType_Fifties);
+
+ if(err != M4NO_ERROR) {
+ ALOGE("M4VSS3GPP_externalVideoEffectFifties error 0x%x", (unsigned int)err);
+
+ if(NULL != finalOutputBuffer) {
+ free(finalOutputBuffer);
+ finalOutputBuffer = NULL;
+ }
+ if(NULL != tempOutputBuffer) {
+ free(tempOutputBuffer);
+ tempOutputBuffer = NULL;
+ }
+ return err;
+ }
+
+ // The out plane now becomes the in plane for adding other effects
+ swapImagePlanes(planeIn, planeOut,(M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer);
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_FRAMING) {
+
+ M4xVSS_FramingStruct framingCtx;
+ // Find the effect in effectSettings array
+ for(i=0;i<params->numberEffects;i++) {
+ if(params->effectsSettings[i].VideoEffectType ==
+ (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Framing) {
+ if((params->effectsSettings[i].uiStartTime <= params->timeMs + params->timeOffset) &&
+ ((params->effectsSettings[i].uiStartTime+
+ params->effectsSettings[i].uiDuration) >= params->timeMs + params->timeOffset))
+ {
+ break;
+ }
+ }
+ }
+ if(i < params->numberEffects) {
+ computeProgressForVideoEffect(params->timeMs,
+ params->effectsSettings[i].uiStartTime,
+ params->effectsSettings[i].uiDuration, &extProgress);
+
+ err = prepareFramingStructure(&framingCtx,
+ params->effectsSettings, i, params->overlayFrameRGBBuffer,
+ params->overlayFrameYUVBuffer);
+
+ if(err == M4NO_ERROR) {
+ err = M4VSS3GPP_externalVideoEffectFraming(
+ (M4OSA_Void *)&framingCtx, planeIn, planeOut, &extProgress,
+ M4xVSS_kVideoEffectType_Framing);
+ }
+
+ free(framingCtx.alphaBlendingStruct);
+
+ if(framingCtx.FramingYuv != NULL) {
+ free(framingCtx.FramingYuv);
+ framingCtx.FramingYuv = NULL;
+ }
+ //If prepareFramingStructure / M4VSS3GPP_externalVideoEffectFraming
+ // returned error, then return from function
+ if(err != M4NO_ERROR) {
+
+ if(NULL != finalOutputBuffer) {
+ free(finalOutputBuffer);
+ finalOutputBuffer = NULL;
+ }
+ if(NULL != tempOutputBuffer) {
+ free(tempOutputBuffer);
+ tempOutputBuffer = NULL;
+ }
+ return err;
+ }
+
+ // The out plane now becomes the in plane for adding other effects
+ swapImagePlanes(planeIn, planeOut,(M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer);
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_FADEFROMBLACK) {
+ /* find the effect in effectSettings array*/
+ for(i=0;i<params->numberEffects;i++) {
+ if(params->effectsSettings[i].VideoEffectType ==
+ M4VSS3GPP_kVideoEffectType_FadeFromBlack)
+ break;
+ }
+
+ if(i < params->numberEffects) {
+ computePercentageDone(params->timeMs,
+ params->effectsSettings[i].uiStartTime,
+ params->effectsSettings[i].uiDuration, &percentageDone);
+
+ // Compute where we are in the effect (scale is 0->1024)
+ lum_factor = (M4OSA_Int32)( percentageDone * 1024 );
+ // Apply the darkening effect
+ err = applyLumaEffect(M4VSS3GPP_kVideoEffectType_FadeFromBlack,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer, lum_factor);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+ }
+
+ if(params->currentVideoEffect & VIDEO_EFFECT_FADETOBLACK) {
+ // Find the effect in effectSettings array
+ for(i=0;i<params->numberEffects;i++) {
+ if(params->effectsSettings[i].VideoEffectType ==
+ M4VSS3GPP_kVideoEffectType_FadeToBlack)
+ break;
+ }
+ if(i < params->numberEffects) {
+ computePercentageDone(params->timeMs,
+ params->effectsSettings[i].uiStartTime,
+ params->effectsSettings[i].uiDuration, &percentageDone);
+
+ // Compute where we are in the effect (scale is 0->1024)
+ lum_factor = (M4OSA_Int32)( (1.0-percentageDone) * 1024 );
+ // Apply the darkening effect
+ err = applyLumaEffect(M4VSS3GPP_kVideoEffectType_FadeToBlack,
+ planeIn, planeOut, (M4VIFI_UInt8 *)finalOutputBuffer,
+ (M4VIFI_UInt8 *)tempOutputBuffer, lum_factor);
+ if(err != M4NO_ERROR) {
+ return err;
+ }
+ }
+ }
+
+ ALOGV("doMediaRendering CALL getBuffer()");
+ // Set the output YUV420 plane to be compatible with YV12 format
+ // W & H even
+ // YVU instead of YUV
+ // align buffers on 32 bits
+
+ // Y plane
+ //in YV12 format, sizes must be even
+ M4OSA_UInt32 yv12PlaneWidth = ((params->outVideoWidth +1)>>1)<<1;
+ M4OSA_UInt32 yv12PlaneHeight = ((params->outVideoHeight+1)>>1)<<1;
+
+ prepareYV12ImagePlane(planeOut, yv12PlaneWidth, yv12PlaneHeight,
+ (M4OSA_UInt32)params->outBufferStride, (M4VIFI_UInt8 *)params->pOutBuffer);
+
+ err = applyRenderingMode(planeIn, planeOut, params->renderingMode);
+
+ if(M4OSA_NULL != finalOutputBuffer) {
+ free(finalOutputBuffer);
+ finalOutputBuffer= M4OSA_NULL;
+ }
+ if(M4OSA_NULL != tempOutputBuffer) {
+ free(tempOutputBuffer);
+ tempOutputBuffer = M4OSA_NULL;
+ }
+ if(err != M4NO_ERROR) {
+ ALOGV("doVideoPostProcessing: applyRenderingMode returned err=%d",err);
+ return err;
+ }
+ return M4NO_ERROR;
+}
+
+android::status_t getVideoSizeByResolution(
+ M4VIDEOEDITING_VideoFrameSize resolution,
+ uint32_t *pWidth, uint32_t *pHeight) {
+
+ uint32_t frameWidth, frameHeight;
+
+ if (pWidth == NULL) {
+ ALOGE("getVideoFrameSizeByResolution invalid pointer for pWidth");
+ return android::BAD_VALUE;
+ }
+ if (pHeight == NULL) {
+ ALOGE("getVideoFrameSizeByResolution invalid pointer for pHeight");
+ return android::BAD_VALUE;
+ }
+
+ switch (resolution) {
+ case M4VIDEOEDITING_kSQCIF:
+ frameWidth = 128;
+ frameHeight = 96;
+ break;
+
+ case M4VIDEOEDITING_kQQVGA:
+ frameWidth = 160;
+ frameHeight = 120;
+ break;
+
+ case M4VIDEOEDITING_kQCIF:
+ frameWidth = 176;
+ frameHeight = 144;
+ break;
+
+ case M4VIDEOEDITING_kQVGA:
+ frameWidth = 320;
+ frameHeight = 240;
+ break;
+
+ case M4VIDEOEDITING_kCIF:
+ frameWidth = 352;
+ frameHeight = 288;
+ break;
+
+ case M4VIDEOEDITING_kVGA:
+ frameWidth = 640;
+ frameHeight = 480;
+ break;
+
+ case M4VIDEOEDITING_kWVGA:
+ frameWidth = 800;
+ frameHeight = 480;
+ break;
+
+ case M4VIDEOEDITING_kNTSC:
+ frameWidth = 720;
+ frameHeight = 480;
+ break;
+
+ case M4VIDEOEDITING_k640_360:
+ frameWidth = 640;
+ frameHeight = 360;
+ break;
+
+ case M4VIDEOEDITING_k854_480:
+ frameWidth = 854;
+ frameHeight = 480;
+ break;
+
+ case M4VIDEOEDITING_k1280_720:
+ frameWidth = 1280;
+ frameHeight = 720;
+ break;
+
+ case M4VIDEOEDITING_k1080_720:
+ frameWidth = 1080;
+ frameHeight = 720;
+ break;
+
+ case M4VIDEOEDITING_k960_720:
+ frameWidth = 960;
+ frameHeight = 720;
+ break;
+
+ case M4VIDEOEDITING_k1920_1080:
+ frameWidth = 1920;
+ frameHeight = 1080;
+ break;
+
+ default:
+ ALOGE("Unsupported video resolution %d.", resolution);
+ return android::BAD_VALUE;
+ }
+
+ *pWidth = frameWidth;
+ *pHeight = frameHeight;
+
+ return android::OK;
+}
+
/**
 * Rotates a YUV420 planar frame 90 degrees counter-clockwise (-90°), plane
 * by plane, reading columns of the input bottom-up as rows of the output.
 *
 * NOTE(review): assumes pPlaneOut dimensions are the transpose of
 * pPlaneIn's (out height == in width, out width == in height) — the source
 * start offset uses pPlaneOut[..].u_height as the input's last column index.
 * Confirm with callers (see applyVideoRotation, which swaps width/height).
 *
 * @param pUserData  unused
 * @param pPlaneIn   input Y/U/V planes
 * @param pPlaneOut  output Y/U/V planes (must not alias the input)
 * @return M4VIFI_OK always
 */
M4VIFI_UInt8 M4VIFI_Rotate90LeftYUV420toYUV420(void* pUserData,
    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut) {

    M4VIFI_Int32 plane_number;
    M4VIFI_UInt32 i,j, u_stride;
    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;

    /**< Loop on Y,U and V planes */
    for (plane_number = 0; plane_number < 3; plane_number++) {
        /**< Get adresses of first valid pixel in input and output buffer */
        /**< As we have a -90° rotation, first needed pixel is the upper-right one */
        p_buf_src =
            &(pPlaneIn[plane_number].pac_data[pPlaneIn[plane_number].u_topleft]) +
             pPlaneOut[plane_number].u_height - 1 ;
        p_buf_dest =
            &(pPlaneOut[plane_number].pac_data[pPlaneOut[plane_number].u_topleft]);
        u_stride = pPlaneIn[plane_number].u_stride;
        /**< Loop on output rows */
        for (i = 0; i < pPlaneOut[plane_number].u_height; i++) {
            /**< Loop on all output pixels in a row */
            for (j = 0; j < pPlaneOut[plane_number].u_width; j++) {
                *p_buf_dest++= *p_buf_src;
                p_buf_src += u_stride; /**< Go to the next row */
            }

            /**< Go on next row of the output frame */
            p_buf_dest +=
                pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
            /**< Go to next pixel in the last row of the input frame*/
            /* Rewind past the column just walked, then step one column left. */
            p_buf_src -=
                pPlaneIn[plane_number].u_stride * pPlaneOut[plane_number].u_width + 1 ;
        }
    }

    return M4VIFI_OK;
}
+
/**
 * Rotates a YUV420 planar frame 90 degrees clockwise (+90°), plane by plane,
 * reading columns of the input top-down from the bottom row as rows of the
 * output.
 *
 * NOTE(review): assumes pPlaneOut dimensions are the transpose of
 * pPlaneIn's — the source start offset uses pPlaneOut[..].u_width as the
 * input's last row index. Confirm with callers (see applyVideoRotation).
 *
 * @param pUserData  unused
 * @param pPlaneIn   input Y/U/V planes
 * @param pPlaneOut  output Y/U/V planes (must not alias the input)
 * @return M4VIFI_OK always
 */
M4VIFI_UInt8 M4VIFI_Rotate90RightYUV420toYUV420(void* pUserData,
    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut) {

    M4VIFI_Int32 plane_number;
    M4VIFI_UInt32 i,j, u_stride;
    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;

    /**< Loop on Y,U and V planes */
    for (plane_number = 0; plane_number < 3; plane_number++) {
        /**< Get adresses of first valid pixel in input and output buffer */
        /**< As we have a +90° rotation, first needed pixel is the left-down one */
        p_buf_src =
            &(pPlaneIn[plane_number].pac_data[pPlaneIn[plane_number].u_topleft]) +
             (pPlaneIn[plane_number].u_stride * (pPlaneOut[plane_number].u_width - 1));
        p_buf_dest =
            &(pPlaneOut[plane_number].pac_data[pPlaneOut[plane_number].u_topleft]);
        u_stride = pPlaneIn[plane_number].u_stride;
        /**< Loop on output rows */
        for (i = 0; i < pPlaneOut[plane_number].u_height; i++) {
            /**< Loop on all output pixels in a row */
            for (j = 0; j < pPlaneOut[plane_number].u_width; j++) {
                *p_buf_dest++= *p_buf_src;
                p_buf_src -= u_stride; /**< Go to the previous row */
            }

            /**< Go on next row of the output frame */
            p_buf_dest +=
                pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
            /**< Go to next pixel in the last row of the input frame*/
            /* Rewind past the column just walked, then step one column right. */
            p_buf_src +=
                pPlaneIn[plane_number].u_stride * pPlaneOut[plane_number].u_width +1 ;
        }
    }

    return M4VIFI_OK;
}
+
/**
 * Rotates a YUV420 planar frame 180 degrees, plane by plane.
 *
 * Two modes, selected by comparing the plane base pointers:
 *  - In-place (pPlaneIn aliases pPlaneOut): rows are mirrored pairwise by
 *    swapping pixels from both ends toward the middle; an odd middle row is
 *    mirrored separately.
 *  - Out-of-place: input pixels are streamed front-to-back while the output
 *    is written back-to-front.
 *
 * @param pUserData  unused
 * @param pPlaneIn   input Y/U/V planes
 * @param pPlaneOut  output Y/U/V planes (may alias pPlaneIn for in-place)
 * @return M4VIFI_OK always
 */
M4VIFI_UInt8 M4VIFI_Rotate180YUV420toYUV420(void* pUserData,
    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut) {
    M4VIFI_Int32 plane_number;
    M4VIFI_UInt32 i,j;
    M4VIFI_UInt8 *p_buf_src, *p_buf_dest, temp_pix1;

    /**< Loop on Y,U and V planes */
    for (plane_number = 0; plane_number < 3; plane_number++) {
        /**< Get adresses of first valid pixel in input and output buffer */
        p_buf_src =
            &(pPlaneIn[plane_number].pac_data[pPlaneIn[plane_number].u_topleft]);
        p_buf_dest =
            &(pPlaneOut[plane_number].pac_data[pPlaneOut[plane_number].u_topleft]);

        /**< If pPlaneIn = pPlaneOut, the algorithm will be different */
        if (p_buf_src == p_buf_dest) {
            /**< Get Address of last pixel in the last row of the frame */
            p_buf_dest +=
                pPlaneOut[plane_number].u_stride*(pPlaneOut[plane_number].u_height-1) +
                pPlaneOut[plane_number].u_width - 1;

            /**< We loop (height/2) times on the rows.
             * In case u_height is odd, the row at the middle of the frame
             * has to be processed as must be mirrored */
            for (i = 0; i < ((pPlaneOut[plane_number].u_height)>>1); i++) {
                for (j = 0; j < pPlaneOut[plane_number].u_width; j++) {
                    temp_pix1= *p_buf_dest;
                    *p_buf_dest--= *p_buf_src;
                    *p_buf_src++ = temp_pix1;
                }
                /**< Go on next row in top of frame */
                p_buf_src +=
                    pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
                /**< Go to the last pixel in previous row in bottom of frame*/
                p_buf_dest -=
                    pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
            }

            /**< Mirror middle row in case height is odd */
            if ((pPlaneOut[plane_number].u_height%2)!= 0) {
                /* NOTE(review): this mixes pPlaneOut.pac_data with
                 * pPlaneIn.u_topleft — harmless in this aliased branch only
                 * if the two descriptors carry the same u_topleft; confirm. */
                p_buf_src =
                    &(pPlaneOut[plane_number].pac_data[pPlaneIn[plane_number].u_topleft]);
                p_buf_src +=
                    pPlaneOut[plane_number].u_stride*(pPlaneOut[plane_number].u_height>>1);
                /* NOTE(review): dest starts one past the row end (u_width,
                 * not u_width - 1); the pre-decrement order below compensates
                 * only if the swap loop is read carefully — verify on an
                 * odd-height frame. */
                p_buf_dest =
                    p_buf_src + pPlaneOut[plane_number].u_width;

                /**< We loop u_width/2 times on this row.
                 * In case u_width is odd, the pixel at the middle of this row
                 * remains unchanged */
                for (j = 0; j < (pPlaneOut[plane_number].u_width>>1); j++) {
                    temp_pix1= *p_buf_dest;
                    *p_buf_dest--= *p_buf_src;
                    *p_buf_src++ = temp_pix1;
                }
            }
        } else {
            /**< Get Address of last pixel in the last row of the output frame */
            /* NOTE(review): column offset uses pPlaneIn.u_width while the
             * loops below use pPlaneOut.u_width — equivalent only when the
             * two widths match; confirm callers always pass equal sizes. */
            p_buf_dest +=
                pPlaneOut[plane_number].u_stride*(pPlaneOut[plane_number].u_height-1) +
                pPlaneIn[plane_number].u_width - 1;

            /**< Loop on rows */
            for (i = 0; i < pPlaneOut[plane_number].u_height; i++) {
                for (j = 0; j < pPlaneOut[plane_number].u_width; j++) {
                    *p_buf_dest--= *p_buf_src++;
                }

                /**< Go on next row in top of input frame */
                p_buf_src +=
                    pPlaneIn[plane_number].u_stride - pPlaneOut[plane_number].u_width;
                /**< Go to last pixel of previous row in bottom of input frame*/
                p_buf_dest -=
                    pPlaneOut[plane_number].u_stride - pPlaneOut[plane_number].u_width;
            }
        }
    }

    return M4VIFI_OK;
}
+
+M4OSA_ERR applyVideoRotation(M4OSA_Void* pBuffer, M4OSA_UInt32 width,
+ M4OSA_UInt32 height, M4OSA_UInt32 rotation) {
+
+ M4OSA_ERR err = M4NO_ERROR;
+ M4VIFI_ImagePlane planeIn[3], planeOut[3];
+
+ if (pBuffer == M4OSA_NULL) {
+ ALOGE("applyVideoRotation: NULL input frame");
+ return M4ERR_PARAMETER;
+ }
+ M4OSA_UInt8* outPtr = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(
+ (width*height*1.5), M4VS, (M4OSA_Char*)("rotation out ptr"));
+ if (outPtr == M4OSA_NULL) {
+ return M4ERR_ALLOC;
+ }
+
+ // In plane
+ prepareYUV420ImagePlane(planeIn, width,
+ height, (M4VIFI_UInt8 *)pBuffer, width, height);
+
+ // Out plane
+ if (rotation != 180) {
+ prepareYUV420ImagePlane(planeOut, height,
+ width, outPtr, height, width);
+ }
+
+ switch(rotation) {
+ case 90:
+ M4VIFI_Rotate90RightYUV420toYUV420(M4OSA_NULL, planeIn, planeOut);
+ memcpy(pBuffer, (void *)outPtr, (width*height*1.5));
+ break;
+
+ case 180:
+ // In plane rotation, so planeOut = planeIn
+ M4VIFI_Rotate180YUV420toYUV420(M4OSA_NULL, planeIn, planeIn);
+ break;
+
+ case 270:
+ M4VIFI_Rotate90LeftYUV420toYUV420(M4OSA_NULL, planeIn, planeOut);
+ memcpy(pBuffer, (void *)outPtr, (width*height*1.5));
+ break;
+
+ default:
+ ALOGE("invalid rotation param %d", (int)rotation);
+ err = M4ERR_PARAMETER;
+ break;
+ }
+
+ free((void *)outPtr);
+ return err;
+
+}
+
diff --git a/libvideoeditor/lvpp/VideoEditorTools.h b/libvideoeditor/lvpp/VideoEditorTools.h
new file mode 100755
index 0000000..9b464da
--- /dev/null
+++ b/libvideoeditor/lvpp/VideoEditorTools.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_VE_TOOLS_H
+#define ANDROID_VE_TOOLS_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+#include "M4VFL_transition.h"
+#include "M4VSS3GPP_API.h"
+#include "M4xVSS_API.h"
+#include "M4xVSS_Internal.h"
+#include "M4AIR_API.h"
+#include "PreviewRenderer.h"
+
// Sentinel for an unset/unknown rendering mode.
#define MEDIA_RENDERING_INVALID 255
// RGB565 value treated as transparent in framing overlays (pure green 0x07E0).
#define TRANSPARENT_COLOR 0x7E0
// Maximum luma scale exponent used by the fade effects.
#define LUM_FACTOR_MAX 10
// Bit flags describing which effects are active on the current frame;
// values are OR-ed together into vePostProcessParams.currentVideoEffect.
enum {
    VIDEO_EFFECT_NONE = 0,
    VIDEO_EFFECT_BLACKANDWHITE = 1,
    VIDEO_EFFECT_PINK = 2,
    VIDEO_EFFECT_GREEN = 4,
    VIDEO_EFFECT_SEPIA = 8,
    VIDEO_EFFECT_NEGATIVE = 16,
    VIDEO_EFFECT_FRAMING = 32,
    VIDEO_EFFECT_FIFTIES = 64,
    VIDEO_EFFECT_COLOR_RGB16 = 128,
    VIDEO_EFFECT_GRADIENT = 256,
    VIDEO_EFFECT_FADEFROMBLACK = 512,
    // NOTE(review): bit 1024 is skipped — confirm whether a flag was removed
    // before renumbering; external code may depend on these exact values.
    VIDEO_EFFECT_FADETOBLACK = 2048,
};

// Parameter bundle consumed by applyEffectsAndRenderingMode().
typedef struct {
    M4VIFI_UInt8 *vidBuffer;              // input YUV420 frame data
    M4OSA_UInt32 videoWidth;              // input frame width in pixels
    M4OSA_UInt32 videoHeight;             // input frame height in pixels
    M4OSA_UInt32 timeMs;                  // current composition time, ms
    M4OSA_UInt32 timeOffset; //has the duration of clips played.
                             //The flag shall be used for Framing.
    M4VSS3GPP_EffectSettings* effectsSettings;  // effect settings array
    M4OSA_UInt32 numberEffects;           // number of entries in effectsSettings
    M4OSA_UInt32 outVideoWidth;           // output frame width in pixels
    M4OSA_UInt32 outVideoHeight;          // output frame height in pixels
    M4OSA_UInt32 currentVideoEffect;      // OR of VIDEO_EFFECT_* flags
    M4OSA_Bool isFiftiesEffectStarted;    // TRUE on the first fifties frame
    M4xVSS_MediaRendering renderingMode;  // resize/crop/black-border mode
    uint8_t *pOutBuffer;                  // destination YV12 buffer
    size_t outBufferStride;               // destination Y-plane stride, bytes
    M4VIFI_UInt8* overlayFrameRGBBuffer;  // cached framing overlay, RGB source
    M4VIFI_UInt8* overlayFrameYUVBuffer;  // cached framing overlay, YUV pixels
} vePostProcessParams;
+
+M4VIFI_UInt8 M4VIFI_YUV420PlanarToYUV420Semiplanar(void *user_data, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut );
+M4VIFI_UInt8 M4VIFI_SemiplanarYUV420toYUV420(void *user_data, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut );
+
+M4OSA_ERR M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext, M4VIFI_ImagePlane *PlaneIn,
+ M4VIFI_ImagePlane *PlaneOut,M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiEffectKind);
+
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn[3], M4VIFI_ImagePlane *PlaneOut, M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiEffectKind );
+
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties( M4OSA_Void *pUserData, M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut, M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiEffectKind );
+
+unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out, unsigned long lum_factor, void *user_data);
+
+M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx);
+M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut);
+
+M4OSA_ERR M4xVSS_internalConvertRGB888toYUV(M4xVSS_FramingStruct* framingCtx);
+M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
+
+/*+ Handle the image files here */
+M4OSA_ERR LvGetImageThumbNail(const char *fileName, M4OSA_UInt32 height, M4OSA_UInt32 width, M4OSA_Void **pBuffer);
+/*- Handle the image files here */
+
+M4OSA_ERR applyRenderingMode(M4VIFI_ImagePlane* pPlaneIn, M4VIFI_ImagePlane* pPlaneOut, M4xVSS_MediaRendering mediaRendering);
+
+
+M4VIFI_UInt8 M4VIFI_YUV420toYUV420(void *user_data, M4VIFI_ImagePlane PlaneIn[3], M4VIFI_ImagePlane *PlaneOut );
+M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut);
+
+M4OSA_Void prepareYUV420ImagePlane(M4VIFI_ImagePlane *plane,
+ M4OSA_UInt32 width, M4OSA_UInt32 height, M4VIFI_UInt8 *buffer,
+ M4OSA_UInt32 reportedWidth, M4OSA_UInt32 reportedHeight);
+
+M4OSA_Void prepareYV12ImagePlane(M4VIFI_ImagePlane *plane,
+ M4OSA_UInt32 width, M4OSA_UInt32 height, M4OSA_UInt32 stride, M4VIFI_UInt8 *buffer);
+
+M4OSA_Void swapImagePlanes(
+ M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
+ M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2);
+
+M4OSA_Void computePercentageDone(
+ M4OSA_UInt32 ctsMs, M4OSA_UInt32 effectStartTimeMs,
+ M4OSA_UInt32 effectDuration, M4OSA_Double *percentageDone);
+
+M4OSA_Void computeProgressForVideoEffect(
+ M4OSA_UInt32 ctsMs, M4OSA_UInt32 effectStartTimeMs,
+ M4OSA_UInt32 effectDuration, M4VSS3GPP_ExternalProgress* extProgress);
+
+M4OSA_ERR prepareFramingStructure(
+ M4xVSS_FramingStruct* framingCtx,
+ M4VSS3GPP_EffectSettings* effectsSettings, M4OSA_UInt32 index,
+ M4VIFI_UInt8* overlayRGB, M4VIFI_UInt8* overlayYUV);
+
+M4OSA_ERR applyColorEffect(M4xVSS_VideoEffectType colorEffect,
+ M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
+ M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2, M4OSA_UInt16 rgbColorData);
+
+M4OSA_ERR applyLumaEffect(M4VSS3GPP_VideoEffectType videoEffect,
+ M4VIFI_ImagePlane *planeIn, M4VIFI_ImagePlane *planeOut,
+ M4VIFI_UInt8 *buffer1, M4VIFI_UInt8 *buffer2, M4OSA_Int32 lum_factor);
+
+M4OSA_ERR applyEffectsAndRenderingMode(vePostProcessParams *params,
+ M4OSA_UInt32 reportedWidth, M4OSA_UInt32 reportedHeight);
+
+android::status_t getVideoSizeByResolution(M4VIDEOEDITING_VideoFrameSize resolution,
+ uint32_t *pWidth, uint32_t *pHeight);
+
+M4VIFI_UInt8 M4VIFI_Rotate90LeftYUV420toYUV420(void* pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+M4VIFI_UInt8 M4VIFI_Rotate90RightYUV420toYUV420(void* pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+M4VIFI_UInt8 M4VIFI_Rotate180YUV420toYUV420(void* pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+M4OSA_ERR applyVideoRotation(M4OSA_Void* pBuffer,
+ M4OSA_UInt32 width, M4OSA_UInt32 height, M4OSA_UInt32 rotation);
+#endif // ANDROID_VE_TOOLS_H
diff --git a/libvideoeditor/osal/Android.mk b/libvideoeditor/osal/Android.mk
new file mode 100755
index 0000000..5053e7d
--- /dev/null
+++ b/libvideoeditor/osal/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/osal/inc/LVOSA_FileReader_optim.h b/libvideoeditor/osal/inc/LVOSA_FileReader_optim.h
new file mode 100755
index 0000000..237376d
--- /dev/null
+++ b/libvideoeditor/osal/inc/LVOSA_FileReader_optim.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4OSA_FileReader_optim.h
+ * @brief File reader for Symbian
+ * @note This file declares functions and types to read a file.
+ ******************************************************************************
+*/
+
+
+
+#ifndef M4OSA_FILEREADER_OPTIM_H
+#define M4OSA_FILEREADER_OPTIM_H
+
+#define M4OSA_READER_OPTIM_USE_OSAL_IF
+
+/**/
+#ifndef M4OSA_READER_OPTIM_USE_OSAL_IF
+ typedef struct
+ {
+ M4OSA_Void* (*pFctPtr_Open)( M4OSA_Void* fd,
+ M4OSA_UInt32 FileModeAccess,
+ M4OSA_UInt16* errno );
+ M4OSA_FilePosition (*pFctPtr_Read)( M4OSA_Void* fd,
+ M4OSA_UInt8* data,
+ M4OSA_FilePosition size,
+ M4OSA_UInt16* errno );
+ M4OSA_FilePosition (*pFctPtr_Seek)( M4OSA_Void* fd,
+ M4OSA_FilePosition pos,
+ M4OSA_FileSeekAccessMode mode,
+ M4OSA_UInt16* errno );
+ M4OSA_FilePosition (*pFctPtr_Tell)( M4OSA_Void* fd,
+ M4OSA_UInt16* errno );
+ M4OSA_Int32 (*pFctPtr_Close)( M4OSA_Void* fd,
+ M4OSA_UInt16* errno );
+ M4OSA_Void (*pFctPtr_AccessType)( M4OSA_UInt32 FileModeAccess_In,
+ M4OSA_Void* FileModeAccess_Out );
+
+ } M4OSA_FileSystem_FctPtr;
+#endif
+/**/
+
+
+/* Reader API : bufferized functions */
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ M4OSA_ERR M4OSA_fileReadOpen_optim( M4OSA_Context* context,
+ M4OSA_Void* fileDescriptor,
+ M4OSA_UInt32 fileModeAccess);
+#else
+ M4OSA_ERR M4OSA_fileReadOpen_optim( M4OSA_Context* context,
+ M4OSA_Void* fileDescriptor,
+ M4OSA_UInt32 fileModeAccess,
+ M4OSA_FileSystem_FctPtr *FS);
+#endif
+
+M4OSA_ERR M4OSA_fileReadData_optim( M4OSA_Context context,
+ M4OSA_MemAddr8 buffer,
+ M4OSA_UInt32* size );
+M4OSA_ERR M4OSA_fileReadSeek_optim( M4OSA_Context context,
+ M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* position );
+M4OSA_ERR M4OSA_fileReadClose_optim( M4OSA_Context context );
+M4OSA_ERR M4OSA_fileReadGetOption_optim( M4OSA_Context context,
+ M4OSA_FileReadOptionID optionID,
+ M4OSA_DataOption *optionValue );
+M4OSA_ERR M4OSA_fileReadSetOption_optim( M4OSA_Context context,
+ M4OSA_FileReadOptionID optionID,
+ M4OSA_DataOption optionValue );
+
+#endif /* M4OSA_FILEREADER_OPTIM_H */
diff --git a/libvideoeditor/osal/inc/LV_Macros.h b/libvideoeditor/osal/inc/LV_Macros.h
new file mode 100755
index 0000000..b8d7e85
--- /dev/null
+++ b/libvideoeditor/osal/inc/LV_Macros.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*******************************************************************************
+* @file LV_Macros.h
+* @par NXP Software
+* @brief Macros definition for Smartphone team
+*******************************************************************************/
+
+#ifndef LV_MACROS_H
+#define LV_MACROS_H
+
+/*------------*/
+/* INCLUDES */
+/*------------*/
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+
+/******************************************************************************
+*
+* CHECK_PTR(fct, p, err, errValue)
+* @note This macro checks the value p. If it is NULL, it sets the variable err
+* to errValue and jumps to the label <fct>_cleanUp. A trace is displayed
+* signalling the error, the function name and the line number.
+*
+******************************************************************************/
+#define CHECK_PTR(fct, p, err, errValue) \
+{ \
+ if(M4OSA_NULL == (p)) \
+ { \
+ (err) = (errValue) ; \
+ M4OSA_TRACE1_1((M4OSA_Char*)"" #fct "(L%d): " #p " is NULL, returning " #errValue "",__LINE__) ; \
+ goto fct##_cleanUp; \
+ } \
+}
+
+/******************************************************************************
+*
+* CHECK_ERR(fct, err)
+* @note This macro checks the value err. If it is not NULL, a trace is displayed
+* signalling the error, the function name and the line number. The macro
+* jumps to the label <fct>_cleanUp.
+*
+******************************************************************************/
+#define CHECK_ERR(fct, err) \
+{ \
+ if(M4NO_ERROR != (err)) \
+ { \
+ M4OSA_TRACE1_2((M4OSA_Char*)"!!! " #fct "(L%d): ERROR 0x%.8x returned",\
+ __LINE__,err) ; \
+ goto fct##_cleanUp; \
+ } \
+}
+
+
+/******************************************************************************
+*
+* CHECK_ERR(fct, err)
+* @note This macro compares a current state with a state value. If they are different,
+* err is set to M4ERR_STATE.
+* A trace is displayed signalling the error, the function name and the line number.
+* The macro jumps to the label <fct>_cleanUp.
+*
+******************************************************************************/
+#define CHECK_STATE(fct, stateValue, state) \
+{ \
+ if((stateValue) != (state)) \
+ { \
+ M4OSA_TRACE1_1("" #fct " called in bad state %d", state) ; \
+ (err) = M4ERR_STATE ; \
+ goto fct##_cleanUp; \
+ } \
+}
+
+/******************************************************************************
+*
+* SAFE_FREE(p)
+* @note This macro checks the value of p is not NULL. If it is NULL, it does
+* nothing. Else, p is deallocated and set to NULL.
+*
+******************************************************************************/
+#define SAFE_FREE(p) \
+{ \
+ if(M4OSA_NULL != (p)) \
+ { \
+ free((p)) ; \
+ (p) = M4OSA_NULL ; \
+ } \
+}
+
+
+
+#endif /*--- LV_MACROS_H ---*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_CharStar.h b/libvideoeditor/osal/inc/M4OSA_CharStar.h
new file mode 100755
index 0000000..06316f0
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_CharStar.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_CharStar.h
+ * @ingroup
+ * @brief external API of the Char Star set of functions.
+ ************************************************************************
+*/
+
+#ifndef M4OSA_CHARSTAR_H
+#define M4OSA_CHARSTAR_H
+
+/* general OSAL types and prototypes inclusion */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Time.h"
+#include "M4OSA_FileCommon.h"
+
+/* types definition */
+typedef enum
+{
+ M4OSA_kchrDec = 0x01,
+ M4OSA_kchrHexa = 0x02,
+ M4OSA_kchrOct = 0x03
+} M4OSA_chrNumBase;
+
+/* error and warning codes */
+#define M4ERR_CHR_STR_OVERFLOW M4OSA_ERR_CREATE(M4_ERR,M4OSA_CHARSTAR,0x000001)
+#define M4ERR_CHR_CONV_FAILED M4OSA_ERR_CREATE(M4_ERR,M4OSA_CHARSTAR,0x000002)
+#define M4WAR_CHR_NOT_FOUND M4OSA_ERR_CREATE(M4_WAR,M4OSA_CHARSTAR,0x000001)
+#define M4WAR_CHR_NUM_RANGE M4OSA_ERR_CREATE(M4_WAR,M4OSA_CHARSTAR,0x000002)
+#define M4WAR_CHR_NEGATIVE M4OSA_ERR_CREATE(M4_WAR,M4OSA_CHARSTAR,0x000003)
+
+/* prototypes of the Char Star functions */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrNCopy (M4OSA_Char *strOut,
+ M4OSA_Char *strIn,
+ M4OSA_UInt32 len2Copy);
+M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrAreIdentical (M4OSA_Char *strIn1,
+ M4OSA_Char *strIn2,
+ M4OSA_Bool *result);
+M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrGetUInt32 (M4OSA_Char *strIn,
+ M4OSA_UInt32 *val,
+ M4OSA_Char **strOut,
+ M4OSA_chrNumBase base);
+M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrGetUInt16 (M4OSA_Char *strIn,
+ M4OSA_UInt16 *val,
+ M4OSA_Char **strOut,
+ M4OSA_chrNumBase base);
+M4OSAL_CHARSTAR_EXPORT_TYPE M4OSA_ERR M4OSA_chrSPrintf (M4OSA_Char *strOut,
+ M4OSA_UInt32 strOutMaxLen,
+ M4OSA_Char *format,
+ ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Clock.h b/libvideoeditor/osal/inc/M4OSA_Clock.h
new file mode 100755
index 0000000..db753a5
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Clock.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Clock.h
+ * @ingroup OSAL
+ * @brief clock API
+ ************************************************************************
+*/
+
+#ifndef M4OSA_CLOCK_H
+#define M4OSA_CLOCK_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Time.h"
+
+
+#define M4WAR_TIMESCALE_TOO_BIG M4OSA_ERR_CREATE(M4_WAR,M4OSA_CLOCK,0x000001) /**< Time precision too high for the system*/
+#define M4ERR_CLOCK_BAD_REF_YEAR M4OSA_ERR_CREATE(M4_ERR,M4OSA_CLOCK,0x000001) /**< Input year of reference is neither 1900, nor 1970 nor 2000*/
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+M4OSAL_CLOCK_EXPORT_TYPE M4OSA_ERR M4OSA_clockGetTime(M4OSA_Time* pTime,
+ M4OSA_UInt32 timescale);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*M4OSA_CLOCK_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_CoreID.h b/libvideoeditor/osal/inc/M4OSA_CoreID.h
new file mode 100755
index 0000000..9172800
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_CoreID.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_CoreID.h
+ * @brief defines the unique component identifiers used for memory management
+ * and optionID mechanism
+ * @note
+ ************************************************************************
+*/
+#ifndef __M4OSA_COREID_H__
+#define __M4OSA_COREID_H__
+
+/* CoreId are defined on 14 bits */
+/* we start from 0x0100, lower values are reserved for osal core components */
+
+/* reader shells*/
+#define M4READER_COMMON 0x0100
+#define M4READER_AVI 0x0101
+#define M4READER_AMR 0x0102
+#define M4READER_3GP 0x0103
+#define M4READER_NET 0x0104
+#define M4READER_3GP_HTTP 0x0105
+#define M4READER_MP3 0x0106
+#define M4READER_WAV 0x0107
+#define M4READER_MIDI 0x0108
+#define M4READER_ASF 0x0109
+#define M4READER_REAL 0x010A
+#define M4READER_AAC 0x010B
+#define M4READER_FLEX 0x010C
+#define M4READER_BBA 0x010D
+#define M4READER_SYNTHESIS_AUDIO 0x010E
+#define M4READER_JPEG 0x010F
+
+
+/* writer shells*/
+#define M4WRITER_COMMON 0x0110
+#define M4WRITER_AVI 0x0111
+#define M4WRITER_AMR 0x0112
+#define M4WRITER_3GP 0x0113
+#define M4WRITER_JPEG 0x0116
+#define M4WRITER_MP3 0x0117
+
+/* decoder shells */
+#define M4DECODER_COMMON 0x0120
+#define M4DECODER_JPEG 0x0121
+#define M4DECODER_MPEG4 0x0122
+#define M4DECODER_AUDIO 0x0123
+#define M4DECODER_AVC 0x0124
+#define M4DECODER_MIDI 0x0125
+#define M4DECODER_WMA 0x0126
+#define M4DECODER_WMV 0x0127
+#define M4DECODER_RMV 0x0128
+#define M4DECODER_RMA 0x0129
+#define M4DECODER_AAC 0x012A
+#define M4DECODER_BEATBREW 0x012B
+#define M4DECODER_EXTERNAL 0x012C
+
+/* encoder shells */
+#define M4ENCODER_COMMON 0x0130
+#define M4ENCODER_JPEG 0x0131
+#define M4ENCODER_MPEG4 0x0132
+#define M4ENCODER_AUDIO 0x0133
+#define M4ENCODER_VID_NULL 0x0134
+#define M4ENCODER_MJPEG 0x0135
+#define M4ENCODER_MP3 0x0136
+#define M4ENCODER_H264 0x0137
+#define M4ENCODER_AAC 0x0138
+#define M4ENCODER_AMRNB 0x0139
+#define M4ENCODER_AUD_NULL 0x013A
+#define M4ENCODER_EXTERNAL 0x013B
+
+/* cores */
+#define M4JPG_DECODER 0x0140
+#define M4JPG_ENCODER 0x0141
+
+#define M4MP4_DECODER 0x0142
+#define M4MP4_ENCODER 0x0143
+
+#define M4AVI_COMMON 0x0144
+#define M4AVI_READER 0x0145
+#define M4AVI_WRITER 0x0146
+
+#define M4HTTP_ENGINE 0x0147
+
+#define M4OSA_TMPFILE 0x0148
+#define M4TOOL_TIMER 0x0149
+
+#define M4AMR_READER 0x014A
+
+#define M4MP3_READER 0x014B
+
+#define M4WAV_READER 0x014C
+#define M4WAV_WRITER 0x014D
+#define M4WAV_COMMON 0x014E
+
+#define M4ADTS_READER 0x014F
+#define M4ADIF_READER 0x016A
+
+#define M4SPS 0x0150
+#define M4EXIF_DECODER 0x0151
+#define M4EXIF_ENCODER 0x0152
+#define M4GIF_DECODER 0x0153
+#define M4GIF_ENCODER 0x0154
+#define M4PNG_DECODER 0x0155
+#define M4PNG_ENCODER 0x0156
+#define M4WBMP_DECODER 0x0157
+#define M4WBMP_ENCODER 0x0158
+
+#define M4AMR_WRITER 0x0159 /**< no room to put it along M4AMR_READER */
+
+
+#define M4AVC_DECODER 0x015A
+#define M4AVC_ENCODER 0x015B
+
+#define M4ASF_READER 0x015C
+#define M4WMDRM_AGENT 0x015D
+#define M4MIDI_READER 0x0162 /**< no room before the presenters */
+#define M4RM_READER 0x163
+#define M4RMV_DECODER 0x164
+#define M4RMA_DECODER 0x165
+
+#define M4TOOL_XML 0x0166
+#define M4TOOL_EFR 0x0167 /**< Decryption module for Video Artist */
+#define M4IAL_FTN 0x0168 /* FTN implementation of the IAL */
+#define M4FTN 0x0169 /* FTN library */
+
+/* presenter */
+#define M4PRESENTER_AUDIO 0x0160
+#define M4PRESENTER_VIDEO 0x0161
+
+/* high level interfaces (vps, etc..)*/
+#define M4VPS 0x0170
+#define M4VTS 0x0171
+#define M4VXS 0x0172
+#define M4CALLBACK 0x0173
+#define M4VES 0x0174
+#define M4PREPROCESS_VIDEO 0x0175
+#define M4GRAB_AUDIO 0x0176
+#define M4GRAB_VIDEO 0x0177
+#define M4VSSAVI 0x0178
+#define M4VSS3GPP 0x0179
+#define M4PTO3GPP 0x017A
+#define M4PVX_PARSER 0x017B
+#define M4VCS 0x017C
+#define M4MCS 0x017D
+#define M4MNMC 0x0180 /**< mnm controller */
+#define M4TTEXT_PARSER 0x0181 /**< timed text */
+#define M4MM 0x0182 /**< Music manager */
+#define M4MDP 0x0183 /**< Metadata parser */
+#define M4MMSQLCORE 0x0184
+#define M4VPSIL 0x0185
+#define M4FILEIL 0x0186 /* IL file Interface */
+#define M4MU 0x0187
+#define M4VEE 0x0188 /**< Video effect engine */
+#define M4VA 0x0189 /* VideoArtist */
+#define M4JTS 0x018A
+#define M4JTSIL 0x018B
+#define M4AIR 0x018C /**< AIR */
+#define M4SPE 0x018D /**< Still picture editor */
+#define M4VS 0x018E /**< Video Studio (xVSS) */
+#define M4VESIL 0x018F /**< VES il */
+#define M4ID3 0x0190 /**< ID3 Tag Module */
+#define M4SC 0x0191 /**< Media Scanner */
+#define M4TG 0x0192 /**< Thumbnail Generator*/
+#define M4TS 0x0193 /**< Thumbnail storage */
+#define M4MB 0x0194 /**< Media browser */
+
+/* high level application (test or client app) */
+#define M4APPLI 0x0200
+#define M4VA_APPLI 0x0201 /**< Video Artist test application */
+
+/* external components (HW video codecs, etc.) */
+#define M4VD_EXTERNAL 0x0300
+#define M4VE_EXTERNAL 0x0301
+
+
+/* priority to combine with module ids */
+#define M4HIGH_PRIORITY 0xC000
+#define M4MEDIUM_PRIORITY 0x8000
+#define M4LOW_PRIORITY 0x4000
+#define M4DEFAULT_PRIORITY 0x0000
+
+
+#endif /*__M4OSA_COREID_H__*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Debug.h b/libvideoeditor/osal/inc/M4OSA_Debug.h
new file mode 100755
index 0000000..b06183d
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Debug.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Debug.h
+ * @brief Debug and Trace Macro
+ ************************************************************************
+*/
+
+
+#ifndef _M4OSA_DEBUG_H_
+#define _M4OSA_DEBUG_H_
+
+#include "M4OSA_Error.h"
+#include "M4OSA_Types.h"
+
+
+/* defaut value, defined only if not defined already. */
+#ifndef M4TRACE_ID
+#define M4TRACE_ID M4UNKNOWN_COREID
+#endif /* M4TRACE_ID undefined */
+
+
+#define M4OSA_SUPER_DEBUG_LEVEL 0
+
+#ifndef M4OSA_DEBUG_LEVEL
+#define M4OSA_DEBUG_LEVEL 0
+#endif
+
+
+#define M4OSA_SUPER_TRACE_LEVEL 0
+
+#ifndef M4OSA_TRACE_LEVEL
+#define M4OSA_TRACE_LEVEL 0
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+#if (M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)
+
+/* Debug macros */
+extern M4OSA_Void M4OSA_DEBUG_traceFunction(M4OSA_UInt32 line,
+ M4OSA_Char* fileName,
+ M4OSA_UInt32 level,
+ M4OSA_Char* stringCondition,
+ M4OSA_Char* message,
+ M4OSA_ERR returnedError);
+
+
+#define M4OSA_DEBUG_IFx(cond, errorCode, msg, level)\
+ if(cond)\
+ {\
+ M4OSA_DEBUG_traceFunction(__LINE__, (M4OSA_Char*)__FILE__, level,\
+ (M4OSA_Char*)#cond, (M4OSA_Char*)msg,\
+ (errorCode));\
+ return(errorCode);\
+ }
+
+#define M4OSA_DEBUG(errorCode, msg)\
+ M4OSA_DEBUG_traceFunction(__LINE__, (M4OSA_Char*)__FILE__, 1,\
+ (M4OSA_Char*)#errorCode, (M4OSA_Char*)msg,\
+ (errorCode));
+
+#else /*(M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)*/
+
+
+#define M4OSA_DEBUG(errorCode, msg)
+
+#endif /*(M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)*/
+
+
+
+#if (M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)
+ #define M4OSA_DEBUG_IF1(cond, errorCode, msg)\
+ M4OSA_DEBUG_IFx(cond, errorCode, msg, 1)
+#else
+ #define M4OSA_DEBUG_IF1(cond, errorCode, msg)
+#endif /*(M4OSA_DEBUG_LEVEL >= 1) || (M4OSA_SUPER_DEBUG_LEVEL >= 1)*/
+
+
+#if (M4OSA_DEBUG_LEVEL >= 2) || (M4OSA_SUPER_DEBUG_LEVEL >= 2)
+ #define M4OSA_DEBUG_IF2(cond, errorCode, msg)\
+ M4OSA_DEBUG_IFx(cond, errorCode, msg, 2)
+#else
+ #define M4OSA_DEBUG_IF2(cond, errorCode, msg)
+#endif /*(M4OSA_DEBUG_LEVEL >= 2) || (M4OSA_SUPER_DEBUG_LEVEL >= 2)*/
+
+
+#if (M4OSA_DEBUG_LEVEL >= 3) || (M4OSA_SUPER_DEBUG_LEVEL >= 3)
+ #define M4OSA_DEBUG_IF3(cond, errorCode, msg)\
+ M4OSA_DEBUG_IFx(cond, errorCode, msg, 3)
+#else
+ #define M4OSA_DEBUG_IF3(cond, errorCode, msg)
+#endif /*(M4OSA_DEBUG_LEVEL >= 3) || (M4OSA_SUPER_DEBUG_LEVEL >= 3)*/
+
+
+
+/* Trace macros */
+
+#if (M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)
+
+extern M4OSA_Void M4OSA_TRACE_traceFunction(M4OSA_UInt32 line,
+ M4OSA_Char* fileName,
+ M4OSA_CoreID coreID,
+ M4OSA_UInt32 level,
+ M4OSA_Char* stringMsg,
+ ... );
+
+
+
+#define M4OSA_TRACEx_0(msg, level)\
+ M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
+ (M4OSA_CoreID)M4TRACE_ID, level, (M4OSA_Char*)msg);
+
+
+#define M4OSA_TRACEx_1(msg, param1, level)\
+ M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
+ (M4OSA_CoreID)M4TRACE_ID, level, (M4OSA_Char*)msg, param1);
+
+
+#define M4OSA_TRACEx_2(msg, param1, param2, level)\
+ M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
+ (M4OSA_CoreID)M4TRACE_ID, level,\
+ (M4OSA_Char*)msg, param1,\
+ param2);
+
+
+#define M4OSA_TRACEx_3(msg, param1, param2, param3, level)\
+ M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
+ (M4OSA_CoreID)M4TRACE_ID, level, (M4OSA_Char*)msg,\
+ param1,param2, param3);
+
+
+#define M4OSA_TRACEx_4(msg, param1, param2, param3, param4, level)\
+ M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
+ (M4OSA_CoreID)M4TRACE_ID, level,\
+ (M4OSA_Char*)msg, param1,\
+ param2, param3, param4);
+
+
+#define M4OSA_TRACEx_5(msg, param1, param2, param3, param4, param5, level)\
+ M4OSA_TRACE_traceFunction(__LINE__, (M4OSA_Char*)__FILE__,\
+ (M4OSA_CoreID)M4TRACE_ID, level,\
+ (M4OSA_Char*)msg, param1,\
+ param2, param3, param4, param5);
+
+#endif /*(M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)*/
+
+
+
+#if (M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)
+#define M4OSA_TRACE1_0(msg)\
+ M4OSA_TRACEx_0(msg, 1)
+
+#define M4OSA_TRACE1_1(msg, param1)\
+ M4OSA_TRACEx_1(msg, param1, 1)
+
+#define M4OSA_TRACE1_2(msg, param1, param2)\
+ M4OSA_TRACEx_2(msg, param1, param2, 1)
+
+#define M4OSA_TRACE1_3(msg, param1, param2, param3)\
+ M4OSA_TRACEx_3(msg, param1, param2, param3, 1)
+
+#define M4OSA_TRACE1_4(msg, param1, param2, param3, param4)\
+ M4OSA_TRACEx_4(msg, param1, param2, param3, param4, 1)
+
+#define M4OSA_TRACE1_5(msg, param1, param2, param3, param4, param5)\
+ M4OSA_TRACEx_5(msg, param1, param2, param3, param4, param5, 1)
+
+#else /*(M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)*/
+
+#define M4OSA_TRACE1_0(msg)
+#define M4OSA_TRACE1_1(msg, param1)
+#define M4OSA_TRACE1_2(msg, param1, param2)
+#define M4OSA_TRACE1_3(msg, param1, param2, param3)
+#define M4OSA_TRACE1_4(msg, param1, param2, param3, param4)
+#define M4OSA_TRACE1_5(msg, param1, param2, param3, param4, param5)
+
+#endif /*(M4OSA_TRACE_LEVEL >= 1) || (M4OSA_SUPER_TRACE_LEVEL >= 1)*/
+
+
+#if (M4OSA_TRACE_LEVEL >= 2) || (M4OSA_SUPER_TRACE_LEVEL >= 2)
+#define M4OSA_TRACE2_0(msg)\
+ M4OSA_TRACEx_0(msg, 2)
+
+#define M4OSA_TRACE2_1(msg, param1)\
+ M4OSA_TRACEx_1(msg, param1, 2)
+
+#define M4OSA_TRACE2_2(msg, param1, param2)\
+ M4OSA_TRACEx_2(msg, param1, param2, 2)
+
+#define M4OSA_TRACE2_3(msg, param1, param2, param3)\
+ M4OSA_TRACEx_3(msg, param1, param2, param3, 2)
+
+#define M4OSA_TRACE2_4(msg, param1, param2, param3, param4)\
+ M4OSA_TRACEx_4(msg, param1, param2, param3, param4, 2)
+
+#define M4OSA_TRACE2_5(msg, param1, param2, param3, param4, param5)\
+ M4OSA_TRACEx_5(msg, param1, param2, param3, param4, param5, 2)
+
+#else /*(M4OSA_TRACE_LEVEL >= 2) || (M4OSA_SUPER_TRACE_LEVEL >= 2)*/
+
+#define M4OSA_TRACE2_0(msg)
+#define M4OSA_TRACE2_1(msg, param1)
+#define M4OSA_TRACE2_2(msg, param1, param2)
+#define M4OSA_TRACE2_3(msg, param1, param2, param3)
+#define M4OSA_TRACE2_4(msg, param1, param2, param3, param4)
+#define M4OSA_TRACE2_5(msg, param1, param2, param3, param4, param5)
+#endif /*(M4OSA_TRACE_LEVEL >= 2) || (M4OSA_SUPER_TRACE_LEVEL >= 2)*/
+
+
+#if (M4OSA_TRACE_LEVEL >= 3) || (M4OSA_SUPER_TRACE_LEVEL >= 3)
+#define M4OSA_TRACE3_0(msg)\
+ M4OSA_TRACEx_0(msg, 3)
+
+#define M4OSA_TRACE3_1(msg, param1)\
+ M4OSA_TRACEx_1(msg, param1, 3)
+
+#define M4OSA_TRACE3_2(msg, param1, param2)\
+ M4OSA_TRACEx_2(msg, param1, param2, 3)
+
+#define M4OSA_TRACE3_3(msg, param1, param2, param3)\
+ M4OSA_TRACEx_3(msg, param1, param2, param3, 3)
+
+#define M4OSA_TRACE3_4(msg, param1, param2, param3, param4)\
+ M4OSA_TRACEx_4(msg, param1, param2, param3, param4, 3)
+
+#define M4OSA_TRACE3_5(msg, param1, param2, param3, param4, param5)\
+ M4OSA_TRACEx_5(msg, param1, param2, param3, param4, param5, 3)
+
+#else /*(M4OSA_TRACE_LEVEL >= 3) || (M4OSA_SUPER_TRACE_LEVEL >= 3)*/
+
+#define M4OSA_TRACE3_0(msg)
+#define M4OSA_TRACE3_1(msg, param1)
+#define M4OSA_TRACE3_2(msg, param1, param2)
+#define M4OSA_TRACE3_3(msg, param1, param2, param3)
+#define M4OSA_TRACE3_4(msg, param1, param2, param3, param4)
+#define M4OSA_TRACE3_5(msg, param1, param2, param3, param4, param5)
+
+#endif /*(M4OSA_TRACE_LEVEL >= 3) || (M4OSA_SUPER_TRACE_LEVEL >= 3)*/
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _M4OSA_DEBUG_H_ */
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Error.h b/libvideoeditor/osal/inc/M4OSA_Error.h
new file mode 100755
index 0000000..4d59529
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Error.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Error.h
+ * @ingroup OSAL
+ * @brief Definition of common error types
+ * @note This file contains macros to generate and analyze error codes.
+ ************************************************************************
+*/
+
+
+#ifndef M4OSA_ERROR_H
+#define M4OSA_ERROR_H
+
+#include "M4OSA_Types.h"
+
+/** M4OSA_ERR is a 32 bits unsigned integer.
+ * To sort returned code, a specific naming convention must be followed:
+ * - Severity (2 bits): It may be either 0b00 (no error), 0b01 (warning) or
+ * 0b10 (fatal error)
+ * - Core ID (14 bits): It is a unique ID for each core component
+ * - ErrorID (16 bits): It is the specific error code
+
+ * EACH CORE COMPONENT FUNCTION SHOULD RETURN AN M4OSA_ERR
+*/
+typedef M4OSA_UInt32 M4OSA_ERR;
+
+#define M4_OK 0
+#define M4_WAR 1
+#define M4_ERR 2
+
+
+/* Macro to process M4OSA_ERR */
+
+/** This macro tests if the provided M4OSA_ERR is a warning or not*/
+#define M4OSA_ERR_IS_WARNING(error) ((((error)>>30) == M4_WAR) ? 1:0)
+
+/** This macro tests if the provided M4OSA_ERR is a fatal error or not*/
+#define M4OSA_ERR_IS_ERROR(error) ((((error)>>30) == M4_ERR) ? 1:0)
+
+/** This macro returns an error code according to the 3 provided fields:
+ * @arg severity: (IN) [M4OSA_UInt32] Severity to put in the error code
+ * @arg coreID: (IN) [M4OSA_UInt32] CoreID to put in the error code
+ * @arg errorID: (IN) [M4OSA_UInt32] ErrorID to put in the error code*/
+#define M4OSA_ERR_CREATE(severity, coreID, errorID)\
+ (M4OSA_Int32)((((M4OSA_UInt32)severity)<<30)+((((M4OSA_UInt32)coreID)&0x003FFF)<<16)+(((M4OSA_UInt32)errorID)&0x00FFFF))
+
+/** This macro extracts the 3 fields from the error:
+ * @arg error: (IN) [M4OSA_ERR] Error code
+ * @arg severity: (OUT) [M4OSA_UInt32] Severity to put in the error code
+ * @arg coreID: (OUT) [M4OSA_UInt32] CoreID to put in the error code
+ * @arg errorID: (OUT) [M4OSA_UInt32] ErrorID to put in the error code*/
+#define M4OSA_ERR_SPLIT(error, severity, coreID, errorID)\
+ { severity=(M4OSA_UInt32)((error)>>30);\
+ coreID=(M4OSA_UInt32)(((error)>>16)&0x003FFF);\
+ (M4OSA_UInt32)(errorID=(error)&0x00FFFF); }
+
+
+/* "fake" CoreID, is used to report an unknown CoreID. Used by the trace system
+when the core ID macro isn't defined. Defined here instead of CoreID.h to avoid
+introducing dependencies to common/inc. */
+
+#define M4UNKNOWN_COREID 0x3FFF /* max possible CoreID */
+
+#define M4_COMMON 0x00 /**<Common*/
+#define M4MP4_COMMON 0x01 /**<Core MP4 (common)*/
+#define M4MP4_WRITER 0x02 /**<Core MP4 writer*/
+#define M4MP4_READER 0x03 /**<Core MP4 reader*/
+#define M4RTSP_COMMON 0x11 /**<Core RTSP common*/
+#define M4RTSP_WRITER 0x12 /**<Core RTSP transmitter*/
+#define M4RTSP_READER 0x13 /**<Core RTSP receiver*/
+#define M4RTP_WRITER 0x14 /**<Core RTP/RTCP receiver*/
+#define M4RTP_READER 0x15 /**<Core RTP/RTCP transmitter*/
+#define M4SAP_WRITER 0x16 /**<Core SAP transmitter*/
+#define M4SAP_READER 0x17 /**<Core SAP receiver*/
+#define M4DVBH_READER 0x18 /**<Core DVBH receiver*/
+#define M4SDP_WRITER 0x22 /**<Core SDP writer*/
+#define M4SDP_READER 0x31 /**<Core SDP reader*/
+#define M4PAK_AMR 0x32 /**<Core packetizer AMR (RFC3267)*/
+#define M4DEPAK_AMR 0x33 /**<Core de-packetizer AMR (RFC3267)*/
+#define M4PAK_H263 0x34 /**<Core packetizer H263 (RFC2429)*/
+#define M4DEPAK_H263 0x35 /**<Core de-packetizer H263(RFC2429)*/
+#define M4PAK_SIMPLE 0x36 /**<Core packetizer SimpleDraft (RFC xxxx)*/
+#define M4DEPAK_SIMPLE 0x37 /**<Core de-packetizer SimpleDraft (RFC xxxx)*/
+#define M4PAK_3016_VIDEO 0x38 /**<Core packetizer RFC3016 video*/
+#define M4DEPAK_3016_VIDEO 0x39 /**<Core de-packetizer RFC3016 video*/
+#define M4PAK_3016_AUDIO 0x3A /**<Core packetizer RFC3016 audio (LATM)*/
+#define M4DEPAK_3016_AUDIO 0x3B /**<Core de-packetizer RFC3016 audio (LATM)*/
+#define M4DEPAK_H264 0x3C /**<Core de-packetizer H264*/
+#define M4DEPAK_REALV 0x3D /**<Core de-packetizer Real Video */
+#define M4DEPAK_REALA 0x3E /**<Core de-packetizer Real Audio */
+#define M4RDT_READER 0x3F /**<Core RDT receiver*/
+#define M4TCP_DMUX 0x50 /**<Core TCP demux*/
+#define M4IOD_PARSER 0x51 /**<Core IOD parser*/
+#define M4OSA_FILE_COMMON 0x61 /**<OSAL file common*/
+#define M4OSA_FILE_WRITER 0x62 /**<OSAL file writer*/
+#define M4OSA_FILE_READER 0x63 /**<OSAL file reader*/
+#define M4OSA_FILE_EXTRA 0x64 /**<OSAL file extra*/
+#define M4OSA_DIRECTORY 0x65 /**<OSAL directory*/
+#define M4OSA_SOCKET 0x71 /**<OSAL socket (both reader and writer)*/
+#define M4OSA_THREAD 0x81 /**<OSAL thread*/
+#define M4OSA_MUTEX 0x82 /**<OSAL mutex*/
+#define M4OSA_SEMAPHORE 0x83 /**<OSAL semaphore*/
+#define M4OSA_CLOCK 0x84 /**<OSAL clock*/
+#define M4OSA_MEMORY 0x91 /**<OSAL memory*/
+#define M4CALL_BACK 0xA1 /**<Call Back error*/
+#define M4OSA_URI 0xB1 /**<OSAL URI handler*/
+#define M4OSA_STRING 0xB2 /**<OSAL string*/
+#define M4SYS_CMAPI 0xB3 /**<SYSTEM Common Medi API*/
+#define M4OSA_CHARSTAR 0xB4 /**<OSAL CharStar*/
+#define M4REACTOR 0xC1 /**<Core reactor*/
+#define M4TEST 0xD1 /**<Test component*/
+#define M4STACK 0xE1 /**< Core ID of the integrated stack*/
+#define M4STACK_REAL 0xE2 /**<Core ID of the Real integrated stack */
+#define M4TOOL_LBVT_PARAM 0xF1 /**<LB_VT config file manager*/
+#define M4TOOL_LINK_LIST 0xF2 /**<Tool linked list*/
+#define M4TOOL_BASE64 0xF3 /**<Core base64 encoder/decoder*/
+
+
+
+/* Definition of common error codes */
+/** there is no error*/
+#define M4NO_ERROR 0x00000000
+
+/** At least one parameter is NULL*/
+#define M4ERR_PARAMETER M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000001)
+/** This function cannot be called now*/
+#define M4ERR_STATE M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000002)
+/** There is no more memory available*/
+#define M4ERR_ALLOC M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000003)
+/** Provided context is not a valid one*/
+#define M4ERR_BAD_CONTEXT M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000004)
+#define M4ERR_CONTEXT_FAILED M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000005)
+#define M4ERR_BAD_STREAM_ID M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000006)
+/** The optionID is not a valid one*/
+#define M4ERR_BAD_OPTION_ID M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000007)
+/** This option is a write only one*/
+#define M4ERR_WRITE_ONLY M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000008)
+/** This option is a read only one*/
+#define M4ERR_READ_ONLY M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x000009)
+/** This function is not supported yet*/
+#define M4ERR_NOT_IMPLEMENTED M4OSA_ERR_CREATE(M4_ERR,M4_COMMON,0x00000A)
+
+#define M4ERR_UNSUPPORTED_MEDIA_TYPE M4OSA_ERR_CREATE(M4_ERR, M4_COMMON, 0x00000B)
+
+#define M4WAR_NO_DATA_YET M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000001)
+#define M4WAR_NO_MORE_STREAM M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000002)
+#define M4WAR_INVALID_TIME M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000003)
+#define M4WAR_NO_MORE_AU M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000004)
+#define M4WAR_TIME_OUT M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000005)
+/** The buffer is full*/
+#define M4WAR_BUFFER_FULL M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000006)
+/* The server asks for a redirection */
+#define M4WAR_REDIRECT M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000007)
+#define M4WAR_TOO_MUCH_STREAMS M4OSA_ERR_CREATE(M4_WAR,M4_COMMON,0x000008)
+/* SF Codec detected INFO_FORMAT_CHANGE during decode */
+#define M4WAR_INFO_FORMAT_CHANGE M4OSA_ERR_CREATE(M4_WAR, M4_COMMON, 0x000009)
+
+#endif /*M4OSA_ERROR_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Export.h b/libvideoeditor/osal/inc/M4OSA_Export.h
new file mode 100755
index 0000000..b7a6e81
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Export.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Export.h
+ * @brief Data access export types for Android
+ * @note This file defines types which must be
+ * used to import or export any function.
+ ************************************************************************
+*/
+
+#ifndef M4OSA_EXPORT_H
+#define M4OSA_EXPORT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /*__cplusplus*/
+
+/************************************/
+/* OSAL EXPORTS */
+/************************************/
+
+#define M4OSAL_CHARSTAR_EXPORT_TYPE /**< OSAL CHAR_STAR */
+#define M4OSAL_CLOCK_EXPORT_TYPE /**< OSAL CLOCK */
+#define M4OSAL_DATE_EXPORT_TYPE /**< OSAL DATE */
+#define M4OSAL_FILE_EXPORT_TYPE /**< OSAL FILE */
+#define M4OSAL_REALTIME_EXPORT_TYPE /**< OSAL REAL TIME */
+#define M4OSAL_SOCKET_EXPORT_TYPE /**< SOCKET */
+#define M4OSAL_STRING_EXPORT_TYPE /**< OSAL STRING */
+#define M4OSAL_URI_EXPORT_TYPE /**< OSAL URI */
+#define M4OSAL_MEMORY_EXPORT_TYPE /**< OSAL MEMORY */
+#define M4OSAL_TRACE_EXPORT_TYPE /**< OSAL TRACE */
+#define M4OSAL_TOOL_TIMER_EXPORT_TYPE /**< OSAL TOOL TIMER */
+#define M4OSAL_SYSTEM_CM_EXPORT_TYPE /**< SYSTEM COMMON API */
+#define M4OSAL_LINKED_LIST_EXPORT_TYPE /**< TOOL LINKED LIST */
+#define M4OSAL_MEMORY_MANAGER_EXPORT_TYPE /**< MEMORY MANAGER */
+#define M4OSAL_TRACE_MANAGER_EXPORT_TYPE /**< TOOL TRACE MANAGER */
+#define M4VPS_EXPORT_TYPE /**< VPS API */
+#define M4AP_EXPORT_TYPE /**< AUDIO PRESENTERS */
+#define M4VP_EXPORT_TYPE /**< VIDEO PRESENTERS */
+#define M4CB_EXPORT_TYPE /**< Call back */
+
+#ifdef __cplusplus
+}
+#endif /*__cplusplus*/
+
+#endif /*M4OSA_EXPORT_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_FileCommon.h b/libvideoeditor/osal/inc/M4OSA_FileCommon.h
new file mode 100755
index 0000000..f2afb8c
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_FileCommon.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_FileCommon.h
+ * @ingroup OSAL
+ * @brief File common
+ * @note This file declares functions and types used by both the file
+ * writer and file reader.
+ ************************************************************************
+*/
+
+
+#ifndef M4OSA_FILECOMMON_H
+#define M4OSA_FILECOMMON_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Time.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_OptionID.h"
+
+
+typedef M4OSA_Int32 M4OSA_FilePosition;
+
+/** This enum defines the application mode access.
+ * i.e., the application uses a file descriptor to read, to write, or
+ * both to read and write at the same time.
+ * This structure is used for the MM project only. It enables reading from and
+ * writing to a file with one descriptor.
+ */
+typedef enum
+{
+ M4OSA_kDescNoneAccess = 0x00,
+ M4OSA_kDescReadAccess = 0x01, /** The Descriptor reads only from the file */
+ M4OSA_kDescWriteAccess = 0x02, /** The Descriptor writes only from the file*/
+ M4OSA_kDescRWAccess = 0x03 /** The Descriptor reads and writes from/in the file*/
+} M4OSA_DescrModeAccess;
+
+
+/** This enum defines the file mode access. Text mode and binary mode
+ cannot be set together.*/
+typedef enum
+{
+ /** The file must be accessed in read only mode*/
+ M4OSA_kFileRead = 0x01,
+ /** The file must be accessed in write only mode*/
+ M4OSA_kFileWrite = 0x02,
+ /** The file must be accessed in append mode (An existing file must
+ be available to append data)*/
+ M4OSA_kFileAppend = 0x04,
+ /** If the file does not exist, it will be created*/
+ M4OSA_kFileCreate = 0x08,
+ /** Data are processed as binary one, there is no data management*/
+ M4OSA_kFileIsTextMode = 0x10
+} M4OSA_FileModeAccess;
+
+
+/** This type is used to store a date.*/
+typedef struct
+{
+ /** Time scale (tick number per second)*/
+ M4OSA_UInt32 timeScale;
+ /** Date expressed in the time scale*/
+ M4OSA_Time time;
+ /** Year of the absolute time (1900, 1970 or 2000)*/
+ M4OSA_UInt32 referenceYear;
+} M4OSA_Date;
+
+
+/** This structure defines the file attributes*/
+typedef struct
+{
+ /** The file mode access*/
+ M4OSA_FileModeAccess modeAccess;
+ /** The creation date*/
+ M4OSA_Date creationDate;
+ /** The last modification date*/
+ M4OSA_Date modifiedDate;
+ /** The last access date (read)*/
+ M4OSA_Date lastAccessDate;
+} M4OSA_FileAttribute;
+
+
+
+/** This enum defines the seek behavior*/
+typedef enum M4OSA_FileSeekAccessMode
+{
+ /** Relative to the beginning of the file*/
+ M4OSA_kFileSeekBeginning = 0x01,
+ /** Relative to the end of the file*/
+ M4OSA_kFileSeekEnd = 0x02,
+ /** Relative to the current file position*/
+ M4OSA_kFileSeekCurrent = 0x03
+} M4OSA_FileSeekAccessMode;
+
+
+/* Error codes */
+#define M4ERR_FILE_NOT_FOUND M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_COMMON, 0x000001)
+#define M4ERR_FILE_LOCKED M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_COMMON, 0x000002)
+#define M4ERR_FILE_BAD_MODE_ACCESS M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_COMMON, 0x000003)
+#define M4ERR_FILE_INVALID_POSITION M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_COMMON, 0x000004)
+
+
+#endif /*M4OSA_FILECOMMON_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_FileCommon_priv.h b/libvideoeditor/osal/inc/M4OSA_FileCommon_priv.h
new file mode 100755
index 0000000..1eba456
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_FileCommon_priv.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_FileCommon_priv.h
+ * @ingroup OSAL
+ * @brief File common private for Android
+ * @note This file declares functions and types used by both the file
+ * writer and file reader.
+ ************************************************************************
+*/
+
+#ifndef M4OSA_FILECOMMON_PRIV_H
+#define M4OSA_FILECOMMON_PRIV_H
+
+
+#include "M4OSA_FileCommon.h"
+#include <stdio.h>
+
+#define M4OSA_isAccessModeActived(compound_mode_access,elementary_mode_access)\
+ (((compound_mode_access)&(elementary_mode_access))? 1:0)
+
+
+typedef enum M4OSA_LastSeek
+{
+ SeekNone,
+ SeekRead,
+ SeekWrite
+} M4OSA_LastSeek;
+
+/** This structure defines the file context*/
+typedef struct {
+ M4OSA_UInt32 coreID_read;
+ M4OSA_UInt32 coreID_write;
+ FILE* file_desc;
+ /** The name of the URL */
+ M4OSA_Char* url_name;
+ /** The name of the file */
+ M4OSA_Char* file_name;
+ /** The size in bytes of the file */
+ M4OSA_FilePosition file_size;
+ /** The file mode access used to open the file */
+ M4OSA_FileModeAccess access_mode;
+ M4OSA_LastSeek current_seek;
+ M4OSA_FilePosition read_position;
+ M4OSA_FilePosition write_position;
+ M4OSA_Bool b_is_end_of_file;
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_Context semaphore_context;
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+
+ /* These two variables were added to manage case where a file
+ * is opened in read and write mode with one descriptor */
+ M4OSA_DescrModeAccess m_DescrModeAccess;
+ M4OSA_UInt32 m_uiLockMode;
+
+
+} M4OSA_FileContext;
+
+
+
+M4OSA_ERR M4OSA_fileCommonOpen(M4OSA_UInt16 core_id,
+ M4OSA_Context* context,
+ M4OSA_Char* URL,
+ M4OSA_FileModeAccess fileModeAccess);
+
+M4OSA_ERR M4OSA_fileCommonClose(M4OSA_UInt16 core_id,
+ M4OSA_Context context);
+
+M4OSA_ERR M4OSA_fileCommonGetAttribute(M4OSA_Context context,
+ M4OSA_FileAttribute* attribute);
+
+M4OSA_ERR M4OSA_fileCommonGetURL(M4OSA_Context context,
+ M4OSA_Char** url);
+
+M4OSA_ERR M4OSA_fileCommonGetFilename(M4OSA_Char* url,
+ M4OSA_Char** filename);
+
+M4OSA_ERR M4OSA_fileCommonSeek(M4OSA_Context context,
+ M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* position);
+
+#ifdef UTF_CONVERSION
+M4OSA_ERR M4OSA_ToUTF8_OSAL (M4OSA_Void *pBufferIn,
+ M4OSA_UInt8 *pBufferOut,
+ M4OSA_UInt32 *bufferOutSize);
+#endif /*UTF_CONVERSION*/
+
+
+#endif /*M4OSA_FILECOMMON_PRIV_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_FileReader.h b/libvideoeditor/osal/inc/M4OSA_FileReader.h
new file mode 100755
index 0000000..c22756d
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_FileReader.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_FileReader.h
+ * @ingroup OSAL
+ * @brief File reader
+ * @note This file declares functions and types to read a file.
+ ************************************************************************
+*/
+
+
+#ifndef M4OSA_FILEREADER_H
+#define M4OSA_FILEREADER_H
+
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_FileCommon.h"
+#include "M4OSA_Memory.h"
+
+
+
+/** This enum defines the option ID to be used in M4OSA_FileReadGetOption()
+ and M4OSA_FileReadSetOption()*/
+typedef enum M4OSA_FileReadOptionID
+{
+ /** Get the file size (M4OSA_fpos*)*/
+ M4OSA_kFileReadGetFileSize
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x01),
+
+ /** Get the file attributes (M4OSA_FileAttribute*)*/
+ M4OSA_kFileReadGetFileAttribute
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x02),
+
+ /** Get the file URL, provided by the M4OSA_FileReadOpen (M4OSA_Char*)*/
+ M4OSA_kFileReadGetURL
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x03),
+
+ /** Get the file position (M4OSA_fpos*)*/
+ M4OSA_kFileReadGetFilePosition
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x04),
+
+    /** Check end of file: TRUE if the EOF has been reached, FALSE otherwise
+        (M4OSA_Bool*)*/
+ M4OSA_kFileReadIsEOF
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_READER, 0x05),
+
+ /** Check lock of file */
+ M4OSA_kFileReadLockMode
+ = M4OSA_OPTION_ID_CREATE(M4_READWRITE, M4OSA_FILE_READER, 0x06)
+
+} M4OSA_FileReadOptionID;
+
+
+
+
+
+/** This structure stores the set of function pointers used to access a
+    file in read mode*/
+typedef struct
+{
+ M4OSA_ERR (*openRead) (M4OSA_Context* context,
+ M4OSA_Void* fileDescriptor,
+ M4OSA_UInt32 fileModeAccess);
+
+ M4OSA_ERR (*readData) (M4OSA_Context context,
+ M4OSA_MemAddr8 buffer,
+ M4OSA_UInt32* size);
+
+ M4OSA_ERR (*seek) (M4OSA_Context context,
+ M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* position);
+
+ M4OSA_ERR (*closeRead) (M4OSA_Context context);
+
+ M4OSA_ERR (*setOption) (M4OSA_Context context,
+ M4OSA_FileReadOptionID optionID,
+ M4OSA_DataOption optionValue);
+
+ M4OSA_ERR (*getOption) (M4OSA_Context context,
+ M4OSA_FileReadOptionID optionID,
+ M4OSA_DataOption *optionValue);
+} M4OSA_FileReadPointer;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadOpen (M4OSA_Context* context,
+ M4OSA_Void* fileDescriptor,
+ M4OSA_UInt32 fileModeAccess);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadData (M4OSA_Context context,
+ M4OSA_MemAddr8 buffer,
+ M4OSA_UInt32* size);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadSeek (M4OSA_Context context,
+ M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* position);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadClose (M4OSA_Context context);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadGetOption (M4OSA_Context context,
+ M4OSA_FileReadOptionID optionID,
+ M4OSA_DataOption *optionValue);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileReadSetOption (M4OSA_Context context,
+ M4OSA_FileReadOptionID optionID,
+ M4OSA_DataOption optionValue);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*M4OSA_FILEREADER_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_FileReader_priv.h b/libvideoeditor/osal/inc/M4OSA_FileReader_priv.h
new file mode 100755
index 0000000..327b086
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_FileReader_priv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_FileReader_priv.h
+ * @ingroup OSAL
+ * @brief File reader private for Android
+ * @note
+ ************************************************************************
+*/
+
+#ifndef M4OSA_FILEREADER_PRIV_H
+#define M4OSA_FILEREADER_PRIV_H
+
+
+/** These defines enable/disable option IDs*/
+
+#define M4OSA_OPTIONID_FILE_READ_GET_FILE_SIZE M4OSA_TRUE
+#define M4OSA_OPTIONID_FILE_READ_GET_FILE_ATTRIBUTE M4OSA_TRUE
+#define M4OSA_OPTIONID_FILE_READ_GET_URL M4OSA_TRUE
+#define M4OSA_OPTIONID_FILE_READ_GET_FILE_POSITION M4OSA_TRUE
+#define M4OSA_OPTIONID_FILE_READ_IS_EOF M4OSA_TRUE
+
+#endif /*M4OSA_FILEREADER_PRIV_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_FileWriter.h b/libvideoeditor/osal/inc/M4OSA_FileWriter.h
new file mode 100755
index 0000000..9a11331
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_FileWriter.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_FileWriter.h
+ * @ingroup OSAL
+ * @brief File writer
+ * @note This file declares functions and types to write in a file.
+ ************************************************************************
+*/
+
+
+#ifndef M4OSA_FILEWRITER_H
+#define M4OSA_FILEWRITER_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_FileCommon.h"
+#include "M4OSA_Memory.h"
+
+
+/** This enum defines the option ID to be used in M4OSA_FileWriteGetOption()
+and M4OSA_FileWriteSetOption()*/
+typedef enum
+{
+ /** Get the file URL, provided by the M4OSA_FileWriteOpen (M4OSA_Char*)*/
+ M4OSA_kFileWriteGetURL
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x01),
+
+ /** Get the file attributes (M4OSA_FileAttribute*)*/
+ M4OSA_kFileWriteGetAttribute
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x02),
+
+ /** Get the reader context for read & write file. (M4OSA_Context*)*/
+ M4OSA_kFileWriteGetReaderContext
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x03),
+
+ M4OSA_kFileWriteGetFilePosition
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x04),
+
+ M4OSA_kFileWriteGetFileSize
+ = M4OSA_OPTION_ID_CREATE(M4_READ, M4OSA_FILE_WRITER, 0x05),
+
+
+ M4OSA_kFileWriteLockMode
+ = M4OSA_OPTION_ID_CREATE(M4_READWRITE, M4OSA_FILE_WRITER, 0x06),
+
+
+ /** Check lock of file */
+ M4OSA_kFileWriteDescMode
+ = M4OSA_OPTION_ID_CREATE(M4_READWRITE, M4OSA_FILE_WRITER, 0x07)
+} M4OSA_FileWriteOptionID;
+
+
+/** This structure stores the set of function pointers used to access a file
+    in write mode*/
+typedef struct
+{
+ M4OSA_ERR (*openWrite) (M4OSA_Context* context,
+ M4OSA_Void* fileDescriptor,
+ M4OSA_UInt32 fileModeAccess);
+
+ M4OSA_ERR (*writeData) (M4OSA_Context context,
+ M4OSA_MemAddr8 data,
+ M4OSA_UInt32 size);
+
+ M4OSA_ERR (*seek) (M4OSA_Context context,
+ M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* position);
+
+ M4OSA_ERR (*Flush) (M4OSA_Context context);
+ M4OSA_ERR (*closeWrite) (M4OSA_Context context);
+ M4OSA_ERR (*setOption) (M4OSA_Context context,
+ M4OSA_OptionID optionID,
+ M4OSA_DataOption optionValue);
+
+ M4OSA_ERR (*getOption) (M4OSA_Context context,
+ M4OSA_OptionID optionID,
+ M4OSA_DataOption* optionValue);
+} M4OSA_FileWriterPointer;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteOpen (M4OSA_Context* context,
+ M4OSA_Void* fileDescriptor,
+ M4OSA_UInt32 fileModeAccess);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteData (M4OSA_Context context,
+ M4OSA_MemAddr8 data,
+ M4OSA_UInt32 size);
+
+/* Pierre Lebeaupin 2008/04/29: WARNING! the feature of file*Seek which returns
+the position in the file (from the beginning) after the seek in the "position"
+pointer has been found to be unreliably (or sometimes not at all) implemented
+in some OSALs, so relying on it is strongly discouraged, unless you really want
+to have a pizza evening. */
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteSeek (M4OSA_Context context,
+ M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* position);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteClose (M4OSA_Context context);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteFlush (M4OSA_Context context);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteGetOption (M4OSA_Context context,
+ M4OSA_OptionID optionID,
+ M4OSA_DataOption* optionValue);
+
+M4OSAL_FILE_EXPORT_TYPE M4OSA_ERR M4OSA_fileWriteSetOption (M4OSA_Context context,
+ M4OSA_OptionID optionID,
+ M4OSA_DataOption optionValue);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /*M4OSA_FILEWRITER_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_FileWriter_priv.h b/libvideoeditor/osal/inc/M4OSA_FileWriter_priv.h
new file mode 100755
index 0000000..9d972f4
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_FileWriter_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_FileWriter_priv.h
+ * @ingroup OSAL
+ * @brief File writer private for Android
+************************************************************************
+*/
+
+#ifndef M4OSA_FILEWRITER_PRIV_H
+#define M4OSA_FILEWRITER_PRIV_H
+
+
+/** These defines enable/disable option IDs*/
+
+#define M4OSA_OPTIONID_FILE_WRITE_GET_FILE_SIZE M4OSA_TRUE
+#define M4OSA_OPTIONID_FILE_WRITE_GET_FILE_ATTRIBUTE M4OSA_TRUE
+#define M4OSA_OPTIONID_FILE_WRITE_GET_READER_CONTEXT M4OSA_TRUE
+#define M4OSA_OPTIONID_FILE_WRITE_GET_FILE_POSITION M4OSA_TRUE
+#define M4OSA_OPTIONID_FILE_WRITE_GET_URL M4OSA_TRUE
+
+#endif /*M4OSA_FILEWRITER_PRIV_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Memory.h b/libvideoeditor/osal/inc/M4OSA_Memory.h
new file mode 100755
index 0000000..a4d15cc
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Memory.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Memory.h
+ * @ingroup OSAL
+ * @brief Memory allocation
+ * @note This file defines function prototypes to allocate
+ * and free memory.
+ ************************************************************************
+*/
+
+#ifndef M4OSA_MEMORY_H
+#define M4OSA_MEMORY_H
+
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h" /*for M4OSA_CoreID definition*/
+
+typedef M4OSA_Int32* M4OSA_MemAddr32;
+typedef M4OSA_Int8* M4OSA_MemAddr8;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+M4OSAL_MEMORY_EXPORT_TYPE extern M4OSA_MemAddr32 M4OSA_32bitAlignedMalloc (M4OSA_UInt32 size,
+ M4OSA_CoreID coreID,
+ M4OSA_Char* string);
+
+M4OSAL_MEMORY_EXPORT_TYPE extern M4OSA_ERR M4OSA_randInit(void);
+
+
+M4OSAL_MEMORY_EXPORT_TYPE extern M4OSA_ERR M4OSA_rand(M4OSA_Int32* out_value,
+ M4OSA_UInt32 max_value);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Mutex.h b/libvideoeditor/osal/inc/M4OSA_Mutex.h
new file mode 100755
index 0000000..d496bdd
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Mutex.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Mutex.h
+ * @ingroup OSAL
+ * @brief mutex API
+ ************************************************************************
+*/
+
+
+#ifndef M4OSA_MUTEX_H
+#define M4OSA_MUTEX_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+
+#endif
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_mutexOpen( M4OSA_Context* context );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_mutexLock( M4OSA_Context context,
+ M4OSA_UInt32 timeout );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_mutexUnlock( M4OSA_Context context );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_mutexClose( M4OSA_Context context );
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /*M4OSA_MUTEX_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_OptionID.h b/libvideoeditor/osal/inc/M4OSA_OptionID.h
new file mode 100755
index 0000000..61b9044
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_OptionID.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_OptionID.h
+ * @ingroup OSAL
+ * @brief Option ID macros
+ * @note This file defines macros to generate and analyze option ID.
+ * Option ID is used by M4YYY_ZZsetOption() and
+ * M4YYY_ZZgetOption() functions.
+ ************************************************************************
+*/
+
+#ifndef M4OSA_OPTIONID_H
+#define M4OSA_OPTIONID_H
+
+
+#include "M4OSA_Types.h"
+
+/** M4OSA_OptionID is a 32 bits unsigned integer.
+- Right access (2 bits): Some options may have read only, write only or read
+ and write access
+- Core ID (14 bits): It is a unique ID for each core component
+- SubOption ID (16 bits): To select which option in a specific core component
+*/
+typedef M4OSA_UInt32 M4OSA_OptionID;
+typedef void* M4OSA_DataOption;
+
+#define M4_READ 0x01
+#define M4_WRITE 0x02
+#define M4_READWRITE 0x03
+
+/* Macro to process M4OSA_OptionID */
+
+/** This macro creates an optionID given read/write access,
+ coreID and SubOptionID*/
+#define M4OSA_OPTION_ID_CREATE(right, coreID, errorID)\
+ (M4OSA_Int32)((((((M4OSA_UInt32)right)&0x03)<<30))+((((M4OSA_UInt32)coreID)&0x003FFF)<<16)+(((M4OSA_UInt32)errorID)&0x00FFFF))
+
+/** This macro splits an optionID into read/write access,
+ coreID and SubOptionID*/
+#define M4OSA_OPTION_ID_SPLIT(optionID, right, coreID, errorID)\
+ { right=(M4OSA_UInt8)((optionID)>>30);\
+ coreID=(M4OSA_UInt16)(((optionID)>>16)&0x00003FFF);\
+ errorID=(M4OSA_UInt32)((optionID)&0x0000FFFF); }
+
+/** This macro returns 1 if the optionID is writable, 0 otherwise*/
+#define M4OSA_OPTION_ID_IS_WRITABLE(optionID) ((((optionID)>>30)&M4_WRITE)!=0)
+
+/** This macro returns 1 if the optionID is readable, 0 otherwise*/
+#define M4OSA_OPTION_ID_IS_READABLE(optionID) ((((optionID)>>30)&M4_READ)!=0)
+
+/** This macro returns 1 if the optionID has its core ID equal to 'coreID', 0 otherwise*/
+#define M4OSA_OPTION_ID_IS_COREID(optionID, coreID)\
+ (((((optionID)>>16)&0x003FFF) == (coreID)) ? M4OSA_TRUE:M4OSA_FALSE)
+
+
+#endif /*M4OSA_OPTIONID_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Semaphore.h b/libvideoeditor/osal/inc/M4OSA_Semaphore.h
new file mode 100755
index 0000000..2630454
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Semaphore.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Semaphore.h
+ * @ingroup OSAL
+ * @brief semaphore API
+ ************************************************************************
+*/
+
+#ifndef M4OSA_SEMAPHORE_H
+#define M4OSA_SEMAPHORE_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_semaphoreOpen( M4OSA_Context* context,
+ M4OSA_UInt32 initialNumber );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_semaphorePost( M4OSA_Context context );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_semaphoreWait( M4OSA_Context context,
+ M4OSA_Int32 timeout );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_semaphoreClose( M4OSA_Context context );
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /*M4OSA_SEMAPHORE_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Thread.h b/libvideoeditor/osal/inc/M4OSA_Thread.h
new file mode 100755
index 0000000..ca96afb
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Thread.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Thread.h
+ * @ingroup OSAL
+ * @brief thread API
+ ************************************************************************
+*/
+
+
+#ifndef M4OSA_THREAD_H
+#define M4OSA_THREAD_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_OptionID.h"
+
+
+/* Definition of common error codes */
+#define M4ERR_THREAD_NOT_STARTED M4OSA_ERR_CREATE(M4_ERR,M4OSA_THREAD,0x000001)
+
+
+typedef enum
+{
+ M4OSA_kThreadOpened = 0x100,
+ M4OSA_kThreadStarting = 0x200,
+ M4OSA_kThreadRunning = 0x300,
+ M4OSA_kThreadStopping = 0x400,
+ M4OSA_kThreadClosed = 0x500
+} M4OSA_ThreadState;
+
+
+
+typedef enum
+{
+ M4OSA_kThreadHighestPriority = 0x000,
+ M4OSA_kThreadHighPriority = 0x100,
+ M4OSA_kThreadNormalPriority = 0x200,
+ M4OSA_kThreadLowPriority = 0x300,
+ M4OSA_kThreadLowestPriority = 0x400
+} M4OSA_ThreadPriorityLevel;
+
+
+
+typedef enum
+{
+ M4OSA_ThreadStarted
+ = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x01),
+
+ M4OSA_ThreadStopped
+ = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x02),
+
+ M4OSA_ThreadPriority
+ = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x03),
+
+ M4OSA_ThreadName
+ = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x04),
+
+ M4OSA_ThreadStackSize
+ = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x05),
+
+ M4OSA_ThreadUserData
+ = M4OSA_OPTION_ID_CREATE(M4_READ|M4_WRITE, M4OSA_THREAD, 0x06)
+
+} M4OSA_ThreadOptionID;
+
+
+
+typedef M4OSA_ERR (*M4OSA_ThreadDoIt)(M4OSA_Void*);
+typedef M4OSA_Void (*M4OSA_ThreadCallBack)(M4OSA_Context, M4OSA_Void*);
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncOpen( M4OSA_Context* context,
+ M4OSA_ThreadDoIt func );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncStart( M4OSA_Context context,
+ M4OSA_Void* param );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncStop( M4OSA_Context context );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncClose( M4OSA_Context context );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncGetState( M4OSA_Context context,
+ M4OSA_ThreadState* state );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSleep( M4OSA_UInt32 time );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncSetOption(M4OSA_Context context,
+ M4OSA_ThreadOptionID option,
+ M4OSA_DataOption value );
+
+
+M4OSAL_REALTIME_EXPORT_TYPE M4OSA_ERR M4OSA_threadSyncGetOption(M4OSA_Context context,
+ M4OSA_ThreadOptionID option,
+ M4OSA_DataOption* value );
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /*M4OSA_THREAD_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Thread_priv.h b/libvideoeditor/osal/inc/M4OSA_Thread_priv.h
new file mode 100755
index 0000000..b424b05
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Thread_priv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Thread_priv.h
+ * @ingroup OSAL
+ * @brief Thread private for Android
+ * @note
+ ************************************************************************
+*/
+
+#ifndef M4OSA_THREAD_PRIV_H
+#define M4OSA_THREAD_PRIV_H
+
+
+#include "M4OSA_Types.h"
+
+
+/* Context for the thread */
+typedef struct M4OSA_ThreadContext {
+ M4OSA_UInt32 coreID; /* thread context identifiant */
+ pthread_t threadID; /* thread identifier. */
+ M4OSA_Char* name; /* thread name */
+ M4OSA_UInt32 stackSize; /* thread stackSize in bytes */
+ M4OSA_ThreadDoIt func; /* thread function */
+ M4OSA_Void* param; /* thread parameter */
+/*
+ M4OSA_Void* userData; / * thread user data * /
+*/
+ M4OSA_ThreadState state; /* thread automaton state */
+ M4OSA_Context stateMutex; /* mutex for thread state management */
+/*
+ M4OSA_ThreadCallBack startCallBack; / * starting thread call back * /
+ M4OSA_ThreadCallBack stopCallBack; / * stopping thread call back * /
+*/
+ M4OSA_Context semStartStop; /* semaphore for start and stop do_it */
+ M4OSA_ThreadPriorityLevel priority; /* thread priority level */
+} M4OSA_ThreadContext ;
+
+
+/** These defines enable/disable option IDs*/
+#define M4OSA_OPTIONID_THREAD_STARTED M4OSA_TRUE
+#define M4OSA_OPTIONID_THREAD_STOPPED M4OSA_TRUE
+#define M4OSA_OPTIONID_THREAD_PRIORITY M4OSA_TRUE
+#define M4OSA_OPTIONID_THREAD_STACK_SIZE M4OSA_TRUE
+#define M4OSA_OPTIONID_THREAD_NAME M4OSA_TRUE
+#define M4OSA_OPTIONID_THREAD_USER_DATA M4OSA_TRUE
+
+#endif /*M4OSA_THREAD_PRIV_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Time.h b/libvideoeditor/osal/inc/M4OSA_Time.h
new file mode 100755
index 0000000..21f25ed
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Time.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Time.h
+ * @ingroup OSAL
+ * @brief Time macros
+ * @note This file defines time type and associated macros which must
+ * be used to manipulate time.
+ ************************************************************************
+*/
+
+/* $Id: M4OSA_Time.h,v 1.2 2007/01/05 13:12:22 thenault Exp $ */
+
+#ifndef M4OSA_TIME_H
+#define M4OSA_TIME_H
+
+
+#include "M4OSA_Types.h"
+
+
+typedef signed long long M4OSA_Time;
+
+
+/** This macro sets the unknown time value */
+/* NOTE(review): 0x80000000 is an unsigned int constant (2147483648 once
+ widened to the 64-bit M4OSA_Time), not a negative sentinel -- confirm
+ callers compare against the same widened value. */
+#define M4OSA_TIME_UNKNOWN 0x80000000
+
+/** This macro converts a time with a time scale to millisecond.
+ The result is a M4OSA_Double.
+ Fix: the macro parameters are now parenthesized so that expression
+ arguments such as (t1 + t2) are cast and divided as a whole instead
+ of the cast binding only to the first operand. */
+#define M4OSA_TIME_TO_MS(result, time, timescale)\
+ { result = (1000*(M4OSA_Double)(time))/((M4OSA_Double)(timescale)); }
+
+#endif /*M4OSA_TIME_H*/
+
diff --git a/libvideoeditor/osal/inc/M4OSA_Types.h b/libvideoeditor/osal/inc/M4OSA_Types.h
new file mode 100755
index 0000000..92a68d8
--- /dev/null
+++ b/libvideoeditor/osal/inc/M4OSA_Types.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Types.h
+ * @ingroup OSAL
+ * @brief Abstraction types for Android
+ * @note This file redefines basic types which must be
+ * used to declare any variable.
+************************************************************************
+*/
+
+
+#ifndef M4OSA_TYPES_H
+#define M4OSA_TYPES_H
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+#include "M4OSA_Export.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Fixed-meaning scalar types; OSAL code uses these instead of raw C types
+ so that platform porting is localized to this header. */
+typedef signed char M4OSA_Bool;
+typedef unsigned char M4OSA_UInt8;
+typedef signed char M4OSA_Int8;
+typedef unsigned short M4OSA_UInt16;
+typedef signed short M4OSA_Int16;
+/* NOTE(review): 'long' is 64-bit on LP64 platforms, so the two types below
+ are not guaranteed to be 32-bit there -- confirm the target ABI. */
+typedef unsigned long M4OSA_UInt32;
+typedef signed long M4OSA_Int32;
+
+typedef signed char M4OSA_Char;
+typedef unsigned char M4OSA_UChar;
+
+typedef double M4OSA_Double;
+typedef float M4OSA_Float;
+
+/* NOTE(review): wide characters are stored as 8-bit units here, not wchar_t. */
+typedef unsigned char M4OSA_WChar;
+
+typedef void M4OSA_Void;
+
+/* Min & max definitions*/
+#define M4OSA_UINT8_MIN 0
+#define M4OSA_UINT8_MAX 255
+
+#define M4OSA_UINT16_MIN 0
+#define M4OSA_UINT16_MAX 65535
+
+#define M4OSA_UINT32_MIN 0
+#define M4OSA_UINT32_MAX 0xFFFFFFFF
+
+#define M4OSA_INT8_MIN -128
+#define M4OSA_INT8_MAX 127
+
+#define M4OSA_INT16_MIN -32768
+#define M4OSA_INT16_MAX 32767
+
+#define M4OSA_INT32_MIN (-0x7FFFFFFF-1)
+#define M4OSA_INT32_MAX 0x7FFFFFFF
+
+#define M4OSA_CHAR_MIN -128
+#define M4OSA_CHAR_MAX 127
+
+#define M4OSA_UCHAR_MIN 0
+#define M4OSA_UCHAR_MAX 255
+
+#define M4OSA_NULL 0x00
+#define M4OSA_TRUE 0x01
+#define M4OSA_FALSE 0x00
+#define M4OSA_WAIT_FOREVER 0xffffffff
+
+#define M4OSA_CONST const
+#define M4OSA_INLINE inline
+
+/* Rollover offset of the clock */
+/* This value must be the one of M4OSA_clockGetTime */
+#define M4OSA_CLOCK_ROLLOVER M4OSA_INT32_MAX
+
+/* Opaque handle passed through all OSAL APIs */
+typedef void* M4OSA_Context;
+
+/** It is a unique ID for each core component*/
+typedef M4OSA_UInt16 M4OSA_CoreID;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*M4OSA_TYPES_H*/
+
diff --git a/libvideoeditor/osal/src/Android.mk b/libvideoeditor/osal/src/Android.mk
new file mode 100755
index 0000000..5415e96
--- /dev/null
+++ b/libvideoeditor/osal/src/Android.mk
@@ -0,0 +1,67 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvideoeditor_osal
+#
+# Builds the OS Abstraction Layer sources (file, clock, mutex, semaphore,
+# thread, trace, memory) into a static library for the video editor engine.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_osal
+
+LOCAL_SRC_FILES:= \
+ M4OSA_CharStar.c \
+ M4OSA_Clock.c \
+ M4OSA_FileCommon.c \
+ M4OSA_FileReader.c \
+ M4OSA_FileWriter.c \
+ M4OSA_Mutex.c \
+ M4OSA_Random.c \
+ M4OSA_Semaphore.c \
+ M4OSA_Thread.c \
+ M4PSW_DebugTrace.c \
+ M4PSW_MemoryInterface.c \
+ M4PSW_Trace.c \
+ LVOSA_FileReader_optim.c
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/libvideoeditor/osal/inc \
+
+LOCAL_SHARED_LIBRARIES += libdl
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+ -lpthread -ldl
+
+# NOTE(review): the USE_STAGEFRIGHT_* defines are shared with the engine
+# makefiles; keep them in sync when codecs are added or removed.
+LOCAL_CFLAGS += -Wno-multichar \
+ -D__ANDROID__ \
+ -DM4OSA_FILE_BLOCK_WITH_SEMAPHORE \
+ -DUSE_STAGEFRIGHT_CODECS \
+ -DUSE_STAGEFRIGHT_AUDIODEC \
+ -DUSE_STAGEFRIGHT_VIDEODEC \
+ -DUSE_STAGEFRIGHT_AUDIOENC \
+ -DUSE_STAGEFRIGHT_VIDEOENC \
+ -DUSE_STAGEFRIGHT_READERS \
+ -DUSE_STAGEFRIGHT_3GPP_READER
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/osal/src/LVOSA_FileReader_optim.c b/libvideoeditor/osal/src/LVOSA_FileReader_optim.c
new file mode 100755
index 0000000..e11e008
--- /dev/null
+++ b/libvideoeditor/osal/src/LVOSA_FileReader_optim.c
@@ -0,0 +1,1052 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file LVOSA_FileReader_optim.c
+ * @brief
+ * @note This file implements functions to manipulate filesystem access
+ ******************************************************************************
+*/
+
+/** Addition of Trace ID **/
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Error.h"
+
+#ifdef M4TRACE_ID
+#undef M4TRACE_ID
+#endif
+#define M4TRACE_ID M4OSA_FILE_READER
+
+
+#include "M4OSA_FileCommon.h"
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+
+#include "LVOSA_FileReader_optim.h"
+
+#define M4OSA_READER_OPTIM_USE_OSAL_IF
+#ifndef M4OSA_READER_OPTIM_USE_OSAL_IF
+ #include "M4OSA_FileAccess.h"
+#endif
+
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) if ((pointer) == M4OSA_NULL) return (retval);
+
+
+
+
+/**
+ ******************************************************************************
+ * File reader cache buffers parameters (size, number of buffers, etc)
+ ******************************************************************************
+*/
+/* Cache geometry. Fix: expansions are parenthesized so the macros stay
+ correct inside any arithmetic expression (e.g. multiplication or
+ unary contexts), per standard macro-hygiene practice. */
+#define M4OSA_READBUFFER_SIZE (1024*16)
+#define M4OSA_READBUFFER_NB 2
+#define M4OSA_READBUFFER_NONE (-1)
+#define M4OSA_EOF (-1)
+
+#define MAX_FILLS_SINCE_LAST_ACCESS (M4OSA_READBUFFER_NB*2)
+
+/**
+ ******************************************************************************
+ * structure M4OSA_FileReader_Buffer
+ * @brief This structure defines the File reader Buffers context (private)
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_MemAddr8 data; /**< buffer data */
+ M4OSA_FilePosition size; /**< valid byte count, or M4OSA_EOF after a read error */
+ M4OSA_FilePosition filepos; /**< position in the file where the buffer starts */
+ M4OSA_FilePosition remain; /**< data amount not already copied from buffer */
+ M4OSA_UInt32 nbFillSinceLastAcess; /**< number of buffer fills since this buffer was last accessed (staleness counter) */
+} M4OSA_FileReader_Buffer_optim;
+
+/**
+ ******************************************************************************
+ * structure M4OSA_FileReader_Context
+ * @brief This structure defines the File reader context (private)
+ * @note This structure is used for all File Reader calls to store the context
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Bool IsOpened; /**< Micro state machine: M4OSA_TRUE once the descriptor is open */
+ M4OSA_FileAttribute FileAttribute; /**< Opening mode */
+ M4OSA_FilePosition readFilePos; /**< Effective position of the underlying file read pointer */
+ M4OSA_FilePosition absolutePos; /**< Virtual position for next reading (caller-visible) */
+ M4OSA_FilePosition fileSize; /**< Size of the file */
+
+ M4OSA_FileReader_Buffer_optim buffer[M4OSA_READBUFFER_NB]; /**< Read buffers */
+
+ M4OSA_Void* aFileDesc; /**< File descriptor */
+
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ M4OSA_FileReadPointer* FS; /**< Filesystem interface (allocated by open in OSAL mode) */
+#else
+ M4OSA_FileSystem_FctPtr *FS; /**< Filesystem interface (owned by the caller) */
+#endif
+
+} M4OSA_FileReader_Context_optim;
+
+/* __________________________________________________________ */
+/*| |*/
+/*| Global function for handling low level read access |*/
+/*|__________________________________________________________|*/
+
+static M4OSA_FileReadPointer* gv_NXPSW_READOPT_lowLevelFunctions;
+
+/**
+ * @brief Wires the optimized read API into 'optimized_functionPointers' and
+ * caches 'lowLevel_functionPointers' for later retrieval by
+ * NXPSW_FileReaderOptim_getLowLevelFunctions().
+ * @return M4NO_ERROR always.
+ */
+M4OSA_ERR NXPSW_FileReaderOptim_init(M4OSA_Void *lowLevel_functionPointers, M4OSA_Void *optimized_functionPointers)
+{
+    M4OSA_FileReadPointer* lowLevel_fp = (M4OSA_FileReadPointer*) lowLevel_functionPointers;
+    M4OSA_FileReadPointer* optimized_fp = (M4OSA_FileReadPointer*) optimized_functionPointers;
+
+    /* Fix: cache the low-level table. It was previously ignored, so
+       NXPSW_FileReaderOptim_getLowLevelFunctions() handed back the
+       never-assigned (NULL) global. */
+    gv_NXPSW_READOPT_lowLevelFunctions = lowLevel_fp;
+
+    /* Set the optimized functions, to be called by the user */
+    optimized_fp->openRead = M4OSA_fileReadOpen_optim;
+    optimized_fp->readData = M4OSA_fileReadData_optim;
+    optimized_fp->seek = M4OSA_fileReadSeek_optim;
+    optimized_fp->closeRead = M4OSA_fileReadClose_optim;
+    optimized_fp->setOption = M4OSA_fileReadSetOption_optim;
+    optimized_fp->getOption = M4OSA_fileReadGetOption_optim;
+
+    return M4NO_ERROR;
+}
+
+/** @brief Drops the cached low-level function table. Always succeeds. */
+M4OSA_ERR NXPSW_FileReaderOptim_cleanUp()
+{
+    gv_NXPSW_READOPT_lowLevelFunctions = M4OSA_NULL;
+    return M4NO_ERROR;
+}
+
+
+/**
+ * @brief Returns (through FS) the cached low-level read function table.
+ * NOTE(review): the table is M4OSA_NULL unless it has been cached first.
+ */
+M4OSA_ERR NXPSW_FileReaderOptim_getLowLevelFunctions(M4OSA_Void **FS)
+{
+    *((M4OSA_FileReadPointer**) FS) = gv_NXPSW_READOPT_lowLevelFunctions;
+    return M4NO_ERROR;
+}
+
+
+/* __________________________________________________________ */
+/*| |*/
+/*| Buffer handling functions for Read access |*/
+/*|__________________________________________________________|*/
+
+/**************************************************************/
+/**
+ * @brief Allocates the M4OSA_READBUFFER_NB read-cache buffers.
+ * Every slot is cleared first so that a partial allocation failure
+ * leaves the context safe for M4OSA_FileReader_BufferFree().
+ * @return M4NO_ERROR, or M4ERR_ALLOC when an allocation fails.
+ */
+M4OSA_ERR M4OSA_FileReader_BufferInit(M4OSA_FileReader_Context_optim* apContext)
+/**************************************************************/
+{
+    M4OSA_UInt8 bufIdx;
+
+    /* Pass 1: reset every slot to a known empty state. */
+    for(bufIdx=0; bufIdx<M4OSA_READBUFFER_NB; bufIdx++)
+    {
+        apContext->buffer[bufIdx].data = M4OSA_NULL;
+        apContext->buffer[bufIdx].size = 0;
+        apContext->buffer[bufIdx].filepos = 0;
+        apContext->buffer[bufIdx].remain = 0;
+    }
+
+    /* Pass 2: allocate the backing storage for each slot. */
+    for(bufIdx=0; bufIdx<M4OSA_READBUFFER_NB; bufIdx++)
+    {
+        apContext->buffer[bufIdx].data = (M4OSA_MemAddr8) M4OSA_32bitAlignedMalloc(M4OSA_READBUFFER_SIZE,
+            M4OSA_FILE_READER, (M4OSA_Char *)"M4OSA_FileReader_BufferInit");
+        M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, apContext->buffer[bufIdx].data);
+    }
+
+    return M4NO_ERROR;
+}
+
+/**************************************************************/
+/**
+ * @brief Releases the cache buffers allocated by
+ * M4OSA_FileReader_BufferInit().
+ * Fix: each pointer is reset to M4OSA_NULL after free, so a second
+ * call (e.g. an error path followed by close) cannot double-free.
+ */
+M4OSA_Void M4OSA_FileReader_BufferFree(M4OSA_FileReader_Context_optim* apContext)
+/**************************************************************/
+{
+    M4OSA_Int8 i;
+
+    for(i=0; i<M4OSA_READBUFFER_NB; i++)
+    {
+        if(apContext->buffer[i].data != M4OSA_NULL)
+        {
+            free(apContext->buffer[i].data);
+            apContext->buffer[i].data = M4OSA_NULL;
+        }
+    }
+}
+
+/**************************************************************/
+/**
+ * @brief Copies up to 'size' bytes starting at absolute file position
+ * 'pos' out of cache buffer 'i' into 'pData'.
+ * @return the number of bytes copied, 0 when 'pos' is not inside the
+ * buffer, or M4OSA_EOF when the buffer is in EOF/error state.
+ */
+M4OSA_FilePosition M4OSA_FileReader_BufferCopy(M4OSA_FileReader_Context_optim* apContext,
+ M4OSA_Int8 i, M4OSA_FilePosition pos,
+ M4OSA_FilePosition size, M4OSA_MemAddr8 pData)
+/**************************************************************/
+{
+    M4OSA_FileReader_Buffer_optim* pBuffer = &apContext->buffer[i];
+    M4OSA_FilePosition offsetInBuffer;
+    M4OSA_FilePosition bytesToCopy;
+
+    if(pBuffer->size == M4OSA_EOF)
+    {
+        return M4OSA_EOF;
+    }
+
+    /* Reject positions outside [filepos, filepos + size - 1]. */
+    offsetInBuffer = pos - pBuffer->filepos;
+    if((offsetInBuffer < 0) || (offsetInBuffer >= pBuffer->size))
+    {
+        return 0; /* nothing copied */
+    }
+
+    /* Clip the request to what the buffer holds past the offset. */
+    bytesToCopy = pBuffer->size - offsetInBuffer;
+    if(size < bytesToCopy)
+    {
+        bytesToCopy = size;
+    }
+
+    memcpy((void *)pData, (void *)(pBuffer->data + offsetInBuffer), bytesToCopy);
+
+    /* Book-keeping: less data left to consume; buffer freshly used. */
+    pBuffer->remain -= bytesToCopy;
+    pBuffer->nbFillSinceLastAcess = 0;
+
+    return bytesToCopy;
+}
+
+/**************************************************************/
+/**
+ * @brief Fills cache buffer 'i' with one aligned block of file data that
+ * contains absolute position 'pos'. The position is quantized to a
+ * M4OSA_READBUFFER_SIZE grid, the underlying file pointer is moved
+ * there if needed, then one full buffer is read.
+ * @return M4NO_ERROR on success, M4WAR_NO_MORE_AU when 'pos' is at/after
+ * EOF, M4WAR_NO_DATA_YET on a short (end-of-file) read, or the
+ * underlying filesystem error.
+ * NOTE(review): the local variable 'errno' shadows the C library errno
+ * macro; renaming it would be safer -- left unchanged here.
+ */
+M4OSA_ERR M4OSA_FileReader_BufferFill(M4OSA_FileReader_Context_optim* apContext,
+ M4OSA_Int8 i, M4OSA_FilePosition pos)
+/**************************************************************/
+{
+ M4OSA_FilePosition gridPos;
+ M4OSA_FilePosition tempPos;
+ M4OSA_UInt32 bufferSize;
+ M4OSA_FilePosition diff;
+ M4OSA_FilePosition size;
+ M4OSA_ERR err = M4NO_ERROR;
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ M4OSA_ERR errno = M4NO_ERROR;
+ M4OSA_UInt32 fileReadSize = 0;
+ M4OSA_FilePosition fileSeekPosition = 0;
+#else
+ M4OSA_Int32 ret_val;
+ M4OSA_UInt16 errno;
+#endif
+
+ M4OSA_TRACE3_4("BufferFill i = %d pos = %ld read = %ld old = %ld", i, pos,
+ apContext->readFilePos, apContext->buffer[i].filepos);
+
+ /* Avoid cycling statement because of EOF */
+ if(pos >= apContext->fileSize)
+ return M4WAR_NO_MORE_AU;
+
+ /* Relocate to absolute position if necessary: quantize 'pos' down to the
+ buffer-size grid, then seek relative to the current read position. */
+ bufferSize = M4OSA_READBUFFER_SIZE;
+ tempPos = (M4OSA_FilePosition) (pos / bufferSize);
+ gridPos = tempPos * M4OSA_READBUFFER_SIZE;
+ diff = gridPos - apContext->readFilePos;
+
+ if(diff != 0)
+ {
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ fileSeekPosition = diff;
+ errno = apContext->FS->seek(apContext->aFileDesc, M4OSA_kFileSeekCurrent,
+ &fileSeekPosition);
+ apContext->readFilePos = gridPos;
+
+ if(M4NO_ERROR != errno)
+ {
+ err = errno;
+ M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR1 = 0x%x", err);
+ return err;
+ }
+
+#else
+ ret_val = apContext->FS->pFctPtr_Seek(apContext->aFileDesc, diff,
+ M4OSA_kFileSeekCurrent, &errno);
+ apContext->readFilePos = gridPos;
+
+ if(ret_val != 0)
+ {
+ err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
+ M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR1 = 0x%x", err);
+ return err;
+ }
+#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
+ }
+
+ apContext->buffer[i].filepos = apContext->readFilePos;
+
+ /* Read Data; on failure the buffer is marked EOF so BufferCopy/BufferMatch
+ will not serve stale data from it. */
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ fileReadSize = M4OSA_READBUFFER_SIZE;
+ errno = apContext->FS->readData(apContext->aFileDesc,
+ (M4OSA_MemAddr8)apContext->buffer[i].data, &fileReadSize);
+
+ size = (M4OSA_FilePosition)fileReadSize;
+ if ((M4NO_ERROR != errno)&&(M4WAR_NO_DATA_YET != errno))
+ {
+ apContext->buffer[i].size = M4OSA_EOF;
+ apContext->buffer[i].remain = 0;
+
+ err = errno;
+ M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR2 = 0x%x", err);
+ return err;
+ }
+#else
+ size = apContext->FS->pFctPtr_Read(apContext->aFileDesc,
+ (M4OSA_UInt8 *)apContext->buffer[i].data, M4OSA_READBUFFER_SIZE, &errno);
+ if(size == -1)
+ {
+ apContext->buffer[i].size = M4OSA_EOF;
+ apContext->buffer[i].remain = 0;
+
+ err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
+ M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR2 = 0x%x", err);
+ return err;
+ }
+#endif
+
+ apContext->buffer[i].size = size;
+ apContext->buffer[i].remain = size;
+ apContext->buffer[i].nbFillSinceLastAcess = 0;
+
+ /* Retrieve current position */
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ errno = apContext->FS->getOption(apContext->aFileDesc,
+ M4OSA_kFileReadGetFilePosition,
+ (M4OSA_DataOption*) &apContext->readFilePos);
+
+ if (M4NO_ERROR != errno)
+ {
+ /* NOTE(review): this error is traced but NOT returned -- execution
+ falls through to 'return M4NO_ERROR' below with a possibly stale
+ readFilePos. Confirm whether swallowing it is intentional. */
+ err = errno;
+ M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR3 = 0x%x", err);
+ }
+ else if( (apContext->buffer[i].size >= 0)
+ && (apContext->buffer[i].size < M4OSA_READBUFFER_SIZE) )
+ {
+ /* Short read: the end of file is inside this buffer. */
+ err = M4WAR_NO_DATA_YET;
+ M4OSA_TRACE2_0("M4OSA_FileReader_BufferFill returns NO DATA YET");
+ return err;
+ }
+#else
+ apContext->readFilePos = apContext->FS->pFctPtr_Tell(apContext->aFileDesc, &errno);
+
+ if( (apContext->buffer[i].size >= 0)
+ && (apContext->buffer[i].size < M4OSA_READBUFFER_SIZE) )
+ {
+ err = M4WAR_NO_DATA_YET;
+ M4OSA_TRACE1_1("M4OSA_FileReader_BufferFill ERR3 = 0x%x", err);
+ return err;
+ }
+#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
+
+ /* Return without error */
+ return M4NO_ERROR;
+}
+
+/**************************************************************/
+/**
+ * @brief Finds the cache buffer that already contains absolute file
+ * position 'pos'.
+ * @return the buffer index, or M4OSA_READBUFFER_NONE when no buffer
+ * covers that position.
+ */
+M4OSA_Int8 M4OSA_FileReader_BufferMatch(M4OSA_FileReader_Context_optim* apContext,
+ M4OSA_FilePosition pos)
+/**************************************************************/
+{
+    M4OSA_Int8 idx;
+
+    for(idx=0; idx<M4OSA_READBUFFER_NB; idx++)
+    {
+        M4OSA_FilePosition start = apContext->buffer[idx].filepos;
+        M4OSA_FilePosition end = start + apContext->buffer[idx].size;
+
+        /* Half-open interval [start, end) */
+        if((pos >= start) && (pos < end))
+        {
+            return idx;
+        }
+    }
+
+    return M4OSA_READBUFFER_NONE;
+}
+
+/**************************************************************/
+/**
+ * @brief Chooses which cache buffer to recycle for the next fill.
+ * Preference order: (A) a buffer with no unread data; (B) a "dead"
+ * buffer that has gone at least MAX_FILLS_SINCE_LAST_ACCESS fills
+ * without being accessed; (C) the buffer with the least unread
+ * data, scanning from 'current_i' so ties favor the buffer after
+ * the current one.
+ * @return the index of the buffer to reuse (always valid).
+ */
+M4OSA_Int8 M4OSA_FileReader_BufferSelect(M4OSA_FileReader_Context_optim* apContext,
+ M4OSA_Int8 current_i)
+/**************************************************************/
+{
+ M4OSA_Int8 i,j;
+ M4OSA_FilePosition min_amount,max_amount;
+ M4OSA_Int8 min_i,max_count;
+
+ /* update nbFillSinceLastAcess field: every buffer ages by one fill */
+ for(i=0; i<M4OSA_READBUFFER_NB; i++)
+ {
+ apContext->buffer[i].nbFillSinceLastAcess ++;
+ }
+
+ /* Plan A : Scan for empty buffer */
+ for(i=0; i<M4OSA_READBUFFER_NB; i++)
+ {
+ if(apContext->buffer[i].remain == 0)
+ {
+ return i;
+ }
+ }
+
+ /* max_count == M4OSA_READBUFFER_NB acts as "no dead buffer found" */
+ max_count = M4OSA_READBUFFER_NB;
+ max_amount = MAX_FILLS_SINCE_LAST_ACCESS;
+
+ /* Plan B : Scan for dead buffer (stalest one wins) */
+ for(i=0; i<M4OSA_READBUFFER_NB; i++)
+ {
+ if(apContext->buffer[i].nbFillSinceLastAcess >= (M4OSA_UInt32) max_amount)
+ {
+ max_amount = apContext->buffer[i].nbFillSinceLastAcess;
+ max_count = i;
+ }
+ }
+ if(max_count<M4OSA_READBUFFER_NB)
+ {
+ M4OSA_TRACE2_2("DEAD BUFFER: %d, %d",max_count,apContext->buffer[max_count].nbFillSinceLastAcess);
+ return max_count;
+ }
+
+ min_i = current_i;
+ min_amount = M4OSA_READBUFFER_SIZE;
+
+ /* Plan C : Select the buffer which is the most "empty" */
+ for(i=0; i<M4OSA_READBUFFER_NB; i++)
+ {
+ j = (i+current_i)%M4OSA_READBUFFER_NB;
+
+ if(apContext->buffer[j].remain < min_amount)
+ {
+ min_amount = apContext->buffer[j].remain;
+ min_i = j;
+ }
+ }
+
+ return min_i;
+
+}
+
+/**************************************************************/
+/**
+ * @brief Determines the total file size and stores it in
+ * apContext->fileSize.
+ * OSAL build: queries M4OSA_kFileReadGetFileSize (file pointer is
+ * not moved). Non-OSAL build: seeks to the end, records the
+ * resulting offset, and leaves readFilePos at end-of-file.
+ * @return M4NO_ERROR or the underlying filesystem error.
+ * NOTE(review): the local 'errno' shadows the C library errno macro.
+ */
+M4OSA_ERR M4OSA_FileReader_CalculateSize(M4OSA_FileReader_Context_optim* apContext)
+/**************************************************************/
+{
+ M4OSA_ERR err = M4NO_ERROR;
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ M4OSA_ERR errno = M4NO_ERROR;
+#else
+ M4OSA_Int32 ret_val;
+ M4OSA_UInt16 errno;
+#endif
+
+ /* go to the end of file*/
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ errno = apContext->FS->getOption(apContext->aFileDesc, M4OSA_kFileReadGetFileSize,
+ (M4OSA_DataOption*) &apContext->fileSize);
+ if (M4NO_ERROR != errno)
+ {
+ err = errno;
+ M4OSA_TRACE1_1("M4OSA_FileReader_CalculateSize ERR = 0x%x", err);
+ }
+#else
+ ret_val = apContext->FS->pFctPtr_Seek(apContext->aFileDesc, 0, M4OSA_kFileSeekEnd, &errno);
+
+ if (ret_val != 0)
+ {
+ apContext->readFilePos = M4OSA_EOF;
+ err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
+ M4OSA_TRACE1_1("M4OSA_FileReader_CalculateSize ERR = 0x%x", err);
+ }
+ else
+ {
+ /* Retrieve size of the file */
+ apContext->fileSize = apContext->FS->pFctPtr_Tell(apContext->aFileDesc, &errno);
+ apContext->readFilePos = apContext->fileSize;
+ }
+#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
+
+ return err;
+}
+
+
+/* __________________________________________________________ */
+/*| |*/
+/*| OSAL filesystem API |*/
+/*|__________________________________________________________|*/
+
+/**
+******************************************************************************
+* @brief This method opens the provided fileDescriptor and returns its context.
+* @param pContext: (OUT) File reader context.
+* @param pFileDescriptor : (IN) File Descriptor of the input file.
+* @param FileModeAccess : (IN) File mode access.
+* @return M4NO_ERROR: there is no error
+* @return M4ERR_PARAMETER pContext or fileDescriptor is NULL
+* @return M4ERR_ALLOC there is no more memory available
+* @return M4ERR_FILE_BAD_MODE_ACCESS the file mode access is not correct (it must be either isTextMode or read)
+* @return M4ERR_FILE_NOT_FOUND The file can not be opened.
+* @note Fixes over the previous revision: the context is released when the
+* interface-table allocation fails, the interface table is released on
+* every error path, and a descriptor opened before a later failure
+* (BufferInit/CalculateSize) is closed -- no memory or fd leak.
+******************************************************************************
+*/
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+ M4OSA_ERR M4OSA_fileReadOpen_optim(M4OSA_Context* pContext,
+ M4OSA_Void* pFileDescriptor,
+ M4OSA_UInt32 FileModeAccess)
+#else
+ M4OSA_ERR M4OSA_fileReadOpen_optim(M4OSA_Context* pContext,
+ M4OSA_Void* pFileDescriptor,
+ M4OSA_UInt32 FileModeAccess,
+ M4OSA_FileSystem_FctPtr *FS)
+#endif
+{
+    M4OSA_FileReader_Context_optim* apContext = M4OSA_NULL;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_Void* aFileDesc = M4OSA_NULL;
+    M4OSA_Bool buffers_allocated = M4OSA_FALSE;
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+    M4OSA_ERR errno = M4NO_ERROR;
+#else
+    M4OSA_UInt16 errno;
+#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
+
+    M4OSA_TRACE2_3("M4OSA_fileReadOpen_optim p = 0x%p fd = %s mode = %lu", pContext,
+        pFileDescriptor, FileModeAccess);
+
+    /* Check input parameters */
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pFileDescriptor);
+
+    *pContext = M4OSA_NULL;
+
+    /* Allocate memory for the File reader context. */
+    apContext = (M4OSA_FileReader_Context_optim *)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_FileReader_Context_optim),
+        M4OSA_FILE_READER, (M4OSA_Char *)"M4OSA_FileReader_Context_optim");
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, apContext);
+
+    /* Initialize state early so the cleanup path can rely on it. */
+    apContext->IsOpened = M4OSA_FALSE;
+
+    /* Set filesystem interface */
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+
+    /*Set the optimized functions, to be called by the user*/
+
+    apContext->FS = (M4OSA_FileReadPointer*) M4OSA_32bitAlignedMalloc(sizeof(M4OSA_FileReadPointer),
+        M4OSA_FILE_READER, (M4OSA_Char *)"M4OSA_FileReaderOptim_init");
+    if (M4OSA_NULL==apContext->FS)
+    {
+        M4OSA_TRACE1_0("M4OSA_FileReaderOptim_init - ERROR : allocation failed");
+        /* Fix: route through cleanup so apContext is not leaked. */
+        err = M4ERR_ALLOC;
+        goto cleanup;
+    }
+    apContext->FS->openRead = M4OSA_fileReadOpen;
+    apContext->FS->readData = M4OSA_fileReadData;
+    apContext->FS->seek = M4OSA_fileReadSeek;
+    apContext->FS->closeRead = M4OSA_fileReadClose;
+    apContext->FS->setOption = M4OSA_fileReadSetOption;
+    apContext->FS->getOption = M4OSA_fileReadGetOption;
+#else
+    apContext->FS = FS;
+#endif
+
+    /* Verify access mode */
+    if ( ((FileModeAccess & M4OSA_kFileAppend) != 0)
+        || ((FileModeAccess & M4OSA_kFileRead) == 0))
+    {
+        err = M4ERR_FILE_BAD_MODE_ACCESS;
+        goto cleanup;
+    }
+
+    /* Open file in read mode */
+    if((FileModeAccess & M4OSA_kFileCreate) != 0)
+    {
+        err = M4ERR_FILE_BAD_MODE_ACCESS;
+    }
+    else
+    {
+        if ((FileModeAccess & M4OSA_kFileRead))
+        {
+            /* File is opened in read only*/
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+            errno = apContext->FS->openRead(&aFileDesc, pFileDescriptor, FileModeAccess);
+
+            if ((aFileDesc == M4OSA_NULL)||(M4NO_ERROR != errno))
+            {
+                /* converts the error to PSW format*/
+                err = errno;
+                M4OSA_TRACE2_1("M4OSA_fileReadOpen_optim ERR1 = 0x%x", err);
+                apContext->IsOpened = M4OSA_FALSE;
+            }
+#else
+            aFileDesc = apContext->FS->pFctPtr_Open(pFileDescriptor, FileModeAccess, &errno);
+
+            if (aFileDesc == M4OSA_NULL)
+            {
+                /* converts the error to PSW format*/
+                err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errno);
+                M4OSA_TRACE2_1("M4OSA_fileReadOpen_optim ERR1 = 0x%x", err);
+                apContext->IsOpened = M4OSA_FALSE;
+            }
+#endif
+
+            else
+            {
+                apContext->IsOpened = M4OSA_TRUE;
+            }
+        }
+        else
+        {
+            err = M4ERR_FILE_BAD_MODE_ACCESS;
+        }
+    }
+
+    if (M4NO_ERROR != err) goto cleanup;
+
+    /* Allocate buffers */
+    err = M4OSA_FileReader_BufferInit(apContext);
+    buffers_allocated = M4OSA_TRUE;
+
+    if (M4NO_ERROR != err) goto cleanup;
+
+    /* Initialize parameters */
+    apContext->fileSize = 0;
+    apContext->absolutePos = 0;
+    apContext->readFilePos = 0;
+
+    /* Retrieve the File Descriptor*/
+    apContext->aFileDesc = aFileDesc;
+
+    /* Retrieve the File mode Access */
+    apContext->FileAttribute.modeAccess = (M4OSA_FileModeAccess) FileModeAccess;
+
+    /*Retrieve the File reader context */
+    *pContext= (M4OSA_Context)apContext;
+
+    /* Compute file size */
+    err = M4OSA_FileReader_CalculateSize(apContext);
+
+    if (M4NO_ERROR != err) goto cleanup;
+
+    return M4NO_ERROR;
+
+cleanup:
+
+    /* free context */
+    if (M4OSA_NULL != apContext)
+    {
+        if(buffers_allocated == M4OSA_TRUE)
+        {
+            M4OSA_FileReader_BufferFree(apContext);
+        }
+
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+        if (M4OSA_NULL != apContext->FS)
+        {
+            /* Fix: close a descriptor opened before a later failure, then
+               release the interface table (both used to leak). In the
+               non-OSAL build FS belongs to the caller and is not freed. */
+            if ((M4OSA_TRUE == apContext->IsOpened) && (M4OSA_NULL != aFileDesc))
+            {
+                apContext->FS->closeRead(aFileDesc);
+            }
+            free(apContext->FS);
+        }
+#endif
+
+        free( apContext);
+        *pContext = M4OSA_NULL;
+    }
+
+    M4OSA_TRACE2_1 ("M4OSA_fileReadOpen_optim: returns error 0x%0x", err)
+    return err;
+}
+
+/**
+******************************************************************************
+* @brief This method reads the 'size' bytes in the core file reader (selected by its 'context')
+* and writes the data to the 'data' pointer. If 'size' byte can not be read in the core file reader,
+* 'size' parameter is updated to match the correct number of read bytes.
+* Reads are served from the cache buffers; buffers are (re)filled on
+* demand via BufferMatch/BufferSelect/BufferFill.
+* @param pContext: (IN) File reader context.
+* @param pData : (OUT) Data pointer of the read data.
+* @param pSize : (INOUT) Size of the data to read (in byte).
+* @return M4NO_ERROR: there is no error
+* @return M4ERR_PARAMETER pSize, fileDescriptor or pData is NULL
+* @return M4ERR_ALLOC there is no more memory available
+* @return M4ERR_BAD_CONTEXT provided context is not a valid one.
+* @return M4WAR_NO_DATA_YET fewer bytes than requested were available;
+* *pSize holds the number actually copied.
+* @return M4WAR_NO_MORE_AU the virtual position is already at/after EOF.
+******************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadData_optim(M4OSA_Context pContext,M4OSA_MemAddr8 pData,
+ M4OSA_UInt32* pSize)
+{
+ M4OSA_FileReader_Context_optim* apContext =
+ (M4OSA_FileReader_Context_optim*) pContext;
+
+ M4OSA_ERR err;
+ M4OSA_FilePosition aSize;
+ M4OSA_FilePosition copiedSize;
+ M4OSA_Int8 selected_buffer, current_buffer;
+
+ M4OSA_TRACE3_3("M4OSA_fileReadData_optim p = 0x%p d = 0x%p s = %lu",
+ pContext, pData, *pSize);
+
+ /* Check input parameters */
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_BAD_CONTEXT, apContext);
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pData);
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pSize);
+
+ if (apContext->IsOpened != M4OSA_TRUE)
+ {
+ return M4ERR_BAD_CONTEXT;
+ }
+
+ /* Prevent reading beyond EOF */
+ if((*pSize > 0) && (apContext->absolutePos >= apContext->fileSize))
+ {
+ copiedSize = 0;
+ err = M4WAR_NO_MORE_AU;
+ goto cleanup;
+ }
+
+ /* Check if data can be read from a buffer */
+ /* If not, fill one according to quantized positions */
+ copiedSize = 0;
+ err = M4NO_ERROR;
+
+ selected_buffer = M4OSA_FileReader_BufferMatch(apContext, apContext->absolutePos);
+
+ if(selected_buffer == M4OSA_READBUFFER_NONE)
+ {
+ selected_buffer = M4OSA_FileReader_BufferSelect(apContext, 0);
+ err = M4OSA_FileReader_BufferFill(apContext, selected_buffer,
+ apContext->absolutePos);
+ }
+
+ if(err != M4NO_ERROR)
+ {
+ if(err == M4WAR_NO_DATA_YET)
+ {
+ /* Short fill (EOF inside the buffer): succeed if the request
+ still fits, otherwise hand back what is available. */
+ if (*pSize <= (M4OSA_UInt32)apContext->buffer[selected_buffer].size)
+ {
+ err = M4NO_ERROR;
+ }
+ else
+ {
+ copiedSize = (M4OSA_UInt32)apContext->buffer[selected_buffer].size;
+ /*copy the content into pData*/
+ M4OSA_FileReader_BufferCopy(apContext, selected_buffer,
+ apContext->absolutePos, copiedSize, pData);
+ goto cleanup;
+ }
+ }
+ else
+ {
+ goto cleanup;
+ }
+ }
+
+ M4OSA_TRACE3_3("read size = %lu buffer = %d pos = %ld", *pSize,
+ selected_buffer, apContext->absolutePos);
+
+ /* Copy buffer into pData; spans multiple cache buffers when the request
+ crosses a buffer boundary, refilling as needed. */
+ while(((M4OSA_UInt32)copiedSize < *pSize) && (err == M4NO_ERROR))
+ {
+ aSize = M4OSA_FileReader_BufferCopy(apContext, selected_buffer,
+ apContext->absolutePos+copiedSize,
+ *pSize-copiedSize, pData+copiedSize);
+ copiedSize += aSize;
+
+ if(aSize == 0)
+ {
+ err = M4WAR_NO_DATA_YET;
+ }
+ else
+ {
+ if((M4OSA_UInt32)copiedSize < *pSize)
+ {
+ current_buffer = selected_buffer;
+ selected_buffer = M4OSA_FileReader_BufferMatch(apContext,
+ apContext->absolutePos+copiedSize);
+
+ if(selected_buffer == M4OSA_READBUFFER_NONE)
+ {
+ selected_buffer = M4OSA_FileReader_BufferSelect(apContext,
+ current_buffer);
+ err = M4OSA_FileReader_BufferFill(apContext, selected_buffer,
+ apContext->absolutePos+copiedSize);
+
+ if(err != M4NO_ERROR)
+ {
+ if(err == M4WAR_NO_DATA_YET)
+ {
+ /*If we got all the data that we wanted, we should return no error*/
+ if ((*pSize-copiedSize) <= (M4OSA_UInt32)apContext->buffer[selected_buffer].size)
+ {
+ err = M4NO_ERROR;
+ }
+ /*If we did not get enough data, we will return NO_DATA_YET*/
+
+ /*copy the data read*/
+ aSize = M4OSA_FileReader_BufferCopy(apContext, selected_buffer,
+ apContext->absolutePos+copiedSize,
+ *pSize-copiedSize, pData+copiedSize);
+ copiedSize += aSize;
+
+ /*we reached end of file, so stop trying to read*/
+ goto cleanup;
+ }
+ if (err == M4WAR_NO_MORE_AU)
+ {
+ /* EOF hit exactly on a buffer boundary: report as
+ NO_DATA_YET with the partial byte count. */
+ err = M4WAR_NO_DATA_YET;
+
+ /*copy the data read*/
+ aSize = M4OSA_FileReader_BufferCopy(apContext, selected_buffer,
+ apContext->absolutePos+copiedSize,
+ *pSize-copiedSize, pData+copiedSize);
+ copiedSize += aSize;
+
+ /*we reached end of file, so stop trying to read*/
+ goto cleanup;
+
+ }
+ else
+ {
+ goto cleanup;
+ }
+ }
+ }
+ }
+ }
+ }
+
+cleanup :
+
+ /* Update the new position of the pointer */
+ apContext->absolutePos = apContext->absolutePos + copiedSize;
+
+ if((err != M4NO_ERROR)&&(err!=M4WAR_NO_DATA_YET))
+ {
+ M4OSA_TRACE2_3("M4OSA_fileReadData_optim size = %ld copied = %ld err = 0x%x",
+ *pSize, copiedSize, err);
+ }
+
+ /* Effective copied size must be returned */
+ *pSize = copiedSize;
+
+
+ /* Read is done */
+ return err;
+}
+
+/**
+******************************************************************************
+* @brief This method seeks at the provided position in the core file reader (selected by its 'context').
+* The position is related to the seekMode parameter it can be either :
+* From the beginning (position MUST be positive) : end position = position
+* From the end (position MUST be negative) : end position = file size + position
+* From the current position (signed offset) : end position = current position + position.
+* Only the virtual position (absolutePos) is updated; the physical
+* file pointer moves lazily at the next BufferFill.
+* @param pContext: (IN) File reader context.
+* @param SeekMode : (IN) Seek access mode.
+* @param pPosition : (IN) Position in the file.
+* @return M4NO_ERROR: there is no error
+* @return M4ERR_PARAMETER Seekmode or fileDescriptor is NULL
+* @return M4ERR_ALLOC there is no more memory available
+* @return M4ERR_BAD_CONTEXT provided context is not a valid one.
+* @return M4ERR_FILE_INVALID_POSITION the position cannot be reached.
+******************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadSeek_optim( M4OSA_Context pContext, M4OSA_FileSeekAccessMode SeekMode,
+ M4OSA_FilePosition* pPosition)
+{
+ M4OSA_FileReader_Context_optim* apContext = (M4OSA_FileReader_Context_optim*) pContext;
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_TRACE3_3("M4OSA_fileReadSeek_optim p = 0x%p mode = %d pos = %d", pContext,
+ SeekMode, *pPosition);
+
+ /* Check input parameters */
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_BAD_CONTEXT, apContext);
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pPosition);
+ /* NOTE(review): this macro compares SeekMode (an enum value, not a
+ pointer) against M4OSA_NULL, so a mode whose numeric value is 0
+ would be rejected -- confirm the enum starts at a nonzero value. */
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, SeekMode);
+
+ if (apContext->IsOpened != M4OSA_TRUE)
+ {
+ return M4ERR_BAD_CONTEXT; /*< The context can not be correct */
+ }
+
+ /* Go to the desired position */
+ switch(SeekMode)
+ {
+ case M4OSA_kFileSeekBeginning :
+ /* NOTE(review): unlike the SeekCurrent case below, no upper
+ bound against fileSize is enforced here (nor for SeekEnd);
+ confirm whether seeking past EOF is intended. */
+ if(*pPosition < 0) {
+ return M4ERR_PARAMETER; /**< Bad SeekAcess mode */
+ }
+ apContext->absolutePos = *pPosition;
+ *pPosition = apContext->absolutePos;
+ break;
+
+ case M4OSA_kFileSeekEnd :
+ if(*pPosition > 0) {
+ return M4ERR_PARAMETER; /**< Bad SeekAcess mode */
+ }
+ apContext->absolutePos = apContext->fileSize + *pPosition;
+ *pPosition = apContext->absolutePos;
+ break;
+
+ case M4OSA_kFileSeekCurrent :
+ if(((apContext->absolutePos + *pPosition) > apContext->fileSize) ||
+ ((apContext->absolutePos + *pPosition) < 0)){
+ return M4ERR_PARAMETER; /**< Bad SeekAcess mode */
+ }
+ apContext->absolutePos = apContext->absolutePos + *pPosition;
+ *pPosition = apContext->absolutePos;
+ break;
+
+ default :
+ err = M4ERR_PARAMETER; /**< Bad SeekAcess mode */
+ break;
+ }
+
+ /* Return without error */
+ return err;
+}
+
+/**
+******************************************************************************
+* @brief This method asks the core file reader to close the file
+* (associated to the context) and also frees the context.
+* @param pContext: (IN) File reader context.
+* @return M4NO_ERROR: there is no error
+* @return M4ERR_BAD_CONTEXT provided context is not a valid one.
+******************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadClose_optim(M4OSA_Context pContext)
+{
+    M4OSA_FileReader_Context_optim* apContext = (M4OSA_FileReader_Context_optim*) pContext;
+
+    M4OSA_ERR err = M4NO_ERROR;
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+    /* Bug fix: the local was previously named "errno", which collides with
+     * the <errno.h> macro and is undefined behavior. */
+    M4OSA_ERR errnoValue = M4NO_ERROR;
+#else
+    M4OSA_UInt16 errnoValue;
+    M4OSA_Int32 aRet_Val;    /* Bug fix: was used below without a declaration */
+#endif
+
+    M4OSA_TRACE2_1("M4OSA_fileReadClose_optim p = 0x%p", pContext );
+
+    /* Check input parameters */
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_BAD_CONTEXT, apContext);
+
+    if (apContext->IsOpened != M4OSA_TRUE)
+    {
+        return M4ERR_BAD_CONTEXT; /**< The context can not be correct */
+    }
+
+    /* Release the read-ahead buffers before closing the underlying file */
+    M4OSA_FileReader_BufferFree(apContext);
+
+    /* Close the file */
+#ifdef M4OSA_READER_OPTIM_USE_OSAL_IF
+    errnoValue = apContext->FS->closeRead(apContext->aFileDesc);
+
+    if (M4NO_ERROR != errnoValue)
+    {
+        /* converts the error to PSW format*/
+        err = errnoValue;
+        M4OSA_TRACE2_1("M4OSA_fileReadClose_optim ERR1 = 0x%x", err);
+    }
+#else
+    aRet_Val = apContext->FS->pFctPtr_Close(apContext->aFileDesc, &errnoValue);
+
+    if (aRet_Val != 0)
+    {
+        /* converts the error to PSW format*/
+        err = M4OSA_ERR_CREATE(M4_ERR, M4OSA_FILE_READER, errnoValue);
+        M4OSA_TRACE2_1("M4OSA_fileReadClose_optim ERR1 = 0x%x", err);
+    }
+#endif /*M4OSA_READER_OPTIM_USE_OSAL_IF*/
+
+    apContext->IsOpened = M4OSA_FALSE;
+
+    //>>>> GLM20090212 : set the low level function statically
+    /* The function-pointer table was allocated at open time; free(NULL) is a
+     * no-op, so no NULL guard is needed. */
+    free(apContext->FS);
+    //<<<< GLM20090212 : set the low level function statically
+
+    /* Free the context */
+    free(apContext);
+
+    /* Return without error */
+    return err;
+}
+
+/**
+******************************************************************************
+* @brief This is a dummy function required to maintain function pointer
+* structure.
+* @note This is a dummy function required to maintain function pointer
+* structure.
+* @param pContext: (IN) Execution context.
+* @param OptionId : (IN) Id of the option to set.
+* @param OptionValue : (IN) Value of the option.
+* @return M4NO_ERROR: there is no error
+******************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadSetOption_optim(M4OSA_Context pContext,
+                                        M4OSA_FileReadOptionID OptionID,
+                                        M4OSA_DataOption OptionValue)
+{
+    /* Dummy entry point kept only so that the file-reader function-pointer
+     * table stays fully populated; no option is actually handled here. */
+    (void)pContext;
+    (void)OptionID;
+    (void)OptionValue;
+
+    return M4NO_ERROR;
+}
+
+/**
+******************************************************************************
+* @brief This method asks the core file reader to return the value associated
+* with the optionID.The caller is responsible for allocating/de-allocating
+* the memory of the value field.
+* @note The options handled by the component depend on the implementation
+* of the component.
+* @param pContext: (IN) Execution context.
+* @param OptionId : (IN) Id of the option to set.
+* @param pOptionValue : (OUT) Value of the option.
+* @return M4NO_ERROR: there is no error
+* @return M4ERR_BAD_CONTEXT pContext is NULL
+* @return M4ERR_BAD_OPTION_ID the option id is not valid.
+* @return M4ERR_NOT_IMPLEMENTED The option is not implemented yet.
+******************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadGetOption_optim(M4OSA_Context pContext,
+                                        M4OSA_FileReadOptionID OptionID,
+                                        M4OSA_DataOption* pOptionValue)
+{
+    M4OSA_FileReader_Context_optim* apContext = (M4OSA_FileReader_Context_optim*) pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Sanity checks: valid context and an actually opened file. */
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_BAD_CONTEXT, apContext);
+
+    if (apContext->IsOpened != M4OSA_TRUE)
+    {
+        return M4ERR_BAD_CONTEXT; /**< The context can not be correct */
+    }
+
+    /* Serve the requested option if it is supported */
+    switch(OptionID)
+    {
+        case M4OSA_kFileReadGetFileSize:
+            /* Size of the file, limited to a 32-bit value. */
+            *(M4OSA_UInt32 *)pOptionValue = apContext->fileSize;
+            break;
+
+        case M4OSA_kFileReadIsEOF:
+            /* EOF is reached once the logical read position has caught up
+             * with (or passed) the file size. */
+            *(M4OSA_Bool *)pOptionValue =
+                (apContext->absolutePos >= apContext->fileSize) ? M4OSA_TRUE
+                                                                : M4OSA_FALSE;
+            break;
+
+        case M4OSA_kFileReadGetFilePosition:
+            /* Current logical read position. */
+            *(M4OSA_FilePosition *)pOptionValue = apContext->absolutePos;
+            break;
+
+        case M4OSA_kFileReadGetFileAttribute:
+            /* Only the access-mode attribute is tracked by this reader. */
+            ((M4OSA_FileAttribute *)pOptionValue)->modeAccess =
+                apContext->FileAttribute.modeAccess;
+            break;
+
+        default:
+            err = M4ERR_BAD_OPTION_ID; /**< Unknown option ID */
+            break;
+    }
+
+    return err;
+}
diff --git a/libvideoeditor/osal/src/M4OSA_CharStar.c b/libvideoeditor/osal/src/M4OSA_CharStar.c
new file mode 100755
index 0000000..0814cbf
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_CharStar.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file         M4OSA_CharStar.c
+ * @ingroup
+ * @brief definition of the Char Star set of functions.
+ * @note This file defines the Char Star set of functions.
+ *
+ ************************************************************************
+*/
+
+
+#include "M4OSA_CharStar.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+
+/* WARNING: Specific Android */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+
+
+/**
+ ************************************************************************
+ * @brief This function mimics the functionality of the libc's strncpy().
+ * @note It copies exactly len2Copy characters from pStrIn to pStrOut,
+ * truncating pStrIn or adding null characters to pStrOut if
+ * necessary.
+ * - If len2Copy is less than or equal to the length of pStrIn,
+ * a null character is appended automatically to the copied
+ * string.
+ * - If len2Copy is greater than the length of pStrIn, pStrOut is
+ * padded with null characters up to length len2Copy.
+ * - pStrOut and pStrIn MUST NOT OVERLAP (this is NOT CHECKED).
+ * @param pStrOut: (OUT) Destination character string.
+ * @param pStrIn: (IN) Source character string.
+ * @param len2Copy: (IN) Maximum number of characters from pStrIn to copy.
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_PARAMETER: pStrIn or pStrOut is M4OSA_NULL.
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_chrNCopy(M4OSA_Char* pStrOut, M4OSA_Char *pStrIn, M4OSA_UInt32 len2Copy)
+{
+    M4OSA_TRACE1_3("M4OSA_chrNCopy\t(M4OSA_Char* %x,M4OSA_Char* %x,M4OSA_UInt32 %ld)",
+        pStrOut,pStrIn,len2Copy);
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pStrOut),M4ERR_PARAMETER,
+                                            "M4OSA_chrNCopy:\tpStrOut is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pStrIn),M4ERR_PARAMETER,
+                                            "M4OSA_chrNCopy:\tpStrIn is M4OSA_NULL");
+
+    /* strncpy copies at most len2Copy characters and pads the destination
+     * with '\0' when the source is shorter than len2Copy. */
+    strncpy((char *)pStrOut, (const char *)pStrIn, (size_t)len2Copy);
+
+    /* When the source is at least len2Copy characters long, strncpy leaves
+     * the destination unterminated, so append the terminator ourselves;
+     * pStrOut must therefore hold at least len2Copy + 1 bytes. */
+    if ((M4OSA_UInt32)strlen((const char *)pStrIn) >= len2Copy)
+    {
+        pStrOut[len2Copy] = '\0';
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief This function returns the boolean comparison of pStrIn1 and pStrIn2.
+ * @note The value returned in result is M4OSA_TRUE if the string
+ * pointed to by pStrIn1 is strictly identical to the string pointed
+ * to by pStrIn2, and M4OSA_FALSE otherwise.
+ * @param pStrIn1: (IN) First character string.
+ * @param pStrIn2: (IN) Second character string.
+ * @param cmpResult: (OUT) Comparison result.
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_PARAMETER: pStrIn1 pStrIn2 or cmpResult is M4OSA_NULL.
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_chrAreIdentical(M4OSA_Char* pStrIn1, M4OSA_Char* pStrIn2,
+                                M4OSA_Bool* pResult)
+{
+    M4OSA_TRACE1_3("M4OSA_chrAreIdentical\t(M4OSA_Char* %x,M4OSA_Char* %x,"
+        "M4OSA_Int32* %x)",pStrIn1,pStrIn2,pResult);
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrIn1, M4ERR_PARAMETER,
+                                  "M4OSA_chrAreIdentical:\tpStrIn1 is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrIn2, M4ERR_PARAMETER,
+                                  "M4OSA_chrAreIdentical:\tpStrIn2 is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pResult, M4ERR_PARAMETER,
+                                  "M4OSA_chrAreIdentical:\tpResult is M4OSA_NULL");
+
+    /* strcmp already performs the length + content comparison that the
+     * previous hand-rolled strlen/loop implementation did, in a single pass
+     * over the shorter string. */
+    *pResult = (0 == strcmp((const char *)pStrIn1, (const char *)pStrIn2))
+               ? M4OSA_TRUE : M4OSA_FALSE;
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function gets a M4OSA_UInt32 from string.
+ * @note This function converts the first set of non-whitespace
+ * characters of pStrIn to a M4OSA_UInt32 value pVal, assuming a
+ * representation in base provided by the parameter base. pStrOut is
+ * set to the first character of the string following the last
+ * character of the number that has been converted.
+ * - in case of a failure during the conversion, pStrOut is not
+ * updated, and pVal is set to null.
+ * - in case of negative number, pStrOut is not updated, and pVal is
+ * set to null.
+ * - in case of numerical overflow, pVal is set to M4OSA_UINT32_MAX.
+ * - if pStrOut is not to be used, it can be set to M4OSA_NULL.
+ * @param pStrIn: (IN) Character string.
+ * @param pVal: (OUT) read value.
+ * @param pStrOut: (OUT) Output character string.
+ * @param base: (IN) Base of the character string representation.
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_PARAMETER: pStrIn or pVal is M4OSA_NULL.
+ * @return M4ERR_CHR_CONV_FAILED: conversion failure.
+ * @return M4WAR_CHR_NUM_RANGE: the character string represents a number
+ * greater than M4OSA_UINT32_MAX.
+ * @return M4WAR_CHR_NEGATIVE: the character string represents a negative
+ * number.
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_chrGetUInt32(M4OSA_Char* pStrIn,
+                             M4OSA_UInt32* pVal,
+                             M4OSA_Char** pStrOut,
+                             M4OSA_chrNumBase base)
+{
+    M4OSA_UInt32 ul;
+    char* pTemp;
+
+    M4OSA_TRACE1_4("M4OSA_chrGetUInt32\t(M4OSA_Char* %x, M4OSA_UInt32* %x"
+        "M4OSA_Char** %x,M4OSA_chrNumBase %d)",pStrIn,pVal,pStrOut,base);
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrIn, M4ERR_PARAMETER,
+                                      "M4OSA_chrGetUInt32:\tpStrIn is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pVal, M4ERR_PARAMETER,
+                                      "M4OSA_chrGetUInt32:\tpVal is M4OSA_NULL");
+
+    /* strtoul() only reports overflow through errno, so errno must be
+     * cleared before the call and tested right after it. */
+    errno = 0;
+    switch(base)
+    {
+        case M4OSA_kchrDec:
+            ul = strtoul((const char *)pStrIn, &pTemp, 10);
+            break;
+        case M4OSA_kchrHexa:
+            ul = strtoul((const char *)pStrIn, &pTemp,16);
+            break;
+        case M4OSA_kchrOct:
+            ul = strtoul((const char *)pStrIn, &pTemp,8);
+            break;
+        default:
+            return M4ERR_PARAMETER;
+    }
+
+    /* has conversion failed ? (strtoul leaves the end pointer at the start
+     * of the string when no digit could be consumed) */
+    if((M4OSA_Char*)pTemp == pStrIn)
+    {
+        *pVal = 0;
+        return M4ERR_CHR_CONV_FAILED;
+    }
+
+    /* was the number negative ? strtoul() silently wraps negative input, so
+     * detect a leading '-' (after blanks) explicitly. Per the contract
+     * above, pStrOut is deliberately left untouched in this case. */
+    if(*(pStrIn+strspn((const char *)pStrIn," \t")) == '-')
+    {
+        *pVal = 0;
+        return M4WAR_CHR_NEGATIVE;
+    }
+
+    /* has an overflow occured ? (ERANGE set by strtoul): clamp the result
+     * to M4OSA_UINT32_MAX and still report the end-of-number position. */
+    if(errno == ERANGE)
+    {
+        *pVal = M4OSA_UINT32_MAX;
+        if(M4OSA_NULL != pStrOut)
+        {
+            *pStrOut = (M4OSA_Char*)pTemp;
+        }
+        return M4WAR_CHR_NUM_RANGE;
+    }
+
+    /* nominal case: return the value and the first unconsumed character */
+    *pVal = (M4OSA_UInt32)ul;
+    if(M4OSA_NULL != pStrOut)
+    {
+        *pStrOut = (M4OSA_Char*)pTemp;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief This function gets a M4OSA_UInt16 from string.
+ * @note This function converts the first set of non-whitespace
+ * characters of pStrIn to a M4OSA_UInt16 value pVal, assuming a
+ * representation in base provided by the parameter base. pStrOut is
+ * set to the first character of the string following the last
+ * character of the number that has been converted.
+ * - in case of a failure during the conversion, pStrOut is not
+ * updated, and pVal is set to null.
+ * - in case of negative number, pStrOut is not updated, and pVal is
+ * set to null.
+ * - in case of numerical overflow, pVal is set to M4OSA_UINT16_MAX.
+ * - if pStrOut is not to be used, it can be set to M4OSA_NULL.
+ * @param pStrIn: (IN) Character string.
+ * @param pVal: (OUT) read value.
+ * @param pStrOut: (OUT) Output character string.
+ * @param base: (IN) Base of the character string representation.
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_PARAMETER: pStrIn or pVal is M4OSA_NULL.
+ * @return M4ERR_CHR_CONV_FAILED: conversion failure.
+ * @return M4WAR_CHR_NUM_RANGE: the character string represents a number
+ * greater than M4OSA_UINT16_MAX.
+ * @return M4WAR_CHR_NEGATIVE: the character string represents a negative
+ * number.
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_chrGetUInt16 (M4OSA_Char* pStrIn, M4OSA_UInt16 *pVal,
+                              M4OSA_Char** pStrOut, M4OSA_chrNumBase base)
+{
+    M4OSA_UInt32 result;
+    char* pEnd;
+    int radix;
+
+    M4OSA_TRACE1_4("M4OSA_chrGetUInt16\t(M4OSA_Char* %x, M4OSA_UInt16* %x"
+        "M4OSA_Char** %x,M4OSA_chrNumBase %d)",pStrIn,pVal,pStrOut,base);
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrIn,M4ERR_PARAMETER,
+                                     "M4OSA_chrGetUInt16:\tpStrIn is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pVal, M4ERR_PARAMETER,
+                                     "M4OSA_chrGetUInt16:\tpVal is M4OSA_NULL");
+
+    /* Map the OSAL base constant onto the radix expected by strtoul(). */
+    switch(base)
+    {
+        case M4OSA_kchrDec:  radix = 10; break;
+        case M4OSA_kchrHexa: radix = 16; break;
+        case M4OSA_kchrOct:  radix = 8;  break;
+        default:             return M4ERR_PARAMETER;
+    }
+
+    result = strtoul((const char *)pStrIn, &pEnd, radix);
+
+    /* No character consumed means the conversion failed. */
+    if((M4OSA_Char*)pEnd == pStrIn)
+    {
+        *pVal = 0;
+        return M4ERR_CHR_CONV_FAILED;
+    }
+
+    /* Reject negative input: inspect the first non-blank character, since
+     * strtoul() would silently wrap it. pStrOut is left untouched here. */
+    if('-' == *(pStrIn + strspn((const char *)pStrIn," \t")))
+    {
+        *pVal = 0;
+        return M4WAR_CHR_NEGATIVE;
+    }
+
+    /* Clamp values that do not fit on 16 bits. */
+    if(result > M4OSA_UINT16_MAX)
+    {
+        *pVal = M4OSA_UINT16_MAX;
+        if(M4OSA_NULL != pStrOut)
+        {
+            *pStrOut = (M4OSA_Char*)pEnd;
+        }
+        return M4WAR_CHR_NUM_RANGE;
+    }
+
+    /* Nominal case: return the value and the first unconsumed character. */
+    *pVal = (M4OSA_UInt16)result;
+    if(M4OSA_NULL != pStrOut)
+    {
+        *pStrOut = (M4OSA_Char*)pEnd;
+    }
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   snprintf-like formatter that additionally understands the
+ *          non-standard "ll", "tm" and "aa" length prefixes, which are
+ *          all rewritten to the standard "l" prefix before formatting.
+ * @param   pStrOut:      (OUT) Destination buffer, strOutMaxLen + 1 bytes.
+ * @param   strOutMaxLen: (IN)  Maximum number of characters to write,
+ *                              not counting the terminating '\0'.
+ * @param   format:       (IN)  printf-style format string.
+ * @return  M4NO_ERROR on success.
+ * @return  M4ERR_CHR_STR_OVERFLOW when the output was truncated.
+ * @return  M4ERR_ALLOC when the rewritten format cannot be allocated.
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_chrSPrintf(M4OSA_Char *pStrOut, M4OSA_UInt32 strOutMaxLen,
+                           M4OSA_Char *format, ...)
+{
+    va_list marker;
+    M4OSA_Char *pTemp;
+    M4OSA_Char *percentPointer;
+    M4OSA_Char *newFormat;
+    M4OSA_Int32 newFormatLength=0;
+    M4OSA_UInt32 count_ll = 0;
+    M4OSA_UInt32 count_tm = 0;
+    M4OSA_UInt32 count_aa = 0;
+    M4OSA_UInt32 count;
+    M4OSA_UInt32 nbChar;
+    M4OSA_Int32 err;
+    M4OSA_Char flagChar[]             = "'-+ #0";
+    M4OSA_Char widthOrPrecisionChar[] = "*0123456789";
+    M4OSA_Char otherPrefixChar[]      = "hlL";
+    M4OSA_Char conversionChar[]       = "diouxXnfeEgGcCsSp%";
+
+    M4OSA_TRACE1_3("M4OSA_chrSPrintf\t(M4OSA_Char* %x, M4OSA_UInt32 %ld"
+        "M4OSA_Char* %x)",pStrOut,strOutMaxLen,format);
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pStrOut, M4ERR_PARAMETER,
+                                       "M4OSA_chrSPrintf:\tpStrOut is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(M4OSA_NULL == format, M4ERR_PARAMETER,
+                                       "M4OSA_chrSPrintf:\tformat is M4OSA_NULL");
+
+    va_start(marker,format);
+
+    /* First pass: count the occurrences of
+     * %[flags][width][.precision]ll|tm|aa[conversion] so we know whether the
+     * format string needs rewriting at all. */
+    pTemp = format;
+    while(*pTemp)
+    {
+        percentPointer = (M4OSA_Char *)strchr((const char *)pTemp,'%'); /* next '%' */
+        if(!percentPointer)
+            break; /* end of format string */
+        pTemp = percentPointer+1; /* span it */
+        if(!*pTemp)
+            break;
+        pTemp += strspn((const char *)pTemp,(const char *)flagChar); /* span the optional flags */
+        if(!*pTemp)
+            break;
+        pTemp += strspn((const char *)pTemp,(const char *)widthOrPrecisionChar); /* span the optional width */
+        if(!*pTemp)
+            break;
+        if(*pTemp=='.')
+        {
+            pTemp++;
+            pTemp += strspn((const char *)pTemp, (const char *)widthOrPrecisionChar); /* span the optional precision */
+        }
+        if(!*pTemp)
+            break;
+        if(strlen((const char *)pTemp)>=2)
+        {
+            if(!strncmp((const char *)pTemp,"ll",2))
+            {
+                count_ll++;
+                pTemp +=2; /* span the "ll" prefix */
+            }
+            else if(!strncmp((const char *)pTemp,"tm",2))
+            {
+                count_tm++;
+                pTemp +=2;
+            }
+            else if(!strncmp((const char *)pTemp,"aa",2))
+            {
+                count_aa++;
+                pTemp +=2;
+            }
+        }
+        pTemp += strspn((const char *)pTemp, (const char *)otherPrefixChar); /* span the other optional prefix */
+        if(!*pTemp)
+            break;
+        pTemp += strspn((const char *)pTemp, (const char *)conversionChar);
+        if(!*pTemp)
+            break;
+    }
+
+    count = count_ll + count_tm + count_aa;
+
+    /* Fast path: no custom prefix to rewrite, hand the format to vsnprintf
+     * directly. */
+    if(!count)
+    {
+        err= vsnprintf((char *)pStrOut, (size_t)strOutMaxLen + 1, (const char *)format, marker);
+        va_end(marker);
+        if ((err<0) || ((M4OSA_UInt32)err>strOutMaxLen))
+        {
+            pStrOut[strOutMaxLen] = '\0';
+            return M4ERR_CHR_STR_OVERFLOW;
+        }
+        else
+        {
+            return M4NO_ERROR;
+        }
+    }
+
+    /* Each two-character custom prefix is replaced by the single-character
+     * "l", so the rewritten format is exactly 'count' bytes shorter. */
+    newFormatLength = strlen((const char *)format) + 1;
+    newFormatLength -= (count_ll+count_tm+count_aa);
+
+    newFormat =(M4OSA_Char*)M4OSA_32bitAlignedMalloc(newFormatLength,
+        M4OSA_CHARSTAR,(M4OSA_Char*)"M4OSA_chrPrintf: newFormat");
+    if(M4OSA_NULL == newFormat)
+        return M4ERR_ALLOC;
+    newFormat[newFormatLength-1] = '\0';
+    pTemp = newFormat;
+
+    /* Second pass: copy format to newFormat, replacing
+     * %[flags][width][.precision]ll|tm|aa[conversion]
+     * by %[flags][width][.precision]l[conversion] */
+    while(*format)
+    {
+        nbChar = strcspn((const char *)format, "%");
+        if(nbChar)
+        {
+            strncpy((char *)pTemp, (const char *)format, nbChar); /* copy characters before the % character */
+            format +=nbChar;
+            pTemp  +=nbChar;
+        }
+        if(!*format) break;
+        *pTemp++ = *format++; /* copy the % character */
+        nbChar = strspn((const char *)format, (const char *)flagChar);
+        if(nbChar)
+        {
+            strncpy((char *)pTemp, (const char *)format, nbChar); /* copy the flag characters */
+            format +=nbChar;
+            pTemp  +=nbChar;
+        }
+        if(!*format) break;
+        nbChar = strspn((const char *)format, (const char *)widthOrPrecisionChar);
+        if(nbChar)
+        {
+            strncpy((char *)pTemp, (const char *)format, nbChar); /* copy the width characters */
+            format +=nbChar;
+            pTemp  +=nbChar;
+        }
+        if(!*format) break;
+        if(*format=='.')
+        {
+            *pTemp++ = *format++; /* copy the dot character */
+            if(!*format) break; /* bug fix: test the character, not the pointer */
+            nbChar = strspn((const char *)format, (const char *)widthOrPrecisionChar);
+            if(nbChar)
+            {
+                strncpy((char *)pTemp, (const char *)format, nbChar); /* copy the precision characters */
+                format +=nbChar;
+                pTemp  +=nbChar;
+            }
+            if(!*format) break; /* bug fix: test the character, not the pointer */
+        }
+        if(strlen((const char *)format)>=2)
+        {
+            if(!strncmp((const char *)format, "ll", 2))
+            {
+                *pTemp++ = 'l'; /* %l */
+                format +=2;     /* span the "ll" prefix */
+            }
+            else if(!strncmp((const char *)format, "tm", 2))
+            {
+                *pTemp++ = 'l'; /* %l */
+                format +=2;     /* span the "tm" prefix */
+            }
+            else if(!strncmp((const char *)format, "aa", 2))
+            {
+                *pTemp++ = 'l';
+                format +=2;     /* span the "aa" prefix */
+            }
+        }
+        nbChar = strspn((const char *)format, (const char *)otherPrefixChar);
+        if(nbChar)
+        {
+            strncpy((char *)pTemp, (const char *)format, nbChar); /* copy the other Prefix */
+            format +=nbChar;
+            pTemp  +=nbChar;
+        }
+        if(!*format) break;
+        nbChar = strspn((const char *)format, (const char *)conversionChar);
+        if(nbChar)
+        {
+            strncpy((char *)pTemp, (const char *)format, nbChar);
+            format += nbChar;
+            pTemp  += nbChar;
+        }
+        if(!*format) break;
+    }
+
+    /* Zero terminate the rewritten format string. */
+    (*pTemp) = '\0';
+
+    err = vsnprintf((char *)pStrOut, (size_t)strOutMaxLen + 1, (const char *)newFormat, marker);
+    va_end(marker);
+    free(newFormat);
+    if ((err<0) || ((M4OSA_UInt32)err>strOutMaxLen))
+    {
+        pStrOut[strOutMaxLen] = '\0';
+        return M4ERR_CHR_STR_OVERFLOW;
+    }
+    else
+    {
+        return M4NO_ERROR;
+    }
+}
+
diff --git a/libvideoeditor/osal/src/M4OSA_Clock.c b/libvideoeditor/osal/src/M4OSA_Clock.c
new file mode 100755
index 0000000..9817b22
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_Clock.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ ************************************************************************
+ * @file M4OSA_Clock.c
+ * @brief Clock related functions
+ * @note This file implements functions to manipulate clock
+ ************************************************************************
+*/
+
+#include <sys/time.h>
+#include <time.h>
+
+#include "M4OSA_Debug.h"
+#include "M4OSA_Clock.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Types.h"
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This function gets an absolute time to an unknown reference with
+ * a high precision.
+ * @note It means it can only be used to get a relative time by computing
+ *        differences between two times.
+ * It is to the caller to allocate time. Time is expressed in
+ * timescale unit.
+ * M4OSA_ROLLOVER_CLOCK in M4OSA_Types.h must be configured with the rollover
+ * offset of this function.
+ * @param time: (IN/OUT) time
+ * @param timescale: (IN) The timescale (time unit per second)
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4WAR_TIMESCALE_TOO_BIG: the precision of the system clock is not
+ *         compliant with the input timescale
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_clockGetTime(M4OSA_Time* pTime, M4OSA_UInt32 timescale)
+{
+    struct timeval tv;
+    struct timezone tz;
+    M4OSA_UInt32 u32_time = 0;
+    M4OSA_UInt32 u32_time_hi;
+    M4OSA_UInt32 u32_time_lo;
+    M4OSA_UInt32 u32_time_lh;
+    M4OSA_UInt32 factor;
+
+    M4OSA_TRACE1_2("M4OSA_clockGetTime\t\tM4OSA_Time* 0x%x\tM4OSA_UInt32 %d",
+        pTime, timescale);
+
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pTime, M4ERR_PARAMETER,
+        "M4OSA_clockGetTime: pTime is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(0 == timescale, M4ERR_PARAMETER,
+        "M4OSA_clockGetTime: timescale is 0");
+
+    /* Number of microseconds per timescale tick. */
+    factor = 1000000 / timescale;
+    /* Bug fix: for timescales above 1e6 ticks/s the integer division above
+     * yields 0, and the division by 'factor' below would crash. Clamp to 1;
+     * precision is in any case capped by the microsecond resolution of
+     * gettimeofday(). */
+    if (0 == factor)
+    {
+        factor = 1;
+    }
+
+    if(gettimeofday(&tv, &tz) == 0)
+    {
+        /* Multiply tv_sec by the timescale in two 16-bit halves to avoid
+         * losing the high bits of the 32-bit product. */
+        u32_time_lo = (tv.tv_sec & 0xFFFF) * timescale;
+        u32_time_hi = (((tv.tv_sec >> 16) & 0xFFFF) * timescale) + ((u32_time_lo >> 16) & 0xFFFF);
+        u32_time_lo &= 0xFFFF;
+        u32_time_lo += tv.tv_usec / factor;
+        u32_time_hi += ((u32_time_lo >> 16) & 0xFFFF);
+        u32_time_lo &= 0xFFFF;
+        u32_time = ((u32_time_hi & 0x7FFF) << 16) | u32_time_lo;
+    }
+
+    /* M4OSA_Time is signed, so we need to check the max value*/
+    if (u32_time > M4OSA_INT32_MAX)
+    {
+        u32_time = u32_time - M4OSA_INT32_MAX;
+    }
+
+    *pTime = (M4OSA_Time)u32_time;
+
+    /* Warn (after still delivering a best-effort time) when the requested
+     * timescale exceeds what the system clock can honour. */
+    if( timescale > 10000 )
+    {
+        return M4WAR_TIMESCALE_TOO_BIG;
+    }
+
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/osal/src/M4OSA_FileCommon.c b/libvideoeditor/osal/src/M4OSA_FileCommon.c
new file mode 100755
index 0000000..c12db5d
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_FileCommon.c
@@ -0,0 +1,667 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_FileCommon.c
+ * @brief File common for Android
+ * @note This file implements functions used by both the file writer
+ * and file reader.
+ ************************************************************************
+*/
+
+#ifndef USE_STAGEFRIGHT_CODECS
+#error "USE_STAGEFRIGHT_CODECS is not defined"
+#endif /*USE_STAGEFRIGHT_CODECS*/
+
+#ifdef UTF_CONVERSION
+#include <string.h>
+#endif /*UTF_CONVERSION*/
+
+#include <sys/stat.h>
+#include <errno.h>
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+#include "M4OSA_Semaphore.h"
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+
+#include "M4OSA_Debug.h"
+#include "M4OSA_FileCommon.h"
+#include "M4OSA_FileCommon_priv.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_CharStar.h"
+
+/**
+ ************************************************************************
+ * @brief This function opens the provided URL and returns its context.
+ * If an error occurred, the context is set to NULL.
+ * @param core_id: (IN) Core ID of the caller (M4OSA_FILE_READER or M4OSA_FILE_WRITER)
+ * @param context: (OUT) Context of the core file reader
+ * @param url: (IN) URL of the input file
+ * @param fileModeAccess: (IN) File mode access
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_ALLOC: there is no more memory available
+ * @return M4ERR_NOT_IMPLEMENTED: the URL does not match with the supported
+ * file
+ * @return M4ERR_FILE_NOT_FOUND: the file cannot be found
+ * @return M4ERR_FILE_LOCKED: the file is locked by another
+ * application/process
+ * @return M4ERR_FILE_BAD_MODE_ACCESS: the file mode access is not correct
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileCommonOpen(M4OSA_UInt16 core_id, M4OSA_Context* pContext,
+ M4OSA_Char* pUrl, M4OSA_FileModeAccess fileModeAccess)
+{
+
+ M4OSA_Int32 i = 0;
+ M4OSA_Int32 iMode = 0;
+ M4OSA_Int32 iSize = 0;
+ M4OSA_Int32 iSavePos = 0;
+
+ M4OSA_Char mode[4] = "";
+ M4OSA_Char* pReadString = (M4OSA_Char*)"r";
+ M4OSA_Char* pWriteString = (M4OSA_Char*)"w";
+ M4OSA_Char* pAppendString = (M4OSA_Char*)"a";
+ M4OSA_Char* pBinaryString = (M4OSA_Char*)"b";
+ M4OSA_Char* pPlusString = (M4OSA_Char*)"+";
+
+ M4OSA_ERR err = M4NO_ERROR;
+
+ FILE* pFileHandler = M4OSA_NULL;
+ M4OSA_FileContext *pFileContext = M4OSA_NULL;
+
+
+#ifdef UTF_CONVERSION
+ /*FB: to test the UTF16->UTF8 conversion into Video Artist*/
+ /*Convert the URL from UTF16 to UTF8*/
+ M4OSA_Void* tempConversionBuf;
+ M4OSA_UInt32 tempConversionSize = 1000;
+
+ tempConversionBuf = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(tempConversionSize +1, 0, "conversion buf");
+ if(tempConversionBuf == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Error when allocating conversion buffer\n");
+ return M4ERR_PARAMETER;
+ }
+ M4OSA_ToUTF8_OSAL(pUrl, tempConversionBuf, &tempConversionSize);
+ ((M4OSA_Char*)tempConversionBuf)[tempConversionSize ] = '\0';
+
+ printf("file open %s\n", tempConversionBuf);
+#endif /*UTF CONVERSION*/
+
+ M4OSA_TRACE3_4("M4OSA_fileCommonOpen\t\tM4OSA_UInt16 %d\tM4OSA_Context* 0x%x\t"
+ "M4OSA_Char* %s\tfileModeAccess %d", core_id, pContext, pUrl, fileModeAccess);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER, "M4OSA_fileCommonOpen: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pUrl, M4ERR_PARAMETER, "M4OSA_fileCommonOpen: pUrl is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(0 == fileModeAccess, M4ERR_PARAMETER, "M4OSA_fileCommonOpen: fileModeAccess is 0");
+
+ /* Read mode not set for the reader */
+ M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && !(fileModeAccess & M4OSA_kFileRead),
+ M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileRead");
+
+ /* Read mode not set for the reader */
+ M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && !(fileModeAccess & M4OSA_kFileRead),
+ M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileRead");
+
+ /* M4OSAfileReadOpen cannot be used with Write file mode access */
+ M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && (fileModeAccess & M4OSA_kFileWrite),
+ M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileWrite");
+
+ /* Append and Create flags cannot be used with Read */
+ M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && (fileModeAccess & M4OSA_kFileAppend),
+ M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileAppend");
+
+ M4OSA_DEBUG_IF1((M4OSA_FILE_READER == core_id) && (fileModeAccess & M4OSA_kFileCreate),
+ M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileCreate");
+
+ /* Write mode not set for the writer */
+ M4OSA_DEBUG_IF1((M4OSA_FILE_WRITER == core_id) && !(fileModeAccess & M4OSA_kFileWrite),
+ M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: M4OSA_kFileWrite");
+
+ /* Create flag necessary for opening file */
+ if ((fileModeAccess & M4OSA_kFileRead) &&
+ (fileModeAccess & M4OSA_kFileWrite)&&(fileModeAccess & M4OSA_kFileCreate))
+ {
+ strncat((char *)mode, (const char *)pWriteString, (size_t)1);
+ strncat((char *)mode, (const char *)pPlusString, (size_t)1);
+ }
+ else
+ {
+ if(fileModeAccess & M4OSA_kFileAppend)
+ {
+ strncat((char *)mode, (const char *)pAppendString, (size_t)1);
+ }
+ else if(fileModeAccess & M4OSA_kFileRead)
+ {
+ strncat((char *)mode, (const char *)pReadString, (size_t)1);
+ }
+ else if(fileModeAccess & M4OSA_kFileWrite)
+ {
+ strncat((char *)mode, (const char *)pWriteString, (size_t)1);
+ }
+
+ if((fileModeAccess & M4OSA_kFileRead)&&(fileModeAccess & M4OSA_kFileWrite))
+ {
+ strncat((char *)mode,(const char *)pPlusString, (size_t)1);
+ }
+ }
+
+ if(!(fileModeAccess & M4OSA_kFileIsTextMode))
+ {
+ strncat((char *)mode, (const char *)pBinaryString,(size_t)1);
+ }
+
+ /*Open the file*/
+
+#ifdef UTF_CONVERSION
+ /*Open the converted path*/
+ pFileHandler = fopen((const char *)tempConversionBuf, (const char *)mode);
+ /*Free the temporary decoded buffer*/
+ free(tempConversionBuf);
+#else /* UTF_CONVERSION */
+ pFileHandler = fopen((const char *)pUrl, (const char *)mode);
+#endif /* UTF_CONVERSION */
+
+ if (M4OSA_NULL == pFileHandler)
+ {
+ switch(errno)
+ {
+ case ENOENT:
+ {
+ M4OSA_DEBUG(M4ERR_FILE_NOT_FOUND, "M4OSA_fileCommonOpen: No such file or directory");
+ M4OSA_TRACE1_1("File not found: %s", pUrl);
+ return M4ERR_FILE_NOT_FOUND;
+ }
+ case EACCES:
+ {
+ M4OSA_DEBUG(M4ERR_FILE_LOCKED, "M4OSA_fileCommonOpen: Permission denied");
+ return M4ERR_FILE_LOCKED;
+ }
+ case EINVAL:
+ {
+ M4OSA_DEBUG(M4ERR_FILE_BAD_MODE_ACCESS, "M4OSA_fileCommonOpen: Invalid Argument");
+ return M4ERR_FILE_BAD_MODE_ACCESS;
+ }
+ case EMFILE:
+ case ENOSPC:
+ case ENOMEM:
+ {
+ M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonOpen: Too many open files");
+ return M4ERR_ALLOC;
+ }
+ default:
+ {
+ M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_fileCommonOpen");
+ return M4ERR_NOT_IMPLEMENTED;
+ }
+ }
+ }
+
+ /* Allocate the file context */
+ pFileContext = (M4OSA_FileContext*) M4OSA_32bitAlignedMalloc(sizeof(M4OSA_FileContext),
+ core_id, (M4OSA_Char*)"M4OSA_fileCommonOpen: file context");
+ if (M4OSA_NULL == pFileContext)
+ {
+ fclose(pFileHandler);
+ M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonOpen");
+ return M4ERR_ALLOC;
+ }
+
+ pFileContext->file_desc = pFileHandler;
+ pFileContext->access_mode = fileModeAccess;
+ pFileContext->current_seek = SeekNone;
+ pFileContext->b_is_end_of_file = M4OSA_FALSE;
+
+ /**
+ * Note: Never use this expression "i = (value1 == value2) ? x: y;"
+ * because that doens't compile on other platforms (ADS for example)
+ * Use: if(value1 == value2)
+ * { i= x; ..etc
+ */
+ pFileContext->coreID_write = 0;
+ pFileContext->coreID_read = 0;
+ pFileContext->m_DescrModeAccess = M4OSA_kDescNoneAccess;
+
+ if (M4OSA_FILE_READER == core_id)
+ {
+ pFileContext->coreID_read = core_id;
+ pFileContext->m_DescrModeAccess = M4OSA_kDescReadAccess;
+ }
+ else if (M4OSA_FILE_WRITER == core_id)
+ {
+ pFileContext->coreID_write = core_id;
+ pFileContext->m_DescrModeAccess = M4OSA_kDescWriteAccess;
+ }
+
+ pFileContext->read_position = 0;
+ pFileContext->write_position = 0;
+
+ /* Allocate the memory to store the URL string */
+ pFileContext->url_name = (M4OSA_Char*) M4OSA_32bitAlignedMalloc(strlen((const char *)pUrl)+1,
+ core_id, (M4OSA_Char*)"M4OSA_fileCommonOpen: URL name");
+ if (M4OSA_NULL == pFileContext->url_name)
+ {
+ fclose(pFileHandler);
+ free(pFileContext);
+ M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonOpen");
+ return M4ERR_ALLOC;
+ }
+ M4OSA_chrNCopy(pFileContext->url_name, pUrl, strlen((const char *)pUrl)+1);
+
+ /* Get the file name */
+ err = M4OSA_fileCommonGetFilename(pUrl, &pFileContext->file_name);
+ if(M4NO_ERROR != err)
+ {
+ fclose(pFileHandler);
+ free(pFileContext->url_name);
+ free(pFileContext);
+ M4OSA_DEBUG(err, "M4OSA_fileCommonOpen");
+ return err;
+ }
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphoreOpen(&(pFileContext->semaphore_context), 1); /* Allocate the semaphore */
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+
+
+#ifdef USE_STAGEFRIGHT_CODECS
+ // Workaround for file system bug on Stingray/Honeycomb where a file re-created will keep
+ // the original file's size filled with 0s. Do not seek to the end to avoid ill effects
+ if(fileModeAccess & M4OSA_kFileAppend) {
+ /* Get the file size */
+ iSavePos = ftell(pFileHandler); /* 1- Check the first position */
+ fseek(pFileHandler, 0, SEEK_END); /* 2- Go to the end of the file*/
+ iSize = ftell(pFileHandler); /* 3- Check the file size */
+ fseek(pFileHandler, iSavePos, SEEK_SET);/* 4- go to the first position */
+ } else {
+ iSize = 0;
+ }
+#else /* USE_STAGEFRIGHT_CODECS */
+ /* Get the file size */
+ iSavePos = ftell(pFileHandler); /* 1- Check the first position */
+ fseek(pFileHandler, 0, SEEK_END); /* 2- Go to the end of the file*/
+ iSize = ftell(pFileHandler); /* 3- Check the file size */
+ fseek(pFileHandler, iSavePos, SEEK_SET);/* 4- go to the first position */
+#endif /* USE_STAGEFRIGHT_CODECS */
+
+
+
+ /* Warning possible overflow if the file is higher than 2GBytes */
+ pFileContext->file_size = iSize;
+
+ *pContext = pFileContext;
+
+ return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function convert from UTF16 to UTF8
+ * @param pBufferIn: (IN) UTF16 input path
+ * @param pBufferOut: (OUT) UTF8 output path
+ * @param bufferOutSize: (IN/OUT) size of the output path
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: the output path size is not enough to contain
+ * the decoded path
+ ************************************************************************
+*/
#ifdef UTF_CONVERSION
M4OSA_ERR M4OSA_ToUTF8_OSAL (M4OSA_Void *pBufferIn, M4OSA_UInt8 *pBufferOut,
                             M4OSA_UInt32 *bufferOutSize)
{
    /* BUGFIX: the index must be as wide as 'len'; the previous 16-bit index
       looped forever for paths of 65536 characters or more */
    M4OSA_UInt32 i;
    wchar_t *w_str = (wchar_t *) pBufferIn;
    M4OSA_UInt32 len, size_needed, size_given;
    if (pBufferIn == NULL)
    {
        /* No input: produce an empty, terminated string */
        *pBufferOut = 0;
        *bufferOutSize = 1;
    }
    else
    {
        len = wcslen(w_str);
        size_needed = len+1;
        size_given = *bufferOutSize;

        /* Always report the required size, even on failure */
        *bufferOutSize = size_needed;
        if (size_given < size_needed )
        {
            return M4ERR_PARAMETER;
        }
        else
        {
            /* NOTE(review): this keeps only the low byte of each UTF16 code
               unit, so it is lossless for ASCII/Latin-1 paths only */
            for (i = 0; i < len; i++)
            {
                pBufferOut[i] = (M4OSA_UInt8)w_str[i];
            }
            pBufferOut[len] = 0;
        }
    }
    return M4NO_ERROR;
}
#endif /*UTF CONVERSION*/
+
+/**
+ ************************************************************************
+ * @brief This function seeks at the provided position.
+ * @param context: (IN/OUT) Context of the core file reader
+ * @param seekMode: (IN) Seek access mode
+ * @param position: (IN/OUT) Position in the file
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_ALLOC: there is no more memory available
+ * @return M4ERR_FILE_INVALID_POSITION: the position cannot be reached
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileCommonSeek(M4OSA_Context pContext,
+ M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* pFilePos)
+{
+ M4OSA_FileContext* pFileContext = pContext;
+ M4OSA_FilePosition fpos_current;
+ M4OSA_FilePosition fpos_seek;
+ M4OSA_FilePosition fpos_null = 0;
+ M4OSA_FilePosition fpos_neg_un = -1;
+ M4OSA_FilePosition fpos_file_size;
+ M4OSA_FilePosition fpos_seek_from_beginning;
+
+ M4OSA_TRACE3_3("M4OSA_fileCommonSeek\t\tM4OSA_Context 0x%x\t M4OSA_FileSeekAccessMode %d\tM4OSA_FilePosition* 0x%x",
+ pContext, seekMode, pFilePos);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER, "M4OSA_fileCommonSeek");
+ M4OSA_DEBUG_IF2(0 == seekMode, M4ERR_PARAMETER, "M4OSA_fileCommonSeek");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFilePos, M4ERR_PARAMETER, "M4OSA_fileCommonSeek");
+
+ fpos_file_size = pFileContext->file_size;
+
+ if(SeekRead == pFileContext->current_seek)
+ {
+ fpos_current = pFileContext->read_position;
+ }
+ else if(SeekWrite == pFileContext->current_seek)
+ {
+ fpos_current = pFileContext->write_position;
+ }
+ else
+ {
+ fpos_current = 0;
+ }
+
+ switch(seekMode)
+ {
+ case M4OSA_kFileSeekCurrent:
+ {
+ fpos_seek = *pFilePos;
+ break;
+ }
+ case M4OSA_kFileSeekBeginning:
+ {
+ fpos_seek = *pFilePos - fpos_current;
+ break;
+ }
+ case M4OSA_kFileSeekEnd:
+ {
+ fpos_seek = *pFilePos + fpos_file_size - fpos_current;
+ break;
+ }
+ default:
+ {
+ return M4ERR_PARAMETER;
+ }
+ }
+
+ fpos_seek_from_beginning = fpos_current + fpos_seek;
+
+ if(fseek(pFileContext->file_desc, fpos_seek, SEEK_CUR) != 0)
+ {
+ switch(errno)
+ {
+ case EINVAL:
+ {
+ /* meaning the value for origin is invalid or the position
+ specified by offset is before the beginning of the file */
+ return M4ERR_FILE_INVALID_POSITION;
+ }
+
+ case EBADF:
+ default:
+ {
+ return M4ERR_BAD_CONTEXT;/* file handle is invalid */
+ }
+ }
+ }
+
+ /* Set the returned position from the beginning of the file */
+ *pFilePos = fpos_seek_from_beginning;
+
+ /* SEEK done, reset end of file value */
+ pFileContext->b_is_end_of_file = M4OSA_FALSE;
+
+ return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function asks to close the file (associated to the context)
+ * @note The context of the core file reader/writer is freed.
+ * @param context: (IN/OUT) Context of the core file reader
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_ALLOC: there is no more memory available
+ ************************************************************************
+*/
+
+M4OSA_ERR M4OSA_fileCommonClose(M4OSA_UInt16 core_id, M4OSA_Context pContext)
+{
+ M4OSA_FileContext* pFileContext = pContext;
+ M4OSA_Int32 i32_err_code=0;
+
+ M4OSA_TRACE3_2("M4OSA_fileCommonClose\tM4OSA_UInt16 %d\tM4OSA_Context 0x%x",
+ core_id, pContext);
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext,
+ M4ERR_PARAMETER, "M4OSA_fileCommonClose: pContext is M4OSA_NULL");
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
+ "M4OSA_fileCommonClose: semaphore_context is M4OSA_NULL");
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ free(pFileContext->url_name);
+ pFileContext->url_name = M4OSA_NULL;
+
+ free(pFileContext->file_name);
+ pFileContext->file_name = M4OSA_NULL;
+
+ i32_err_code = fclose(pFileContext->file_desc);
+
+ pFileContext->file_desc = M4OSA_NULL;
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphoreClose(pFileContext->semaphore_context);/* free the semaphore */
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ free(pFileContext);
+
+ if (i32_err_code != 0)
+ {
+ M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_fileCommonClose");
+ return M4ERR_BAD_CONTEXT;
+ }
+
+ return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function gets the file attributes (associated to the
+ * context)
+ * @param context: (IN) Context of the core file reader
+ * @param attribute: (OUT) The file attribute (allocated by the caller)
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileCommonGetAttribute(M4OSA_Context pContext, M4OSA_FileAttribute* pAttribute)
+{
+
+ M4OSA_FileContext* fileContext = pContext;
+
+ struct stat TheStat;
+
+ M4OSA_TRACE3_2("M4OSA_fileCommonGetAttribute\tM4OSA_Context 0x%x\t"
+ "M4OSA_FileAttribute* 0x%x", pContext, pAttribute);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER, "M4OSA_fileCommonGetAttribute");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pAttribute, M4ERR_PARAMETER, "M4OSA_fileCommonGetAttribute");
+
+ if(stat((char*)fileContext->url_name, &TheStat) != 0)
+ {
+ M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_fileCommonGetAttribute");
+ return M4ERR_BAD_CONTEXT;
+ }
+
+ pAttribute->creationDate.time = (M4OSA_Time)TheStat.st_ctime;
+ pAttribute->lastAccessDate.time = (M4OSA_Time)TheStat.st_atime;
+ pAttribute->modifiedDate.time = (M4OSA_Time)TheStat.st_mtime;
+
+ pAttribute->creationDate.timeScale = 1;
+ pAttribute->lastAccessDate.timeScale = 1;
+ pAttribute->modifiedDate.timeScale = 1;
+
+ pAttribute->creationDate.referenceYear = 1970;
+ pAttribute->lastAccessDate.referenceYear = 1970;
+ pAttribute->modifiedDate.referenceYear = 1970;
+
+ pAttribute->modeAccess = fileContext->access_mode;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief This function gets the file URL (associated to the context).
+ * @note
+ * @param context: (IN) Context of the core file reader
+ * @param url: (OUT) The buffer containing the URL (allocated by
+ * M4OSA_fileCommonGetURL)
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_ALLOC: there is no more memory available
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileCommonGetURL(M4OSA_Context pContext, M4OSA_Char** pUrl)
+{
+ M4OSA_FileContext* pFileContext = pContext;
+ M4OSA_UInt32 uiLength;
+
+ M4OSA_TRACE3_2("M4OSA_fileCommonGetURL\tM4OSA_Context 0x%x\tM4OSA_Char** 0x%x",
+ pContext, pUrl);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileCommonGetURL: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pUrl, M4ERR_PARAMETER,
+ "M4OSA_fileCommonGetURL: pUrl is M4OSA_NULL");
+
+ uiLength = strlen((const char *)pFileContext->url_name)+1;
+
+ /* Allocate the memory to store the url_name */
+ *pUrl = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(uiLength, M4OSA_FILE_COMMON,
+ (M4OSA_Char*)"M4OSA_fileCommonGetURL: url");
+ if(M4OSA_NULL == *pUrl)
+ {
+ M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonGetURL");
+ return M4ERR_ALLOC;
+ }
+
+ M4OSA_chrNCopy(*pUrl, pFileContext->url_name, uiLength);
+
+ return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function gets a string containing the file name associated
+ * to the input URL.
+ * @note The user should not forget to delete the output string using
+ * M4OSA_strDestroy
+ * @param pUrl: (IN) The buffer containing the URL
+ * @param pFileName: (OUT) The string containing the URL. It is
+ * allocated inside this function
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_NOT_IMPLEMENTED: the URL does not match with the supported
+ * file
+ * @return M4ERR_ALLOC: there is no more memory available
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileCommonGetFilename(M4OSA_Char* pUrl, M4OSA_Char** pFileName)
+{
+ M4OSA_Int32 i = 0;
+ M4OSA_Int32 iUrlLen = 0;
+ M4OSA_Int32 FileNameLen = 0;
+
+ M4OSA_Char* ptrUrl = M4OSA_NULL;
+ M4OSA_Char* ptrFilename = M4OSA_NULL;
+
+ M4OSA_TRACE3_2("M4OSA_fileCommonGetURL\tM4OSA_Char* %s\tM4OSA_Char** 0x%x",
+ pUrl, pFileName);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pUrl, M4ERR_PARAMETER,
+ "M4OSA_fileCommonGetFilename: pUrl is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileName, M4ERR_PARAMETER,
+ "M4OSA_fileCommonGetFilename: pFileName is M4OSA_NULL");
+
+ *pFileName = M4OSA_NULL;
+
+ /*Parse URL*/
+ iUrlLen = strlen((const char *)pUrl);
+ for(i=iUrlLen-1; i>=0; i--)
+ {
+ if (pUrl[i] != '\\' && pUrl[i] != '/')
+ {
+ FileNameLen++;
+ }
+ else
+ {
+ break; /* find the beginning of the file name */
+ }
+ }
+
+ ptrFilename = (M4OSA_Char*) M4OSA_32bitAlignedMalloc(FileNameLen+1, M4OSA_FILE_COMMON,
+ (M4OSA_Char*)"M4OSA_fileCommonGetFilename: Filename string");
+ if (ptrFilename == M4OSA_NULL)
+ {
+ M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileCommonGetFilename");
+ return M4ERR_ALLOC;
+ }
+
+ ptrUrl = pUrl + (iUrlLen - FileNameLen);
+ M4OSA_chrNCopy(ptrFilename, ptrUrl, FileNameLen+1);
+
+ *pFileName = ptrFilename;
+
+ return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/osal/src/M4OSA_FileReader.c b/libvideoeditor/osal/src/M4OSA_FileReader.c
new file mode 100755
index 0000000..40a72f5
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_FileReader.c
@@ -0,0 +1,549 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+************************************************************************
+ * @file M4OSA_FileReader.c
+ * @author Cedric Lecoutre (cedric.lecoutre@philips.com)
+ * Laurent Fay (laurent.fay@philips.com)
+ * @par Org: Philips Digital Systems Laboratories - Paris (PDSL-P)
+ * @brief File reader for Android
+ * @note This file implements functions to read a file.
+ ************************************************************************
+*/
+
+
+#include "M4OSA_Debug.h"
+#include "M4OSA_FileCommon_priv.h"
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileReader_priv.h"
+#include "M4OSA_Memory.h"
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+#include "M4OSA_Semaphore.h"
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+
+/**
+************************************************************************
+* @brief This function opens the provided URL and returns its context.
+* If an error occurred, the context is set to NULL.
+* @param context: (OUT) Context of the core file reader
+* @param url: (IN) URL of the input file
+* @param fileModeAccess: (IN) File mode access
+* @return M4NO_ERROR: there is no error
+* @return M4ERR_PARAMETER: at least one parameter is NULL
+* @return M4ERR_ALLOC: there is no more memory available
+* @return M4ERR_NOT_IMPLEMENTED: the URL does not match with the supported
+* file
+* @return M4ERR_FILE_NOT_FOUND: the file cannot be found
+* @return M4ERR_FILE_LOCKED: the file is locked by another
+* application/process
+* @return M4ERR_FILE_BAD_MODE_ACCESS: the file mode access is not correct
+************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadOpen(M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
+ M4OSA_UInt32 fileModeAccess)
+{
+ M4OSA_TRACE1_3("M4OSA_fileReadOpen : pC = 0x%p fd = 0x%p mode = %lu",
+ pContext, pFileDescriptor, fileModeAccess);
+
+ return M4OSA_fileCommonOpen(M4OSA_FILE_READER, pContext,
+ pFileDescriptor, fileModeAccess);
+}
+
+/**
+************************************************************************
+* @brief This function reads the 'size' bytes in the core file reader
+* (selected by its 'context') and writes the data to the 'data'
+* pointer.
+* @note If 'size' byte cannot be read in the core file reader, 'size'
+* parameter is updated to match the correct
+* @note number of read bytes.
+* @param context: (IN/OUT) Context of the core file reader
+* @param buffer: (OUT) Data pointer of the read data
+* @param size: (IN/OUT) Size of the data to read (in bytes)
+* @return M4NO_ERROR: there is no error
+* @return M4ERR_PARAMETER: at least one parameter is NULL
+* @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+* @return M4ERR_ALLOC: there is no more memory available
+* @return M4WAR_NO_DATA_YET: there is not enough data to fill the 'data'
+* buffer, so the size parameter has been updated.
+************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadData(M4OSA_Context pContext, M4OSA_MemAddr8 data,
+ M4OSA_UInt32* pSize)
+{
+ M4OSA_FileContext* pFileContext = pContext;
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_Int32 uiSizeRead;
+
+ M4OSA_TRACE2_2("M4OSA_fileReadData : data = 0x%p size = %lu",
+ data, (M4OSA_NULL != pSize) ? (*pSize) : 0);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileReadData: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == data, M4ERR_PARAMETER,
+ "M4OSA_fileReadData: data is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pSize, M4ERR_PARAMETER,
+ "M4OSA_fileReadData: pSize is M4OSA_NULL");
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
+ M4ERR_BAD_CONTEXT, "M4OSA_fileReadData: semaphore_context is M4OSA_NULL");
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ if(M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess) /* read write */
+ {
+ uiSizeRead = fread(data, sizeof(M4OSA_Char), *pSize,
+ pFileContext->file_desc);
+ if(-1 == uiSizeRead)
+ {
+ /* handle is invalid, or the file is not open for reading, or the file is locked */
+ *pSize = 0;
+ err = M4ERR_BAD_CONTEXT;
+ }
+ else
+ {
+ pFileContext->read_position = pFileContext->read_position + uiSizeRead;
+ if ((M4OSA_UInt32)uiSizeRead < *pSize)
+ {
+ *pSize = uiSizeRead;
+ /* This is the end of file */
+ pFileContext->b_is_end_of_file = M4OSA_TRUE;
+ err = M4WAR_NO_DATA_YET;
+ }
+ else
+ {
+ *pSize = uiSizeRead;
+ }
+ }
+
+ return err;
+ }
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ if(pFileContext->current_seek != SeekRead)
+ {
+ /* fseek to the last read position */
+ err = M4OSA_fileCommonSeek(pContext, M4OSA_kFileSeekBeginning,
+ &(pFileContext->read_position));
+ if(M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_DEBUG(err, "M4OSA_fileReadData: M4OSA_fileCommonSeek");
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ return err;
+ }
+
+ pFileContext->current_seek = SeekRead;
+ }
+
+ /* Read data */
+ uiSizeRead = fread(data, sizeof(M4OSA_Char), *pSize,
+ pFileContext->file_desc);
+ if(-1 == uiSizeRead)
+ {
+ /* handle is invalid, or the file is not open for reading,
+ or the file is locked */
+ *pSize = 0;
+ err = M4ERR_BAD_CONTEXT;
+ }
+ else
+ {
+ pFileContext->read_position = pFileContext->read_position + uiSizeRead;
+ if ((M4OSA_UInt32)uiSizeRead < *pSize)
+ {
+ *pSize = uiSizeRead;
+
+ /* This is the end of file */
+ pFileContext->b_is_end_of_file = M4OSA_TRUE;
+
+ err = M4WAR_NO_DATA_YET;
+ }
+ else
+ {
+ *pSize = uiSizeRead;
+ }
+ }
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+
+ return err;
+}
+
+
+/**
+************************************************************************
+ * @brief This function seeks at the provided position in the core file
+ * reader (selected by its 'context'). The position is related to
+ * the seekMode parameter it can be either from the beginning, from
+ * the end or from the current position. To support large file
+ * access (more than 2GBytes), the position is provided on a 64
+ * bits.
+ * @note If this function returns an error the current position pointer
+ * in the file must not change. Else the current
+ * position pointer must be updated.
+ * @param context: (IN/OUT) Context of the core file reader
+ * @param seekMode: (IN) Seek access mode
+ * @param position: (IN/OUT) Position in the file
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_ALLOC: there is no more memory available
+ * @return M4ERR_FILE_INVALID_POSITION: the position cannot be reached
+ ************************************************************************
+*/
+
+M4OSA_ERR M4OSA_fileReadSeek(M4OSA_Context pContext, M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* pPosition)
+{
+ M4OSA_FileContext* pFileContext = (M4OSA_FileContext*)pContext;
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_2("M4OSA_fileReadSeek : mode = %d pos = %lu", seekMode,
+ (pPosition != M4OSA_NULL) ? (*pPosition) : 0);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileReadSeek: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(0 == seekMode, M4ERR_PARAMETER,
+ "M4OSA_fileReadSeek: seekMode is 0");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pPosition, M4ERR_PARAMETER,
+ "M4OSA_fileReadSeek: pPosition is M4OSA_NULL");
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
+ M4ERR_BAD_CONTEXT, "M4OSA_fileReadSeek: semaphore_context is M4OSA_NULL");
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ if (M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess)
+ {
+ M4OSA_UInt32 SeekModeOption;
+ /* Go to the desired position */
+ if (M4OSA_kFileSeekBeginning == seekMode)
+ {
+ SeekModeOption = SEEK_SET;
+ }
+ else if (M4OSA_kFileSeekEnd == seekMode)
+ {
+ SeekModeOption = SEEK_END;
+ }
+ else if (M4OSA_kFileSeekCurrent == seekMode)
+ {
+ SeekModeOption = SEEK_CUR;
+ }
+ else
+ {
+ M4OSA_TRACE1_0("M4OSA_fileReadSeek: END WITH ERROR !!! (CONVERION ERROR FOR THE SEEK MODE)");
+ return M4ERR_PARAMETER;
+ }
+
+ /**
+ * Go to the desired position */
+ err = fseek(pFileContext->file_desc, *pPosition, SeekModeOption);
+ if(err != 0)
+ {
+ /* converts the error to PSW format*/
+ err=((M4OSA_UInt32)(M4_ERR)<<30)+(((M4OSA_FILE_WRITER)&0x003FFF)<<16)+(M4OSA_Int16)(err);
+ M4OSA_TRACE1_1("M4OSA_FileReadSeek error:%x",err);
+ }
+ else
+ {
+ return M4NO_ERROR;
+ }
+
+ /* Return without error */
+ return err;
+ }
+
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ if(pFileContext->current_seek != SeekRead)
+ {
+
+ /* fseek to the last read position */
+ err = M4OSA_fileCommonSeek(pContext, M4OSA_kFileSeekBeginning,
+ &(pFileContext->read_position));
+ if(M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_DEBUG(err, "M4OSA_fileReadData: M4OSA_fileCommonSeek");
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ return err;
+ }
+
+ pFileContext->current_seek = SeekRead;
+ }
+
+ err = M4OSA_fileCommonSeek(pContext, seekMode, pPosition);
+ if(M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_DEBUG(err, "M4OSA_fileReadData: M4OSA_fileCommonSeek");
+ }
+ else
+ {
+ pFileContext->read_position = *pPosition;
+ }
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ return err;
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function asks the core file reader to close the file
+ * (associated to the context).
+ * @note The context of the core file reader is freed.
+ * @param pContext: (IN/OUT) Context of the core file reader
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_ALLOC: there is no more memory available
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadClose(M4OSA_Context pContext)
+{
+ M4OSA_FileContext* pFileContext = (M4OSA_FileContext*)pContext;
+
+ M4OSA_TRACE1_1("M4OSA_fileReadClose : pC = 0x%p", pContext);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileReadClose: pContext is M4OSA_NULL");
+
+ if(M4OSA_FILE_WRITER == pFileContext->coreID_write)
+ {
+ return M4NO_ERROR;
+ }
+
+ return M4OSA_fileCommonClose(M4OSA_FILE_READER, pContext);
+}
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This function asks the core file reader to return the value
+ * associated with the optionID. The caller is responsible for
+ * allocating/de-allocating the memory of the value field.
+ * @note 'value' must be cast according to the type related to the
+ * optionID As the caller is responsible for
+ * allocating/de-allocating the 'value' field, the callee must copy
+ * this field to its internal variable.
+ * @param pContext: (IN/OUT) Context of the core file reader
+ * @param pOptionID: (IN) ID of the option
+ * @param pOptionValue: (OUT) Value of the option
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_BAD_OPTION_ID: the optionID is not a valid one
+ * @return M4ERR_WRITE_ONLY: this option is a write only one
+ * @return M4ERR_NOT_IMPLEMENTED: this option is not implemented
+ ************************************************************************
+*/
M4OSA_ERR M4OSA_fileReadGetOption(M4OSA_Context pContext, M4OSA_FileReadOptionID optionID,
                                  M4OSA_DataOption* pOptionValue)
{
    M4OSA_FileContext* pFileContext = pContext;

    M4OSA_TRACE2_1("M4OSA_fileReadGetOption : option = 0x%x", optionID);

    /* Parameter sanity checks (active only at debug level 2). */
    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
        "M4OSA_fileReadGetOption: pContext is M4OSA_NULL");
    M4OSA_DEBUG_IF2(optionID == 0, M4ERR_PARAMETER,
        "M4OSA_fileReadGetOption: optionID is 0");
    M4OSA_DEBUG_IF2(M4OSA_NULL == pOptionValue, M4ERR_PARAMETER,
        "M4OSA_fileReadGetOption: pOptionValue is M4OSA_NULL");

    /* The option must belong to the file-reader core ID and be readable. */
    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_FILE_READER),
        M4ERR_BAD_OPTION_ID, "M4OSA_fileReadGetOption");
    M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_READABLE(optionID),
        M4ERR_WRITE_ONLY, "M4OSA_fileReadGetOption");
#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
        M4ERR_BAD_CONTEXT,
        "M4OSA_fileReadGetOption: semaphore_context is M4OSA_NULL");
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

    switch(optionID)
    {
#if(M4OSA_OPTIONID_FILE_READ_GET_FILE_POSITION == M4OSA_TRUE)
    case M4OSA_kFileReadGetFilePosition:
        {
            M4OSA_FilePosition* pPosition = (M4OSA_FilePosition*)pOptionValue;

            /* Snapshot the cached read position under the semaphore so a
             * concurrent read/seek cannot be observed half-done. */
#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
            M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

            *pPosition = pFileContext->read_position;

#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
            M4OSA_semaphorePost(pFileContext->semaphore_context);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

            return M4NO_ERROR;
        }
#endif /*M4OSA_OPTIONID_FILE_READ_GET_FILE_POSITION*/

#if(M4OSA_OPTIONID_FILE_READ_IS_EOF == M4OSA_TRUE)
    case M4OSA_kFileReadIsEOF:
        {
            M4OSA_Bool* bIsEndOfFile = (M4OSA_Bool*)pOptionValue;

            /* Return the cached EOF flag maintained by the read path. */
#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
            M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

            *bIsEndOfFile = pFileContext->b_is_end_of_file;

#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
            M4OSA_semaphorePost(pFileContext->semaphore_context);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

            return M4NO_ERROR;
        }
#endif /*M4OSA_OPTIONID_FILE_READ_IS_EOF*/


#if(M4OSA_OPTIONID_FILE_READ_GET_FILE_SIZE == M4OSA_TRUE)
    case M4OSA_kFileReadGetFileSize:
        {
            M4OSA_FilePosition* pPosition = (M4OSA_FilePosition*)pOptionValue;
            M4OSA_Int32 iSavePos = 0;
            M4OSA_Int32 iSize = 0;

#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
            M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
            /**
             * Bugfix: update the file size.
             * When a file is in read mode, may be another application is writing in.
             * So, we have to update the file size.
             * The current stream position is saved and restored so the read
             * pointer is not disturbed by the size probe. */
            iSavePos = ftell(pFileContext->file_desc);          /*1- Check the first position */
            fseek(pFileContext->file_desc, 0, SEEK_END);        /*2- Go to the end of the file */
            iSize = ftell(pFileContext->file_desc);             /*3- Check the file size*/
            fseek(pFileContext->file_desc, iSavePos, SEEK_SET); /*4- go to the first position*/
            pFileContext->file_size = iSize;

            *pPosition = pFileContext->file_size;

#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
            M4OSA_semaphorePost(pFileContext->semaphore_context);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

            return M4NO_ERROR;
        }
#endif /*M4OSA_OPTIONID_FILE_READ_GET_FILE_SIZE*/

#if(M4OSA_OPTIONID_FILE_READ_GET_FILE_ATTRIBUTE == M4OSA_TRUE)
    case M4OSA_kFileReadGetFileAttribute:
        {
            /* Attribute retrieval is shared with the writer in the common layer. */
            return M4OSA_fileCommonGetAttribute(pContext,
                (M4OSA_FileAttribute*)pOptionValue);
        }
#endif /*M4OSA_OPTIONID_FILE_READ_GET_FILE_ATTRIBUTE*/

#if(M4OSA_OPTIONID_FILE_READ_GET_URL == M4OSA_TRUE)
    case M4OSA_kFileReadGetURL:
        {
            /* NOTE(review): per the function contract the caller owns 'value'
             * memory — confirm M4OSA_fileCommonGetURL copies rather than
             * aliasing the context's internal URL buffer. */
            return M4OSA_fileCommonGetURL(pContext, (M4OSA_Char**)pOptionValue);
        }
#endif /*M4OSA_OPTIONID_FILE_READ_GET_URL*/

    case M4OSA_kFileReadLockMode:
        {
            /* Lock mode is a plain context field; no semaphore is taken here. */
            *(M4OSA_UInt32*)pOptionValue = pFileContext->m_uiLockMode;
            return M4NO_ERROR;
        }
    }

    /* Any option not handled above (or compiled out) is unsupported. */
    M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_fileReadGetOption");

    return M4ERR_NOT_IMPLEMENTED;
}
+
+/**
+ ************************************************************************
+ * @fn M4OSA_ERR M4OSA_fileReadSetOption (M4OSA_Context context,
+ * M4OSA_OptionID optionID, M4OSA_DataOption optionValue))
+ * @brief This function asks the core file reader to set the value associated with the optionID.
+ * The caller is responsible for allocating/de-allocating the memory of the value field.
+ * @note As the caller is responsible for allocating/de-allocating the 'value' field, the callee must copy this field
+ * to its internal variable.
+ * @param pContext: (IN/OUT) Context of the core file reader
+ * @param optionID: (IN) ID of the option
+ * @param value: (IN) Value of the option
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_BAD_OPTION_ID: the optionID is not a valid one
+ * @return M4ERR_READ_ONLY: this option is a read only one
+ * @return M4ERR_NOT_IMPLEMENTED: this option is not implemented
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileReadSetOption(M4OSA_Context pContext,
+ M4OSA_FileReadOptionID optionID,
+ M4OSA_DataOption optionValue)
+{
+ M4OSA_FileContext* pFileContext = pContext;
+
+ M4OSA_TRACE2_1("M4OSA_fileReadSetOption : option = 0x%x", optionID);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileReadSetOption: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(0 == optionID, M4ERR_PARAMETER,
+ "M4OSA_fileReadSetOption");
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_FILE_READER),
+ M4ERR_BAD_OPTION_ID, "M4OSA_fileReadSetOption");
+
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_WRITABLE(optionID),
+ M4ERR_READ_ONLY, "M4OSA_fileReadSetOption");
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
+ M4ERR_BAD_CONTEXT,
+ "M4OSA_fileReadSetOption: semaphore_context is M4OSA_NULL");
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ switch(optionID)
+ {
+ case M4OSA_kFileReadLockMode:
+ {
+ pFileContext->m_uiLockMode= (M4OSA_UInt32)*(M4OSA_UInt32*)optionValue;
+ return M4NO_ERROR;
+ }
+ default:
+ M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_fileReadSetOption");
+ return M4ERR_NOT_IMPLEMENTED;
+ }
+}
+
diff --git a/libvideoeditor/osal/src/M4OSA_FileWriter.c b/libvideoeditor/osal/src/M4OSA_FileWriter.c
new file mode 100755
index 0000000..37fc173
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_FileWriter.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_FileWriter.c
+ * @brief File writer for Android
+ * @note This file implements functions to write in a file.
+ ************************************************************************
+*/
+
+#include "M4OSA_Debug.h"
+#include "M4OSA_FileCommon_priv.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_FileWriter_priv.h"
+#include "M4OSA_Memory.h"
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+#include "M4OSA_Semaphore.h"
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+/**
+ ************************************************************************
+ * @brief This function opens the provided URL and returns its context.
 * If an error occurred, the context is set to NULL.
+ * @param pContext: (OUT) Context of the core file writer
+ * @param pUrl: (IN) URL of the input file
+ * @param fileModeAccess: (IN) File mode access
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_ALLOC: there is no more memory available
+ * @return M4ERR_NOT_IMPLEMENTED: the URL does not match with the supported
+ * file
+ * @return M4ERR_FILE_NOT_FOUND: the file cannot be found
+ * @return M4ERR_FILE_LOCKED: the file is locked by an other
+ * application/process
+ * @return M4ERR_FILE_BAD_MODE_ACCESS: the file mode access is not correct
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileWriteOpen(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+ M4OSA_UInt32 fileModeAccess)
+{
+ M4OSA_TRACE1_3("M4OSA_fileWriteOpen : pC = 0x%p fd = 0x%p mode = %d",
+ pContext, pUrl, fileModeAccess);
+
+ return M4OSA_fileCommonOpen(M4OSA_FILE_WRITER, pContext, pUrl,
+ fileModeAccess);
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function writes the 'size' bytes stored at 'data' memory
+ * in the file selected by its context.
+ * @note The caller is responsible for allocating/de-allocating the
+ * memory for 'data' parameter.
+ * @note Moreover the data pointer must be allocated to store at least
+ * 'size' bytes.
+ * @param pContext: (IN/OUT) Context of the core file reader
+ * @param buffer: (IN) Data pointer of the write data
+ * @param size: (IN) Size of the data to write (in bytes)
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_ALLOC: there is no more memory available
+ ************************************************************************
+*/
M4OSA_ERR M4OSA_fileWriteData(M4OSA_Context pContext, M4OSA_MemAddr8 data,
                              M4OSA_UInt32 uiSize)
{
    M4OSA_FileContext* pFileContext = pContext;
    M4OSA_ERR err;
    M4OSA_UInt32 uiSizeWrite;

    M4OSA_TRACE2_2("M4OSA_fileWriteData : data = 0x%p size = %lu", data,
        uiSize);

    /* Parameter sanity checks (debug builds only). */
    M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
        "M4OSA_fileWriteData: pContext is M4OSA_NULL");
    M4OSA_DEBUG_IF2(M4OSA_NULL == data, M4ERR_PARAMETER,
        "M4OSA_fileWriteData: data is M4OSA_NULL");
    M4OSA_DEBUG_IF2(0 == uiSize, M4ERR_PARAMETER,
        "M4OSA_fileWriteData: uiSize is 0");
#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
    M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context,
        M4ERR_BAD_CONTEXT,
        "M4OSA_fileWriteData: semaphore_context is M4OSA_NULL");
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

    /* Fast path for read/write descriptor mode: write + flush without
     * touching the semaphore or the read/write seek bookkeeping. */
    if (M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess)
    {
        M4OSA_UInt32 WriteSize;
        err = M4NO_ERROR;
        WriteSize = fwrite((void *)data,1, uiSize, pFileContext->file_desc);
        if(WriteSize != uiSize)
        {
            /* converts the error to PSW format: severity | core ID | the
             * number of items actually written (not errno) in the low bits */
            err = ((M4OSA_UInt32)(M4_ERR)<<30)+(((M4OSA_FILE_WRITER)&0x003FFF)<<16)+(M4OSA_Int16)(WriteSize);
            M4OSA_TRACE1_1("M4OSA_FileWriteData error:%x",err);
        }
        /* NOTE(review): fflush return value is ignored here — a deferred
         * write failure would go unreported. */
        fflush(pFileContext->file_desc);

        pFileContext->write_position = pFileContext->write_position + WriteSize;

        /* Update the file size */
        if(pFileContext->write_position > pFileContext->file_size)
        {
            pFileContext->file_size = pFileContext->write_position;
        }
        return err;
    }

    /* Slow path: serialize against concurrent reader access. */
#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
    M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

    /* The stream position is shared between the reader and the writer: if
     * the last operation was a read, reposition to the cached write offset. */
    if(pFileContext->current_seek != SeekWrite)
    {
        /* fseek to the last write position */
        err = M4OSA_fileCommonSeek(pContext, M4OSA_kFileSeekBeginning,
            &(pFileContext->write_position));

        if(M4OSA_ERR_IS_ERROR(err))
        {
#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
            M4OSA_semaphorePost(pFileContext->semaphore_context);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
            M4OSA_DEBUG(err, "M4OSA_fileWriteData: M4OSA_fileCommonSeek");
            return err;
        }

        pFileContext->current_seek = SeekWrite;
    }

    /* Write data */
    uiSizeWrite = fwrite(data, sizeof(M4OSA_Char), uiSize, pFileContext->file_desc);

    /* NOTE(review): fwrite returns the number of items written, never
     * (size_t)-1, so this branch looks unreachable; the short-write case
     * below is what actually fires on error — confirm and simplify. */
    if(uiSizeWrite == (M4OSA_UInt32)-1)
    {
#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
        M4OSA_semaphorePost(pFileContext->semaphore_context);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

        /* An error occured */

        M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_fileWriteData: fwrite failed");
        return M4ERR_BAD_CONTEXT;
    }

    /* Account for whatever was actually written, even on a short write. */
    pFileContext->write_position = pFileContext->write_position + uiSizeWrite;

    /* Update the file size */
    if(pFileContext->write_position > pFileContext->file_size)
    {
        pFileContext->file_size = pFileContext->write_position;
    }

    /* Short write: report it as an allocation-style failure. */
    if((M4OSA_UInt32)uiSizeWrite < uiSize)
    {
#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
        M4OSA_semaphorePost(pFileContext->semaphore_context);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

        M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_fileWriteData");
        return M4ERR_ALLOC;
    }

#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
    M4OSA_semaphorePost(pFileContext->semaphore_context);
#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */

    return M4NO_ERROR;
}
+
+
+/**
+ ************************************************************************
+ * @brief This function seeks at the provided position in the core file
+ * writer (selected by its 'context'). The position is related to
+ * the seekMode parameter it can be either from the beginning,
+ * from the end or from the current postion. To support large file
+ * access (more than 2GBytes), the position is provided on a 64
+ * bits.
+ * @note If this function returns an error the current position pointer
+ * in the file must not change. Else the current position pointer
+ * must be updated.
+ * @param pContext: (IN/OUT) Context of the core file reader
+ * @param seekMode: (IN) Seek access mode
+ * @param position: (IN/OUT) Position in the file
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_ALLOC: there is no more memory available
+ * @return M4ERR_FILE_INVALID_POSITION: the position cannot be reached
+ ************************************************************************
+ */
+M4OSA_ERR M4OSA_fileWriteSeek(M4OSA_Context pContext, M4OSA_FileSeekAccessMode seekMode,
+ M4OSA_FilePosition* pPosition)
+{
+ M4OSA_FileContext* pFileContext = (M4OSA_FileContext*)pContext;
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_2("M4OSA_fileWriteSeek : mode = %d pos = %lu",
+ seekMode, (M4OSA_NULL != pPosition) ? (*pPosition) : 0);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileWriteSeek: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(0 == seekMode, M4ERR_PARAMETER,
+ "M4OSA_fileWriteSeek: seemMode is 0");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pPosition, M4ERR_PARAMETER,
+ "M4OSA_fileWriteSeek: pPosition is M4OSA_NULL");
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
+ "M4OSA_fileWriteSeek: semaphore_context is M4OSA_NULL");
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ if (M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess) /* read write */
+ {
+ M4OSA_UInt32 SeekModeOption;
+ /*The position for the seek mode between the SHP and the OSAl part are different */
+ if (M4OSA_kFileSeekBeginning == seekMode)
+ {
+ SeekModeOption = SEEK_SET;
+ }
+ else if (M4OSA_kFileSeekEnd == seekMode)
+ {
+ SeekModeOption = SEEK_END;
+ }
+ else if (M4OSA_kFileSeekCurrent == seekMode)
+ {
+ SeekModeOption = SEEK_CUR;
+ }
+ else
+ {
+ M4OSA_TRACE1_0("M4OSA_fileWriteSeek: END WITH ERROR !!! (CONVERION ERROR FOR THE SEEK MODE) ");
+ return M4ERR_PARAMETER;
+ }
+
+ /**
+ * Go to the desired position */
+ err = fseek(pFileContext->file_desc,*pPosition,SeekModeOption);
+ if(err != 0)
+ {
+ /* converts the error to PSW format*/
+ err=((M4OSA_UInt32)(M4_ERR)<<30)+(((M4OSA_FILE_WRITER)&0x003FFF)<<16)+(M4OSA_Int16)(err);
+ M4OSA_TRACE1_1("M4OSA_FileWriteSeek error:%x",err);
+ }
+ else
+ {
+ return M4NO_ERROR;
+ }
+
+ return err;
+ }
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ err = M4OSA_fileCommonSeek(pContext, seekMode, pPosition);
+
+ if(M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_DEBUG(err, "M4OSA_fileWriteSeek: M4OSA_fileCommonSeek");
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ return err;
+ }
+
+ pFileContext->write_position = *pPosition;
+
+ pFileContext->current_seek = SeekWrite;
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function asks the core file writer to close the file
+ * (associated to the context).
+ * @note The context of the core file writer is freed.
+ * @param pContext: (IN/OUT) Context of the core file writer
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_ALLOC: there is no more memory available
+************************************************************************
+*/
+
+M4OSA_ERR M4OSA_fileWriteClose(M4OSA_Context pContext)
+{
+ M4OSA_FileContext* pFileContext = (M4OSA_FileContext*)pContext;
+
+ M4OSA_TRACE1_1("M4OSA_fileWriteClose : pC = 0x%p", pContext);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileWriteClose: pContext is M4OSA_NULL");
+
+ return M4OSA_fileCommonClose(M4OSA_FILE_WRITER, pContext);
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function flushes the stream associated to the context.
+ * @param pContext: (IN/OUT) Context of the core file writer
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_fileWriteFlush(M4OSA_Context pContext)
+{
+ M4OSA_FileContext* pFileContext = pContext;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4OSA_TRACE2_1("M4OSA_fileWriteFlush : pC = 0x%p", pContext);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileWriteFlush: pcontext is M4OSA_NULL");
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
+ "M4OSA_fileWriteFlush: semaphore_context is M4OSA_NULL");
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ if (fflush(pFileContext->file_desc) != 0)
+ {
+ err = M4ERR_BAD_CONTEXT;
+ }
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ return err;
+}
+
+
+/**
+ ************************************************************************
+ * @brief This function asks the core file writer to return the value
+ * associated with the optionID.
+ * The caller is responsible for allocating/de-allocating the
+ * memory of the value field.
+ * @note 'value' must be cast according to the type related to the
+ * optionID
+ * As the caller is responsible for allocating/de-allocating the
+ * 'value' field, the callee must copy this field
+ * to its internal variable.
+ * @param pContext: (IN/OUT) Context of the core file writer
+ * @param optionID: (IN) ID of the option
+ * @param value: (OUT) Value of the option
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_BAD_OPTION_ID: the optionID is not a valid one
+ * @return M4ERR_WRITE_ONLY: this option is a write only one
+ * @return M4ERR_NOT_IMPLEMENTED: this option is not implemented
+************************************************************************
+*/
+
+M4OSA_ERR M4OSA_fileWriteGetOption(M4OSA_Context pContext, M4OSA_OptionID optionID,
+ M4OSA_DataOption* pOptionValue)
+{
+ M4OSA_FileContext* pFileContext = pContext;
+
+ M4OSA_TRACE2_1("M4OSA_fileWriteGetOption : option = 0x%x", optionID);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileWriteGetOption: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(optionID == 0, M4ERR_PARAMETER, "M4OSA_fileWriteGetOption");
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pOptionValue, M4ERR_PARAMETER,
+ "M4OSA_fileWriteGetOption: pOtionValue is M4OSA_NULL");
+
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_FILE_WRITER),
+ M4ERR_BAD_OPTION_ID, "M4OSA_fileWriteGetOption");
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_READABLE(optionID), M4ERR_WRITE_ONLY,
+ "M4OSA_fileWriteGetOption");
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
+ "M4OSA_fileWriteGetOption: semaphore_context is M4OSA_NULL");
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ switch(optionID)
+ {
+#if(M4OSA_OPTIONID_FILE_WRITE_GET_FILE_POSITION == M4OSA_TRUE)
+ case M4OSA_kFileWriteGetFilePosition:
+ {
+ M4OSA_FilePosition* position = (M4OSA_FilePosition*)pOptionValue;
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ *position = pFileContext->write_position;
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ return M4NO_ERROR;
+ }
+#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_FILE_POSITION*/
+
+#if(M4OSA_OPTIONID_FILE_WRITE_GET_FILE_SIZE == M4OSA_TRUE)
+ case M4OSA_kFileWriteGetFileSize:
+ {
+ M4OSA_FilePosition* position = (M4OSA_FilePosition*)pOptionValue;
+
+ if(M4OSA_kDescRWAccess == pFileContext->m_DescrModeAccess)
+ {
+ M4OSA_Int32 iSavePos = 0;
+ M4OSA_Int32 iSize = 0;
+
+ iSavePos = ftell(pFileContext->file_desc); /*1- Check the first position */
+ fseek(pFileContext->file_desc, 0, SEEK_END); /*2- Go to the end of the file */
+ *position = ftell(pFileContext->file_desc); /*3- Check the file size*/
+ fseek(pFileContext->file_desc, iSavePos, SEEK_SET); /*4- go to the first position*/
+ return M4NO_ERROR;
+ }
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphoreWait(pFileContext->semaphore_context, M4OSA_WAIT_FOREVER);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ *position = pFileContext->file_size;
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_semaphorePost(pFileContext->semaphore_context);
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ return M4NO_ERROR;
+ }
+#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_FILE_SIZE*/
+
+#if(M4OSA_OPTIONID_FILE_WRITE_GET_URL == M4OSA_TRUE)
+ case M4OSA_kFileWriteGetURL:
+ {
+ return M4OSA_fileCommonGetURL (pContext, (M4OSA_Char**)pOptionValue);
+ }
+#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_URL*/
+
+#if(M4OSA_OPTIONID_FILE_WRITE_GET_FILE_ATTRIBUTE == M4OSA_TRUE)
+ case M4OSA_kFileWriteGetAttribute:
+ {
+ return M4OSA_fileCommonGetAttribute(pContext,
+ (M4OSA_FileAttribute*)pOptionValue);
+ }
+#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_FILE_ATTRIBUTE*/
+
+#if(M4OSA_OPTIONID_FILE_WRITE_GET_READER_CONTEXT == M4OSA_TRUE)
+ case M4OSA_kFileWriteGetReaderContext:
+ {
+ M4OSA_FileModeAccess access = pFileContext->access_mode;
+
+ M4OSA_DEBUG_IF1(!(access & M4OSA_kFileRead), M4ERR_BAD_CONTEXT,
+ "M4OSA_fileWriteGetOption: M4OSA_kFileRead");
+
+ M4OSA_DEBUG_IF1(!(access & M4OSA_kFileWrite), M4ERR_BAD_CONTEXT,
+ "M4OSA_fileWriteGetOption: M4OSA_kFileWrite");
+
+ pFileContext->coreID_read = M4OSA_FILE_READER;
+
+ *pOptionValue = pContext;
+
+ return M4NO_ERROR;
+ }
+#endif /*M4OSA_OPTIONID_FILE_WRITE_GET_READER_CONTEXT*/
+
+ case M4OSA_kFileWriteLockMode:
+ {
+ *(M4OSA_UInt32*)pOptionValue = pFileContext->m_uiLockMode;
+ return M4NO_ERROR;
+ }
+
+ }
+
+ M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_fileWriteGetOption");
+
+ return M4ERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+************************************************************************
+* @brief This function asks the core file writer to set the value
+* associated with the optionID.
+* The caller is responsible for allocating/de-allocating the
+* memory of the value field.
+* @note As the caller is responsible for allocating/de-allocating the
+* 'value' field, the callee must copy this field to its internal
+* variable.
+* @param pContext: (IN/OUT) Context of the core file writer
+* @param optionID: (IN) ID of the option
+* @param value: (IN) Value of the option
+* @return M4NO_ERROR: there is no error
+* @return M4ERR_PARAMETER: at least one parameter is NULL
+* @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+* @return M4ERR_BAD_OPTION_ID: the optionID is not a valid one
+* @return M4ERR_READ_ONLY: this option is a read only one
+* @return M4ERR_NOT_IMPLEMENTED: this option is not implemented
+************************************************************************
+*/
+
+M4OSA_ERR M4OSA_fileWriteSetOption(M4OSA_Context pContext,
+ M4OSA_OptionID optionID,
+ M4OSA_DataOption optionValue)
+{
+ M4OSA_FileContext* pFileContext = pContext;
+
+ M4OSA_TRACE2_1("M4OSA_fileWriteSetOption : option = 0x%x", optionID);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_fileWriteSetOption");
+
+ M4OSA_DEBUG_IF2(0 == optionID, M4ERR_PARAMETER, "M4OSA_fileWriteSetOption");
+
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_FILE_WRITER),
+ M4ERR_BAD_OPTION_ID, "M4OSA_fileWriteSetOption");
+
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_WRITABLE(optionID), M4ERR_READ_ONLY,
+ "M4OSA_fileReadSetOption");
+
+#ifdef M4OSA_FILE_BLOCK_WITH_SEMAPHORE
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pFileContext->semaphore_context, M4ERR_BAD_CONTEXT,
+ "M4OSA_fileWriteSetOption: semaphore_context is M4OSA_NULL");
+#endif /* M4OSA_FILE_BLOCK_WITH_SEMAPHORE */
+
+ switch(optionID)
+ {
+ case M4OSA_kFileWriteLockMode:
+ {
+ pFileContext->m_uiLockMode = (M4OSA_UInt32)*(M4OSA_UInt32*)optionValue;
+ return M4NO_ERROR;
+ }
+
+ case M4OSA_kFileWriteDescMode:
+ {
+ pFileContext->m_DescrModeAccess = (M4OSA_Int32)*(M4OSA_Int32*)optionValue;
+ return M4NO_ERROR;
+ }
+
+ default:
+ return M4ERR_NOT_IMPLEMENTED;
+ }
+
+ return M4ERR_NOT_IMPLEMENTED;
+}
+
diff --git a/libvideoeditor/osal/src/M4OSA_Mutex.c b/libvideoeditor/osal/src/M4OSA_Mutex.c
new file mode 100755
index 0000000..bbe6bba
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_Mutex.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @brief Mutex for Android
+ * @note This file implements functions to manipulate mutex
+ ************************************************************************
+*/
+
+#include "M4OSA_Debug.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Mutex.h"
+
+#include <pthread.h>
+#include <errno.h>
+
+
/* Internal context describing one OSAL mutex instance.
   Allocated by M4OSA_mutexOpen and released by M4OSA_mutexClose. */
typedef struct
{
    M4OSA_UInt32 coreID;        /* context identifier; always M4OSA_MUTEX, checked by every API call */
    pthread_mutex_t mutex;      /* underlying POSIX mutex (created recursive in M4OSA_mutexOpen) */
    pthread_t threadOwnerID;    /* ID of the thread currently holding the lock, or 0 when unowned */
} M4OSA_MutexContext;
+
+
+
+/**
+ ************************************************************************
+ * @brief This method creates a new mutex.
+ * @note This function creates and allocates a unique context. It's the
+ * OSAL real time responsibility for managing its context. It must
+ * be freed by the M4OSA_mutexClose function. The context parameter
+ * will be sent back to any OSAL core mutex functions to allow
+ * retrieving data associated to the opened mutex.
+ * @param pContext:(OUT) Context of the created mutex
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: there is no more available memory
+ * @return M4ERR_CONTEXT_FAILED: the context creation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_mutexOpen(M4OSA_Context* pContext)
+{
+ M4OSA_MutexContext* pMutexContext = (M4OSA_MutexContext*)M4OSA_NULL;
+ pthread_mutexattr_t attribute = { 0 };
+ M4OSA_Bool opened = M4OSA_FALSE;
+
+ M4OSA_TRACE1_1("M4OSA_mutexOpen\t\tM4OSA_Context* 0x%x", pContext);
+ M4OSA_DEBUG_IF2(M4OSA_NULL == pContext, M4ERR_PARAMETER,
+ "M4OSA_mutexOpen: pContext is M4OSA_NULL");
+
+ *pContext = M4OSA_NULL;
+
+ pMutexContext = (M4OSA_MutexContext*)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_MutexContext),
+ M4OSA_MUTEX, (M4OSA_Char*)"M4OSA_mutexOpen: mutex context");
+
+ if(M4OSA_NULL == pMutexContext)
+ {
+ M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_mutexOpen");
+ return M4ERR_ALLOC;
+ }
+
+ /* Initialize the mutex attribute. */
+ if ( 0 == pthread_mutexattr_init( &attribute ) )
+ {
+ /* Initialize the mutex type. */
+ if ( 0 == pthread_mutexattr_settype( &attribute, PTHREAD_MUTEX_RECURSIVE ) )
+ {
+ /* Initialize the mutex. */
+ if (0 == pthread_mutex_init( &pMutexContext->mutex, &attribute ) )
+ {
+ opened = M4OSA_TRUE;
+ }
+ }
+
+ /* Destroy the mutex attribute. */
+ pthread_mutexattr_destroy( &attribute );
+ }
+
+ if(!opened)
+ {
+ M4OSA_DEBUG(M4ERR_CONTEXT_FAILED, "M4OSA_mutexOpen: OS mutex creation failed");
+ free(pMutexContext);
+ return M4ERR_CONTEXT_FAILED ;
+ }
+
+ pMutexContext->coreID = M4OSA_MUTEX;
+
+ pMutexContext->threadOwnerID = 0;
+
+ *pContext = (M4OSA_Context) pMutexContext;
+
+ return M4NO_ERROR;
+}
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method locks the mutex. "Context" identifies the mutex.
+ * @note If the mutex is already locked, the calling thread blocks until
+ * the mutex becomes available (by calling M4OSA_mutexUnlock) or
+ * "timeout" is reached. This is a blocking call.
+ * @param context:(IN/OUT) Context of the mutex
+ * @param timeout:(IN) Time out in milliseconds
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4WAR_TIME_OUT: time out is elapsed before mutex has been
+ * available
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_mutexLock(M4OSA_Context context, M4OSA_UInt32 timeout)
+{
+ M4OSA_MutexContext* pMutexContext = (M4OSA_MutexContext*)context;
+ pthread_t currentThread;
+ int result;
+ struct timespec ts;
+ struct timespec left;
+
+ M4OSA_TRACE1_2("M4OSA_mutexLock\t\tM4OSA_Context 0x%x\tM4OSA_UInt32 %d",
+ context, timeout);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == context, M4ERR_PARAMETER,
+ "M4OSA_mutexLock: context is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(pMutexContext->coreID != M4OSA_MUTEX,
+ M4ERR_BAD_CONTEXT, "M4OSA_mutexLock");
+
+ currentThread = pthread_self();
+
+ if(pMutexContext ->threadOwnerID == currentThread)
+ {
+ M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_mutexLock: Thread tried to lock a mutex it already owns");
+ return M4ERR_BAD_CONTEXT ;
+ }
+
+ /* Lock the mutex. */
+ if ( M4OSA_WAIT_FOREVER == timeout)
+ {
+ if ( 0 != pthread_mutex_lock(&pMutexContext->mutex) )
+ {
+ M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_mutexLock: OS mutex wait failed");
+ return M4ERR_BAD_CONTEXT;
+ }
+ }
+ else
+ {
+ result = pthread_mutex_trylock(&pMutexContext->mutex);
+ while ( ( EBUSY == result ) && ( 0 < timeout ) )
+ {
+ ts.tv_sec = 0;
+ if (1 <= timeout)
+ {
+ ts.tv_nsec = 1000000;
+ timeout -= 1;
+ }
+ else
+ {
+ ts.tv_nsec = timeout * 1000000;
+ timeout = 0;
+ }
+ nanosleep(&ts, &left);
+ result = pthread_mutex_trylock(&pMutexContext->mutex);
+ }
+ if (0 != result)
+ {
+ if (EBUSY == result)
+ {
+ return M4WAR_TIME_OUT;
+ }
+ else
+ {
+ M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_mutexLock: OS mutex wait failed");
+ return M4ERR_BAD_CONTEXT;
+ }
+ }
+ }
+
+ pMutexContext->threadOwnerID = currentThread;
+
+ return M4NO_ERROR;
+}
+
+
+
+/**
+ ************************************************************************
+ * @brief This method unlocks the mutex. The mutex is identified by
+ * its context
+ * @note The M4OSA_mutexLock unblocks the thread with the highest
+ * priority and made it ready to run.
+ * @note No hypotheses can be made on which thread will be un-blocked
+ * between threads with the same priority.
+ * @param context:(IN/OUT) Context of the mutex
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+************************************************************************
+*/
+M4OSA_ERR M4OSA_mutexUnlock(M4OSA_Context context)
+{
+ M4OSA_MutexContext* pMutexContext = (M4OSA_MutexContext*)context;
+ pthread_t currentThread;
+
+ M4OSA_TRACE1_1("M4OSA_mutexUnlock\t\tM4OSA_Context 0x%x", context);
+ M4OSA_DEBUG_IF2(M4OSA_NULL == context, M4ERR_PARAMETER,
+ "M4OSA_mutexUnlock: context is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(M4OSA_MUTEX != pMutexContext->coreID,
+ M4ERR_BAD_CONTEXT, "M4OSA_mutexUnlock");
+
+ currentThread = pthread_self();
+
+ if(pMutexContext->threadOwnerID != currentThread)
+ {
+ M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_mutexUnlock: Thread tried to unlock a mutex it doesn't own");
+ return M4ERR_BAD_CONTEXT;
+ }
+
+ pMutexContext->threadOwnerID = 0 ;
+
+ pthread_mutex_unlock(&pMutexContext->mutex);
+
+ return M4NO_ERROR;
+}
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method deletes a mutex (identify by its context). After
+ * this call, the mutex and its context is no more useable. This
+ * function frees all the memory related to this mutex.
+ * @note It is an application issue to warrant no more threads are locked
+ * on the deleted mutex.
+ * @param context:(IN/OUT) Context of the mutex
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_mutexClose(M4OSA_Context context)
+{
+ M4OSA_MutexContext* pMutexContext = (M4OSA_MutexContext*)context;
+
+ M4OSA_TRACE1_1("M4OSA_mutexClose\t\tM4OSA_Context 0x%x", context);
+
+ M4OSA_DEBUG_IF2(M4OSA_NULL == context, M4ERR_PARAMETER,
+ "M4OSA_mutexClose: context is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(pMutexContext->coreID != M4OSA_MUTEX,
+ M4ERR_BAD_CONTEXT, "M4OSA_mutexUnlock");
+
+ pthread_mutex_destroy(&pMutexContext->mutex);
+
+ free( pMutexContext);
+
+ return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/osal/src/M4OSA_Random.c b/libvideoeditor/osal/src/M4OSA_Random.c
new file mode 100755
index 0000000..c24d039
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_Random.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file         M4OSA_Random.c
+ * @brief        Random number generation
+ * @note         This file implements the random number generator
+ *               initialization and draw functions of the OSAL API
+ ************************************************************************
+*/
+
+#include <stdio.h> /*for printf */
+#include <stdarg.h> /* ANSI C macros and defs for variable args */
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Mutex.h"
+/**
+ ************************************************************************
+ * @fn M4OSA_ERR M4OSA_randInit()
+ * @brief this function initialize the number generator
+ * this function must be called once before any call to M4OSA_rand()
+ * need the stdlib and time libraries
+ * @note
+ * @param
+ * @return M4NO_ERROR
+ ************************************************************************
+*/
+
+M4OSA_ERR M4OSA_randInit()
+{
+ int i;
+
+ srand(time(NULL));
+
+ /* Windows' rand is rotten, the first generated value after the init
+ above is not random enough, so let's shake things a little... */
+
+ for (i=0; i<100; i++) rand();
+
+ return M4NO_ERROR;
+}
+/**
+ ************************************************************************
+ * @fn M4OSA_ERR M4OSA_rand(M4OSA_Int32* out_value, M4OSA_UInt32 max_value)
+ * @brief This function gives a random number between 1 and max_value
+ * (inclusive) with approximately equal probability, and
+ * returns this number in out_value. For instance, a max_value
+ * of 6 will simulate a fair 6-sided dice roll.
+ * @note
+ * @param out_value (OUT): on return, points to random result
+ * @param max_value (IN): max expected value
+ * @return M4NO_ERROR
+ ************************************************************************
+*/
+
+M4OSA_ERR M4OSA_rand(M4OSA_Int32* out_value, M4OSA_UInt32 max_value)
+{
+ if( (out_value == M4OSA_NULL) || (max_value < 1) )
+ {
+ return M4ERR_PARAMETER;
+ }
+
+ (*out_value) = rand();
+ /* notice this algorithm will only work for max_values such that the multiplication
+ won't overflow, which means that max_value typically shouldn't go over the range of
+ an Int16. */
+ (*out_value) = (((*out_value) * max_value) / ((M4OSA_UInt32)RAND_MAX + 1)) + 1;
+
+ return M4NO_ERROR;
+}
+
+
diff --git a/libvideoeditor/osal/src/M4OSA_Semaphore.c b/libvideoeditor/osal/src/M4OSA_Semaphore.c
new file mode 100755
index 0000000..f3b5852
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_Semaphore.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_Semaphore.c
+ * @brief        Semaphore for Android
+ * @note This file implements functions to manipulate semaphore
+ ************************************************************************
+*/
+
+
+
+#include "M4OSA_Debug.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Semaphore.h"
+
+#include <semaphore.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <time.h>
+
+
/* Internal context describing one OSAL semaphore instance.
   Allocated by M4OSA_semaphoreOpen and released by M4OSA_semaphoreClose. */
typedef struct {
    M4OSA_UInt32 coreID;    /* context identifier; always M4OSA_SEMAPHORE, checked by every API call */
    sem_t semaphore;        /* underlying POSIX (unnamed, process-private) semaphore */
} M4OSA_SemaphoreContext;
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method creates a new semaphore with the "initialCounter"
+ * value.
+ * @note This function creates and allocates a unique context. It's the
+ * OSAL real time responsibility for managing its context. It must
+ * be freed by the M4OSA_semaphoreClose function. The context
+ * parameter will be sent back to any OSAL core semaphore functions
+ * to allow retrieving data associated to the opened semaphore.
+ * @param context:(OUT) Context of the created semaphore
+ * @param initial_count:(IN) Initial counter of the semaphore
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: provided context is NULL
+ * @return M4ERR_ALLOC: there is no more available memory
+ * @return M4ERR_CONTEXT_FAILED: the context creation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_semaphoreOpen(M4OSA_Context* context,
+ M4OSA_UInt32 initial_count)
+{
+ M4OSA_SemaphoreContext* semaphoreContext = M4OSA_NULL;
+
+ M4OSA_TRACE1_2("M4OSA_semaphoreOpen\t\tM4OSA_Context* 0x%x\tM4OSA_UInt32 "
+ "%d", context, initial_count);
+
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_semaphoreOpen");
+
+ *context = M4OSA_NULL;
+
+ semaphoreContext = (M4OSA_SemaphoreContext*) M4OSA_32bitAlignedMalloc(
+ sizeof(M4OSA_SemaphoreContext), M4OSA_SEMAPHORE,
+ (M4OSA_Char*)"M4OSA_semaphoreOpen: semaphore context");
+
+ if(semaphoreContext == M4OSA_NULL)
+ {
+ M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_semaphoreOpen");
+
+ return M4ERR_ALLOC;
+ }
+
+ if (0 != sem_init(&semaphoreContext->semaphore, 0, initial_count))
+ {
+ free(semaphoreContext);
+
+ M4OSA_DEBUG(M4ERR_CONTEXT_FAILED,
+ "M4OSA_semaphoreOpen: OS semaphore creation failed");
+
+ return M4ERR_CONTEXT_FAILED;
+ }
+
+ semaphoreContext->coreID = M4OSA_SEMAPHORE ;
+ *context = (M4OSA_Context)semaphoreContext;
+
+ return M4NO_ERROR;
+}
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method decrements (one by one) the semaphore counter. The
+ * semaphore is identified by its context This call is not blocking
+ * if the semaphore counter is positive or zero (after
+ * decrementation). This call is blocking if the semaphore counter
+ * is less than zero (after decrementation), until the semaphore is
+ * upper than zero (see M4OSA_semaphorePost) or time_out is
+ * reached.
+ * @note If "timeout" value is M4OSA_WAIT_FOREVER, the calling thread
+ * will block indefinitely until the semaphore is unlocked.
+ * @param context:(IN/OUT) Context of the semaphore
+ * @param timeout:(IN) Time out in milliseconds
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4WAR_TIME_OUT: time out is elapsed before semaphore has been
+ * available.
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_semaphoreWait(M4OSA_Context context, M4OSA_Int32 timeout)
+{
+ M4OSA_SemaphoreContext* semaphoreContext = (M4OSA_SemaphoreContext*)context;
+ struct timespec ts;
+ struct timespec left;
+ int result;
+
+ M4OSA_TRACE1_2("M4OSA_semaphoreWait\t\tM4OSA_Context 0x%x\tM4OSA_UInt32 %d",
+ context, timeout);
+
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_semaphoreWait");
+
+ M4OSA_DEBUG_IF2(semaphoreContext->coreID != M4OSA_SEMAPHORE,
+ M4ERR_BAD_CONTEXT, "M4OSA_semaphoreWait");
+
+ if ( (M4OSA_Int32)M4OSA_WAIT_FOREVER == timeout)
+ {
+ if ( 0 != sem_wait(&semaphoreContext->semaphore) )
+ {
+ M4OSA_DEBUG(M4ERR_BAD_CONTEXT,
+ "M4OSA_semaphoreWait: OS semaphore wait failed");
+
+ return M4ERR_BAD_CONTEXT ;
+ }
+ }
+ else
+ {
+ result = sem_trywait(&semaphoreContext->semaphore);
+ while ( ((EBUSY == result) || (EAGAIN == result)) && ( 0 < timeout ) )
+ {
+ ts.tv_sec = 0;
+ if (1 <= timeout)
+ {
+ ts.tv_nsec = 1000000;
+ timeout -= 1;
+ }
+ else
+ {
+ ts.tv_nsec = timeout * 1000000;
+ timeout = 0;
+ }
+ nanosleep(&ts, &left);
+ result = sem_trywait(&semaphoreContext->semaphore);
+ }
+ if (0 != result)
+ {
+ if ((EBUSY == result) || (EAGAIN == result))
+ {
+ return M4WAR_TIME_OUT;
+ }
+ else
+ {
+ M4OSA_DEBUG(M4ERR_BAD_CONTEXT, "M4OSA_semaphoreWait: OS semaphore wait failed");
+ return M4ERR_BAD_CONTEXT;
+ }
+ }
+ }
+
+ return M4NO_ERROR;
+}
+
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method increments the semaphore counter. The semaphore is
+ * identified by its context
+ * @note If the semaphore counter is upper than zero (after addition),
+ * the M4OSA_semaphoreWait call of the thread with the highest
+ * priority is unblocked and made ready to run.
+ * @note No hypotheses can be made on which thread will be unblocked
+ * between threads with the same priority.
+ * @param context:(IN/OUT) Context of the semaphore
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+************************************************************************
+*/
+M4OSA_ERR M4OSA_semaphorePost(M4OSA_Context context)
+{
+ M4OSA_SemaphoreContext* semaphoreContext = (M4OSA_SemaphoreContext*)context;
+
+ M4OSA_TRACE1_1("M4OSA_semaphorePost\t\tM4OSA_Context 0x%x", context);
+
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_semaphorePost");
+
+ M4OSA_DEBUG_IF2(semaphoreContext->coreID != M4OSA_SEMAPHORE,
+ M4ERR_BAD_CONTEXT, "M4OSA_semaphorePost");
+
+ sem_post(&semaphoreContext->semaphore);
+
+ return M4NO_ERROR;
+}
+
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method deletes a semaphore (identify by its context).
+ * After this call the semaphore and its context is no more
+ * useable. This function frees all the memory related to this
+ * semaphore.
+ * @note It is an application issue to warrant no more threads are locked
+ * on the deleted semaphore.
+ * @param context:(IN/OUT) Context of the semaphore
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one.
+************************************************************************
+*/
+M4OSA_ERR M4OSA_semaphoreClose(M4OSA_Context context)
+{
+ M4OSA_SemaphoreContext* semaphoreContext = (M4OSA_SemaphoreContext*)context;
+
+ M4OSA_TRACE1_1("M4OSA_semaphoreClose\t\tM4OSA_Context 0x%x", context);
+
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_semaphoreClose");
+
+ M4OSA_DEBUG_IF2(semaphoreContext->coreID != M4OSA_SEMAPHORE,
+ M4ERR_BAD_CONTEXT, "M4OSA_semaphoreClose");
+
+ sem_destroy(&semaphoreContext->semaphore);
+
+ free(semaphoreContext);
+
+ return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/osal/src/M4OSA_Thread.c b/libvideoeditor/osal/src/M4OSA_Thread.c
new file mode 100755
index 0000000..db54245
--- /dev/null
+++ b/libvideoeditor/osal/src/M4OSA_Thread.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ************************************************************************
+ * @file M4OSA_Thread.c
+ * @ingroup OSAL
+ * @brief Implements and manipulate threads
+ * @note This file implements functions to manipulate threads
+ ************************************************************************
+*/
+
+#include <sched.h>
+#include <time.h>
+#include <pthread.h>
+#include <errno.h>
+
+#include <utils/threads.h>
+#include "M4OSA_Debug.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Thread.h"
+#include "M4OSA_Thread_priv.h"
+#include "M4OSA_Mutex.h"
+#include "M4OSA_Semaphore.h"
+#include "M4OSA_CharStar.h"
+
+
/**
 ************************************************************************
 * @brief   Thread entry point installed by M4OSA_threadSyncStart.
 * @note    Repeatedly invokes the user "doIt" function
 *          (threadContext->func) while the thread state stays
 *          M4OSA_kThreadRunning. Posts semStartStop once when the thread
 *          has entered the Running state (unblocking M4OSA_threadSyncStart)
 *          and once more when it leaves the loop after a requested stop
 *          (unblocking M4OSA_threadSyncStop).
 * @param   context: the M4OSA_ThreadContext of this thread
 * @return  always 0
 ************************************************************************
*/
void* M4OSA_threadSyncForEverDo(void *context)
{
    M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
    M4OSA_Bool auto_kill = M4OSA_FALSE; /* NOTE(review): set but never read */

    /*
    M4OSA_Void* userData;
    */

    M4OSA_TRACE2_1("M4OSA_threadSyncForEverDo\t\tLPVOID 0x%x", context);

    /*
    userData = threadContext->userData;
    */

    M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);

    threadContext->state = M4OSA_kThreadRunning;

    /* Signal M4OSA_threadSyncStart that the thread is now running. */
    M4OSA_semaphorePost(threadContext->semStartStop);

    /* Invariant: the state mutex is held whenever the loop condition is
       evaluated, and released while the user function runs. */
    while(threadContext->state == M4OSA_kThreadRunning)
    {
        M4OSA_mutexUnlock(threadContext->stateMutex);

        if((threadContext->func(threadContext->param)) != M4NO_ERROR)
        {
            /* The doIt function reported an error: terminate the thread
               here unless a stop request is already in progress. */
            M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);

            if(threadContext->state == M4OSA_kThreadRunning)
            {

                //PR 2354 - ACO : Suppress stopping state and don't
                // unlock mutex before closing the thread
                threadContext->state = M4OSA_kThreadOpened;
                M4OSA_mutexUnlock(threadContext->stateMutex);
                /* No semStartStop post here: no stop request is pending. */
                return 0;
            }

            M4OSA_mutexUnlock(threadContext->stateMutex);
        }

        M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
    }

    /* A stop was requested: wake up M4OSA_threadSyncStop. */
    M4OSA_semaphorePost(threadContext->semStartStop);

    M4OSA_mutexUnlock(threadContext->stateMutex);

    return 0;
}
+
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method creates a new thread. After this call the thread is
+ * identified by its "context". The thread function is provided by
+ * the "func" parameter. This function creates & allocates a unique
+ * context. It's the OSAL real time responsibility for managing its
+ * context. It must be freed by the M4OSA_threadSyncClose function.
+ * The context parameter will be sent back to any OSAL core thread
+ * functions to allow retrieving data associated to the opened
+ * thread.
+ * @note This function creates the thread, but the thread is not running.
+ * @note Once the thread is created, the state is M4OSA_kThreadOpened.
+ * @param context:(OUT) Context of the created thread
+ * @param func:(IN) "doIt" function pointer to run
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_ALLOC: there is no more available memory
+ * @return M4ERR_CONTEXT_FAILED: the context creation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_threadSyncOpen(M4OSA_Context* context,
+ M4OSA_ThreadDoIt func)
+{
+ M4OSA_ThreadContext* threadContext = M4OSA_NULL;
+ M4OSA_ERR err_code;
+
+ M4OSA_TRACE1_2("M4OSA_threadSyncOpen\t\tM4OSA_Context* 0x%x\t"
+ "M4OSA_ThreadDoIt 0x%x", context, func);
+
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncOpen");
+
+ M4OSA_DEBUG_IF2(func == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncOpen");
+
+ *context = M4OSA_NULL;
+
+ threadContext =
+ (M4OSA_ThreadContext*)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_ThreadContext),
+ M4OSA_THREAD, (M4OSA_Char*)"M4OSA_threadSyncOpen: thread context");
+
+ if(threadContext == M4OSA_NULL)
+ {
+ M4OSA_DEBUG(M4ERR_ALLOC, "M4OSA_threadSyncOpen");
+
+ return M4ERR_ALLOC;
+ }
+
+ threadContext->func = func;
+ threadContext->stackSize = 64 * 1024;
+ threadContext->name = M4OSA_NULL;
+ threadContext->threadID = 0;
+ threadContext->coreID = M4OSA_THREAD;
+ threadContext->state = M4OSA_kThreadOpened;
+ threadContext->priority = M4OSA_kThreadNormalPriority ;
+
+ err_code = M4OSA_mutexOpen(&(threadContext->stateMutex));
+
+ if(M4OSA_ERR_IS_ERROR(err_code))
+ {
+ M4OSA_DEBUG(err_code, "M4OSA_threadSyncOpen: M4OSA_mutexOpen");
+
+ return err_code;
+ }
+
+ err_code = M4OSA_semaphoreOpen(&(threadContext->semStartStop), 0);
+
+ if(M4OSA_ERR_IS_ERROR(err_code))
+ {
+ M4OSA_DEBUG(err_code, "M4OSA_threadSyncOpen: M4OSA_semaphoreOpen");
+
+ return err_code;
+ }
+
+ *context = threadContext;
+
+ return M4NO_ERROR;
+}
+
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method runs a specified thread. The "param" parameter
+ * allows the application to set a specific parameter to the
+ * created thread. This parameter will be used as the second one of
+ * the "M4OSA_ThreadDoIt" function.
+ * @note This method is a blocking up to the thread is running.
+ * Before calling this method, the state is M4OSA_kThreadOpened.
+ * Once the method is called, the state is M4OSA_kThreadStarting.
+ * Once the thread is running, the state is M4OSA_kThreadRunning.
+ * @note This method returns immediately. If the "threadStarted" optionID
+ * is not NULL, the thread will call it before running the doIt
+ * function.
+ * @param context:(IN/OUT) Context of the thread
+ * @param param:(IN) Application data thread parameter
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_STATE: this function cannot be called now
+ * @return M4ERR_THREAD_NOT_STARTED: the thread did not start
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_threadSyncStart(M4OSA_Context context,
+ M4OSA_Void* param)
+{
+ M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
+ pthread_attr_t attribute = { 0, 0, 0, 0, 0, 0 };
+ int min = 0;
+ int max = 0;
+ int priority = 0;
+ struct sched_param sched = { 0 };
+
+ M4OSA_TRACE1_2("M4OSA_threadSyncStart\t\tM4OSA_Context 0x%x\tM4OSA_Void* "
+ "0x%x", context, param);
+
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncStart");
+
+ M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
+ M4ERR_BAD_CONTEXT, "M4OSA_threadSyncStart");
+
+ M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
+
+ if(threadContext->state != M4OSA_kThreadOpened)
+ {
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ M4OSA_DEBUG(M4ERR_STATE, "M4OSA_threadSyncStart");
+
+ return M4ERR_STATE;
+ }
+
+ threadContext->state = M4OSA_kThreadStarting;
+
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+ threadContext->param = param;
+
+ if ( 0 == pthread_attr_init( &attribute ) )
+ {
+ if ( 0 == pthread_attr_setdetachstate( &attribute, PTHREAD_CREATE_DETACHED ) )
+ {
+ if ( 0 == pthread_attr_setstacksize( &attribute, (size_t)threadContext->stackSize ) )
+ {
+ if ( 0 == pthread_attr_setschedpolicy( &attribute, SCHED_OTHER ) )
+ {
+ /* Tentative patches to handle priorities in a better way : */
+ /* Use Android's predefined priorities (range +19..-20)
+ *rather than Linux ones (0..99)*/
+
+ /* Get min and max priorities */
+ min = sched_get_priority_min( SCHED_FIFO );
+ max = sched_get_priority_max( SCHED_FIFO );
+
+ M4OSA_TRACE1_2("M4OSA_threadSyncStart MAX=%d MIN=%d", max, min);
+
+ /* tentative modification of the priorities */
+ /* Set the priority based on default android priorities */
+ /* This probably requires some more tuning,
+ * outcome of this priority settings are not yet satisfactory */
+ /* Implementing thread handling based on Android's thread creation
+ * helpers might bring some improvement (see threads.h) */
+ switch(threadContext->priority)
+ {
+ case M4OSA_kThreadLowestPriority:
+ priority = ANDROID_PRIORITY_NORMAL;
+ break;
+ case M4OSA_kThreadLowPriority:
+ priority = ANDROID_PRIORITY_DISPLAY;
+ break;
+ case M4OSA_kThreadNormalPriority:
+ priority = ANDROID_PRIORITY_URGENT_DISPLAY;
+ break;
+ case M4OSA_kThreadHighPriority:
+ priority = ANDROID_PRIORITY_AUDIO;
+ break;
+ case M4OSA_kThreadHighestPriority:
+ priority = ANDROID_PRIORITY_URGENT_AUDIO;
+ break;
+ }
+ sched.sched_priority = priority;
+
+ if ( 0 == pthread_attr_setschedparam( &attribute, &sched ) )
+ {
+ if ( 0 == pthread_create( &threadContext->threadID,
+ &attribute,
+ &M4OSA_threadSyncForEverDo,
+ (void *)threadContext ) )
+ {
+ if ( M4OSA_FALSE == M4OSA_ERR_IS_ERROR( M4OSA_semaphoreWait(
+ threadContext->semStartStop,
+ M4OSA_WAIT_FOREVER ) ) )
+ {
+ return M4NO_ERROR;
+ }
+ }
+ }
+ }
+ }
+ }
+ pthread_attr_destroy( &attribute );
+ }
+
+ M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
+
+ threadContext->state = M4OSA_kThreadOpened;
+
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ M4OSA_DEBUG(M4ERR_THREAD_NOT_STARTED, "M4OSA_threadSyncStart");
+
+ return M4ERR_THREAD_NOT_STARTED;
+}
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method stops a specified thread.
+ * @note This call is a blocking one up to the "M4OSA_ThreadDoIt"
+ * function has returned.
+ * Before the method is called, the state is M4OSA_kThreadRunning.
+ * Once the method is called, the state is M4OSA_kThreadStopping.
+ * Once the thread is stopped, the state is M4OSA_kThreadOpened.
+ * @note This method returns once the thread has been stopped. If the
+ * "threadStopped" optionID is not NULL, the thread will call it
+ * before dying.
+ * @param context:(IN/OUT) Context of the thread
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_STATE: this function cannot be called now
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_threadSyncStop(M4OSA_Context context)
+{
+ M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
+
+ M4OSA_TRACE1_1("M4OSA_threadSyncStop\t\tM4OSA_Context 0x%x", context);
+
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncStop");
+
+ M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
+ M4ERR_BAD_CONTEXT, "M4OSA_threadSyncStop");
+
+ M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
+
+ if(threadContext->state != M4OSA_kThreadRunning)
+ {
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ M4OSA_DEBUG(M4ERR_STATE, "M4OSA_threadSyncStop");
+
+ return M4ERR_STATE;
+ }
+
+ threadContext->state = M4OSA_kThreadStopping;
+
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ M4OSA_semaphoreWait(threadContext->semStartStop, M4OSA_WAIT_FOREVER);
+
+ M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
+
+ threadContext->state = M4OSA_kThreadOpened;
+
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ return M4NO_ERROR;
+}
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method deletes a thread (identified by its context). After
+ * this call the thread and its context are no more useable. This
+ * function frees all the memory related to this thread.
+ * @note Before the method is called, the state is M4OSA_kThreadOpened.
+ * Once the method is called, the state is M4OSA_kThreadClosed.
+ * @param context:(IN/OUT) Context of the thread
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_STATE: this function cannot be called now
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_threadSyncClose(M4OSA_Context context)
+{
+ M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
+ M4OSA_ERR err_code = M4NO_ERROR;
+ M4OSA_ERR err;
+
+ M4OSA_TRACE1_1("M4OSA_threadSyncClose\t\tM4OSA_Context 0x%x", context);
+
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncClose");
+
+ M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
+ M4ERR_BAD_CONTEXT, "M4OSA_threadSyncClose");
+
+ M4OSA_DEBUG_IF2(threadContext->state == M4OSA_kThreadClosed,
+ M4ERR_BAD_CONTEXT, "M4OSA_threadSyncClose");
+
+ M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
+
+ /* Closing is only legal from the Opened state (thread not running);
+ * otherwise report M4ERR_STATE without touching the context. */
+ if(threadContext->state != M4OSA_kThreadOpened)
+ {
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ M4OSA_DEBUG(M4ERR_STATE, "M4OSA_threadSyncClose");
+
+ return M4ERR_STATE;
+ }
+
+ threadContext->state = M4OSA_kThreadClosed;
+
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ /* BUGFIX: release every resource even if one of the close calls
+ * fails. The original code returned on the first error and leaked
+ * the semaphore, the name string and the context itself. The first
+ * error encountered is the one reported to the caller. */
+ err = M4OSA_mutexClose(threadContext->stateMutex);
+
+ if(M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_DEBUG(err, "M4OSA_threadSyncClose: M4OSA_mutexClose");
+
+ err_code = err;
+ }
+
+ err = M4OSA_semaphoreClose(threadContext->semStartStop);
+
+ if(M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_DEBUG(err, "M4OSA_threadSyncClose: M4OSA_semaphoreClose");
+
+ if(M4NO_ERROR == err_code)
+ {
+ err_code = err;
+ }
+ }
+
+ /* free(NULL) would be a no-op, but keep the explicit guard to match
+ * the file's existing style. */
+ if(threadContext->name != M4OSA_NULL)
+ {
+ free(threadContext->name);
+ }
+
+ free(threadContext);
+
+ return err_code;
+}
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method asks the thread to return its state.
+ * @note The caller is responsible for allocating/deallocating the state
+ * field.
+ * @param context:(IN) Context of the thread
+ * @param state:(OUT) Thread state
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_threadSyncGetState(M4OSA_Context context,
+ M4OSA_ThreadState* state)
+{
+ M4OSA_ThreadContext* pThreadContext = (M4OSA_ThreadContext*)context;
+
+ M4OSA_TRACE1_2("M4OSA_threadSyncGetState\t\tM4OSA_Context 0x%x\t"
+ "M4OSA_ThreadState* 0x%x", context, state);
+
+ /* Debug-build-only validation of the input parameters. */
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncGetState");
+ M4OSA_DEBUG_IF2(state == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncGetState");
+ M4OSA_DEBUG_IF2(pThreadContext->coreID != M4OSA_THREAD,
+ M4ERR_BAD_CONTEXT, "M4OSA_threadSyncGetState");
+
+ /* Hand back a snapshot of the current thread state. */
+ *state = pThreadContext->state;
+
+ return M4NO_ERROR;
+}
+
+
+
+
+/**
+ ************************************************************************
+ * @brief This method asks the calling thread to sleep during "timeSleep"
+ * milliseconds.
+ * @note This function does not have any context.
+ * @param time:(IN) Time to sleep in milliseconds
+ * @return M4NO_ERROR: there is no error
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_threadSleep(M4OSA_UInt32 time)
+{
+ struct timespec rqtp = { 0, 0 };
+ struct timespec rmtp = { 0, 0 };
+
+ M4OSA_TRACE1_1("M4OSA_threadSleep\t\tM4OSA_UInt32 %d", time);
+
+ /* Split the millisecond delay into the seconds/nanoseconds pair
+ * expected by nanosleep(). */
+ rqtp.tv_sec = (time_t)(time / 1000);
+ rqtp.tv_nsec = (time % 1000) * 1000000;
+
+ /* BUGFIX: nanosleep() can return early when the calling thread is
+ * hit by a signal; in that case the unslept remainder is stored in
+ * rmtp. Resume sleeping until the full delay has elapsed. rmtp is
+ * reset before each retry so that a genuine error (which leaves
+ * rmtp untouched) cannot loop forever. */
+ while (nanosleep(&rqtp, &rmtp) != 0)
+ {
+ if ((rmtp.tv_sec == 0) && (rmtp.tv_nsec == 0))
+ {
+ break;
+ }
+ rqtp = rmtp;
+ rmtp.tv_sec = 0;
+ rmtp.tv_nsec = 0;
+ }
+
+ return M4NO_ERROR;
+}
+
+#if(M4OSA_OPTIONID_THREAD_PRIORITY == M4OSA_TRUE)
+
+M4OSA_ERR M4OSA_SetThreadSyncPriority(M4OSA_Context context,
+ M4OSA_DataOption optionValue)
+{
+ M4OSA_ThreadContext* pCtx = (M4OSA_ThreadContext*)context;
+
+ M4OSA_TRACE2_2("M4OSA_SetThreadSyncPriority\t\tM4OSA_Context 0x%x\t"
+ "M4OSA_DataOption 0x%x", context, optionValue);
+
+ /* The priority level is carried directly in the pointer-sized
+ * option word; reject anything beyond the lowest defined level. */
+ if((M4OSA_UInt32)optionValue > M4OSA_kThreadLowestPriority)
+ {
+ return M4ERR_PARAMETER;
+ }
+
+ pCtx->priority = (M4OSA_ThreadPriorityLevel)optionValue;
+
+ return M4NO_ERROR;
+}
+
+#endif /*M4OSA_OPTIONID_THREAD_PRIORITY*/
+
+
+
+
+#if(M4OSA_OPTIONID_THREAD_NAME == M4OSA_TRUE)
+
+M4OSA_ERR M4OSA_SetThreadSyncName(M4OSA_Context context,
+ M4OSA_DataOption optionValue)
+{
+ M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
+ M4OSA_Char* name = (M4OSA_Char*)optionValue;
+ M4OSA_UInt32 nameSize ;
+
+ M4OSA_TRACE2_2("M4OSA_SetThreadSyncName\t\tM4OSA_Context 0x%x\t"
+ "M4OSA_DataOption 0x%x", context, optionValue);
+
+ /* Drop any previously stored name before installing the new one. */
+ if(threadContext->name != NULL)
+ {
+ free(threadContext->name);
+ threadContext->name = M4OSA_NULL;
+ }
+
+ /* A NULL optionValue simply clears the name. */
+ if(optionValue != M4OSA_NULL)
+ {
+ nameSize = strlen((const char *)name)+1;
+
+ threadContext->name =
+ (M4OSA_Char*)M4OSA_32bitAlignedMalloc(nameSize, M4OSA_THREAD,
+ (M4OSA_Char*)"M4OSA_SetThreadSyncName: thread name");
+
+ /* BUGFIX: the allocation result is threadContext->name, not
+ * threadContext itself; the original test could never detect an
+ * allocation failure and would then memcpy through NULL. */
+ if(threadContext->name == M4OSA_NULL)
+ {
+ return M4ERR_ALLOC;
+ }
+
+ memcpy((void *)threadContext->name, (void *)name,
+ nameSize);
+ }
+
+ return M4NO_ERROR;
+}
+
+#endif /*M4OSA_OPTIONID_THREAD_NAME*/
+
+
+#if(M4OSA_OPTIONID_THREAD_STACK_SIZE == M4OSA_TRUE)
+
+M4OSA_ERR M4OSA_SetThreadSyncStackSize(M4OSA_Context context,
+ M4OSA_DataOption optionValue)
+{
+ M4OSA_ThreadContext* pCtx = (M4OSA_ThreadContext*)context;
+
+ M4OSA_TRACE2_2("M4OSA_SetThreadSyncStackSize\t\tM4OSA_Context 0x%x\t"
+ "M4OSA_DataOption 0x%x", context, optionValue);
+
+ /* The stack size (in bytes, per the option's users) is carried
+ * directly in the pointer-sized option word. */
+ pCtx->stackSize = (M4OSA_UInt32)optionValue;
+
+ return M4NO_ERROR;
+}
+
+#endif /*M4OSA_OPTIONID_THREAD_STACK_SIZE*/
+
+/**
+ ************************************************************************
+ * @brief This method asks the core OSAL-Thread component to set the value
+ * associated with the optionID. The caller is responsible for
+ * allocating/deallocating the memory of the value field.
+ * @note As the caller is responsible of allocating/de-allocating the
+ * "value" field, the callee must copy this field to its internal
+ * variable.
+ * @param context:(IN/OUT) Context of the thread
+ * @param optionID:(IN) ID of the option
+ * @param optionValue:(IN) Value of the option
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_BAD_OPTION_ID: the optionID is not a valid one
+ * @return M4ERR_STATE: this option is not available now
+ * @return M4ERR_READ_ONLY: this option is a read only one
+ * @return M4ERR_NOT_IMPLEMENTED: this option is not implemented
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_threadSyncSetOption(M4OSA_Context context,
+ M4OSA_ThreadOptionID optionID,
+ M4OSA_DataOption optionValue)
+{
+ M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
+ M4OSA_ERR err_code;
+
+ M4OSA_TRACE1_3("M4OSA_threadSyncSetOption\t\tM4OSA_Context 0x%x\t"
+ "M4OSA_OptionID %d\tM4OSA_DataOption 0x%x",
+ context, optionID, optionValue);
+
+ /* Debug-build-only validation of the context and option ID. */
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncSetOption");
+
+ M4OSA_DEBUG_IF2(optionID == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncSetOption");
+
+ M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
+ M4ERR_BAD_CONTEXT, "M4OSA_threadSyncSetOption");
+
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_THREAD),
+ M4ERR_BAD_OPTION_ID, "M4OSA_threadSyncSetOption");
+
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_WRITABLE(optionID),
+ M4ERR_READ_ONLY, "M4OSA_threadSyncSetOption");
+
+
+ /* Options may only be changed while the thread is not running:
+ * the whole set operation is serialized under the state mutex. */
+ M4OSA_mutexLock(threadContext->stateMutex, M4OSA_WAIT_FOREVER);
+
+ if(threadContext->state != M4OSA_kThreadOpened)
+ {
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ M4OSA_DEBUG(M4ERR_STATE, "M4OSA_threadSyncSetOption");
+
+ return M4ERR_STATE;
+ }
+
+ /* Dispatch to the per-option setter. Each case is compiled in only
+ * when the corresponding M4OSA_OPTIONID_* switch is enabled; any
+ * disabled or unknown option falls through to the default branch. */
+ switch(optionID)
+ {
+
+#if(M4OSA_OPTIONID_THREAD_PRIORITY == M4OSA_TRUE)
+ case M4OSA_ThreadPriority:
+ {
+ err_code = M4OSA_SetThreadSyncPriority(context, optionValue);
+
+ break;
+ }
+#endif /*M4OSA_OPTIONID_THREAD_PRIORITY*/
+
+#if(M4OSA_OPTIONID_THREAD_NAME == M4OSA_TRUE)
+ case M4OSA_ThreadName:
+ {
+ err_code = M4OSA_SetThreadSyncName(context, optionValue);
+
+ break;
+ }
+#endif /*M4OSA_OPTIONID_THREAD_NAME*/
+
+#if(M4OSA_OPTIONID_THREAD_STACK_SIZE == M4OSA_TRUE)
+ case M4OSA_ThreadStackSize:
+ {
+ err_code = M4OSA_SetThreadSyncStackSize(context, optionValue);
+
+ break;
+ }
+#endif /*M4OSA_OPTIONID_THREAD_STACK_SIZE*/
+
+ default:
+ {
+ M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_threadSyncSetOption");
+
+ err_code = M4ERR_NOT_IMPLEMENTED;
+ }
+ }
+
+ M4OSA_mutexUnlock(threadContext->stateMutex);
+
+ return err_code;
+}
+
+
+
+/**
+ ************************************************************************
+ * @brief This method asks the OSAL-Thread to return the value associated
+ * with the optionID. The caller is responsible for
+ * allocating/deallocating the memory of the value field.
+ * @note "optionValue" must be cast according to the type related to the
+ * optionID.
+ * @note As the caller is responsible for de-allocating the "value"
+ * field, the core OSAL-Thread component must perform a copy of its
+ * internal value to the value field.
+ * @param context:(IN) Context of the thread
+ * @param optionID:(IN) ID of the option
+ * @param optionValue:(OUT) Value of the option
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is NULL
+ * @return M4ERR_BAD_CONTEXT: provided context is not a valid one
+ * @return M4ERR_BAD_OPTION_ID: the optionID is not a valid one
+ * @return M4ERR_WRITE_ONLY: this option is a write only one
+ * @return M4ERR_NOT_IMPLEMENTED: this option is not implemented
+ ************************************************************************
+*/
+M4OSA_ERR M4OSA_threadSyncGetOption(M4OSA_Context context,
+ M4OSA_ThreadOptionID optionID,
+ M4OSA_DataOption* optionValue)
+{
+ M4OSA_ThreadContext* threadContext = (M4OSA_ThreadContext*)context;
+
+ M4OSA_TRACE1_3("M4OSA_threadSyncGetOption\t\tM4OSA_Context 0x%x\t"
+ "M4OSA_OptionID %d\tM4OSA_DataOption* 0x%x",
+ context, optionID, optionValue);
+
+ /* Debug-build-only validation of the context and option ID. */
+ M4OSA_DEBUG_IF2(context == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncGetOption");
+
+ M4OSA_DEBUG_IF2(optionID == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncGetOption");
+
+ M4OSA_DEBUG_IF2(optionValue == M4OSA_NULL,
+ M4ERR_PARAMETER, "M4OSA_threadSyncGetOption");
+
+ M4OSA_DEBUG_IF2(threadContext->coreID != M4OSA_THREAD,
+ M4ERR_BAD_CONTEXT, "M4OSA_threadSyncGetOption");
+
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_COREID(optionID, M4OSA_THREAD),
+ M4ERR_BAD_OPTION_ID, "M4OSA_threadSyncGetOption");
+
+ M4OSA_DEBUG_IF2(!M4OSA_OPTION_ID_IS_READABLE(optionID),
+ M4ERR_WRITE_ONLY, "M4OSA_threadSyncGetOption");
+
+ /* NOTE(review): unlike M4OSA_threadSyncSetOption, reads are not
+ * serialized with stateMutex — presumably acceptable because options
+ * are only written while the thread is stopped; confirm. */
+ switch(optionID)
+ {
+
+#if(M4OSA_OPTIONID_THREAD_PRIORITY == M4OSA_TRUE)
+ case M4OSA_ThreadPriority:
+ {
+ M4OSA_ThreadPriorityLevel* priority =
+ (M4OSA_ThreadPriorityLevel*)optionValue;
+
+ *priority = threadContext->priority;
+
+ return M4NO_ERROR;
+ }
+#endif /*M4OSA_OPTIONID_THREAD_PRIORITY*/
+
+#if(M4OSA_OPTIONID_THREAD_NAME == M4OSA_TRUE)
+ case M4OSA_ThreadName:
+ {
+ /* Returns the internal pointer (not a copy): the string stays
+ * owned by the thread context and dies with it. */
+ M4OSA_Char** name = (M4OSA_Char**)optionValue;
+
+ *name = threadContext->name;
+
+ return M4NO_ERROR;
+ }
+#endif /*M4OSA_OPTIONID_THREAD_NAME*/
+
+#if(M4OSA_OPTIONID_THREAD_STACK_SIZE == M4OSA_TRUE)
+ case M4OSA_ThreadStackSize:
+ {
+ M4OSA_UInt32* stackSize = (M4OSA_UInt32*)optionValue;
+
+ *stackSize = threadContext->stackSize;
+
+ return M4NO_ERROR;
+ }
+#endif /*M4OSA_OPTIONID_THREAD_STACK_SIZE*/
+
+ default:
+ break;
+ }
+
+ M4OSA_DEBUG(M4ERR_NOT_IMPLEMENTED, "M4OSA_threadSyncGetOption");
+
+ return M4ERR_NOT_IMPLEMENTED;
+}
+
diff --git a/libvideoeditor/osal/src/M4PSW_DebugTrace.c b/libvideoeditor/osal/src/M4PSW_DebugTrace.c
new file mode 100755
index 0000000..0fcba94
--- /dev/null
+++ b/libvideoeditor/osal/src/M4PSW_DebugTrace.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4PSW_DebugTrace.c
+ * @brief Default trace function for debugging macros
+ * @note This file gives the default implementation of the trace function
+ * used in the debug instrumentation macros, based on printf.
+ * Application writers are strongly encouraged to implement their
+ * own "M4OSA_DebugTrace".
+ ************************************************************************
+*/
+
+
+#include <stdio.h> /*for printf */
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/*#define NO_FILE */ /* suppresses the file name print out */
+
+
+/**
+ ************************************************************************
+ * void M4OSA_DebugTrace(M4OSA_Int32 line, char* file, M4OSA_Int32 level,
+ * M4OSA_Char* cond, char* msg, M4OSA_ERR err)
+ * @brief This function implements the trace for debug tests
+ * @note This function is to be called in the debug macros only.
+ * This implementation uses printf.
+ * @param line (IN): the line number in the source file
+ * @param file (IN): the source file name
+ * @param level (IN): the debug level
+ * @param msg (IN): the error message
+ * @param err (IN): the return value (error code)
+ * @return none
+ ************************************************************************
+*/
+
+M4OSAL_TRACE_EXPORT_TYPE void M4OSA_DebugTrace(M4OSA_Int32 line,
+ M4OSA_Char* file,
+ M4OSA_Int32 level,
+ M4OSA_Char* cond,
+ M4OSA_Char* msg,
+ M4OSA_ERR err)
+{
+ M4OSA_Int32 i;
+
+ /* try to "indent" the resulting traces depending on the level */
+ for (i = 0; i < level; i++)
+ {
+ printf(" ");
+ }
+
+ /* BUGFIX: cast the arguments explicitly so the format specifiers
+ * match regardless of the exact width/signedness of M4OSA_ERR,
+ * M4OSA_Int32 and M4OSA_Char (the original "%li"/"%lu" pairs did
+ * not match the argument types, which is undefined behavior). */
+#ifdef NO_FILE
+ printf("Error: %ld, on %s: %s\n", (long)err, (char *)cond, (char *)msg);
+#else /* NO_FILE */
+ printf("Error: %ld, on %s: %s Line %ld in: %s\n",
+ (long)err, (char *)cond, (char *)msg, (long)line, (char *)file);
+#endif /* NO_FILE */
+
+}
+
+/* Thin adapter used by the M4OSA_DEBUG macros: simply forwards all the
+ * arguments to the printf-based default trace implementation above. */
+M4OSAL_TRACE_EXPORT_TYPE M4OSA_Void M4OSA_DEBUG_traceFunction(M4OSA_UInt32 line,
+ M4OSA_Char* fileName,
+ M4OSA_UInt32 level,
+ M4OSA_Char* stringCondition,
+ M4OSA_Char* message,
+ M4OSA_ERR returnedError)
+{
+ /* NOTE(review): line/level are M4OSA_UInt32 here but M4OSA_Int32 in
+ * M4OSA_DebugTrace — harmless for sane values, but worth aligning. */
+ M4OSA_DebugTrace(line, fileName, level, stringCondition, message, returnedError);
+}
+
diff --git a/libvideoeditor/osal/src/M4PSW_MemoryInterface.c b/libvideoeditor/osal/src/M4PSW_MemoryInterface.c
new file mode 100755
index 0000000..ea4ccea
--- /dev/null
+++ b/libvideoeditor/osal/src/M4PSW_MemoryInterface.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file M4PSW_MemoryInterface.c
+ * @brief Memory Interface
+ * @note Implementation of the osal memory functions
+ *************************************************************************
+*/
+
+#include <stdlib.h>
+#include <memory.h>
+
+#include <time.h>
+#include "M4OSA_Memory.h"
+#ifndef M4VPS_ADVANCED_MEMORY_MANAGER
+/**
+ ************************************************************************
+ * @fn M4OSA_MemAddr32 M4OSA_32bitAlignedMalloc(M4OSA_UInt32 size,
+ * M4OSA_CoreID coreID,
+ * M4OSA_Char* string)
+ * @brief this function allocates a memory block (at least 32 bits aligned)
+ * @note
+ * @param size (IN): size of allocated block in bytes
+ * @param coreID (IN): identification of the caller component
+ * @param string (IN): description of the allocated block (null terminated)
+ * @return address of the allocated block, M4OSA_NULL if no memory available
+ ************************************************************************
+*/
+
+M4OSA_MemAddr32 M4OSA_32bitAlignedMalloc(M4OSA_UInt32 size,
+ M4OSA_CoreID coreID,
+ M4OSA_Char* string)
+{
+ M4OSA_MemAddr32 pBlock = M4OSA_NULL;
+
+ /**
+ * If size is 0, malloc on WIN OS allocates a zero-length item in
+ * the heap and returns a valid pointer to that item.
+ * On other platforms, malloc could returns an invalid pointer
+ * So, DON'T allocate memory of 0 byte */
+ if (0 == size)
+ {
+ return pBlock;
+ }
+
+ /* Round the request up to the next multiple of 4 bytes so callers
+ * can always treat the block as 32-bit aligned storage. */
+ if ((size % 4) != 0)
+ {
+ size += 4 - (size % 4);
+ }
+
+ /* coreID and string are debug/bookkeeping hints; this default
+ * implementation does not use them. */
+ pBlock = (M4OSA_MemAddr32) malloc(size);
+
+ return pBlock;
+}
+
+#endif
+
diff --git a/libvideoeditor/osal/src/M4PSW_Trace.c b/libvideoeditor/osal/src/M4PSW_Trace.c
new file mode 100755
index 0000000..f3d9a1f
--- /dev/null
+++ b/libvideoeditor/osal/src/M4PSW_Trace.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4PSW_Trace.c
+ * @brief Trace function for trace macros
+ * @note This file gives the implementation of the trace function used
+ * in the trace instrumentation macros
+ ************************************************************************
+*/
+
+
+#include <stdio.h> /*for printf */
+#include <stdarg.h> /* ANSI C macros and defs for variable args */
+#include "utils/Log.h"
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#define NO_FILE /* suppresses the file name print out */
+
+#define MAX_STRING_SIZE 1024
+
+/**
+ ************************************************************************
+ * void M4OSA_Trace(M4OSA_Int32 line, M4OSA_Char* file ,M4OSA_Int32 level,
+ * M4OSA_Char* format, ...)
+ * @brief This function implements the trace for debug tests
+ * @note This implementation uses printf. First the variables are retrieved using
+ * ANSI C defs and macros which enable to access a variable number of arguments.
+ * Then the printf is done (with some ornamental additions).
+ * @param level (IN): the debug level
+ * @param format (IN): the "printf" formated string
+ * @param ... (IN): as many parameters as required ...
+ * @return none
+ ************************************************************************
+*/
+
+M4OSAL_TRACE_EXPORT_TYPE void M4OSA_Trace(M4OSA_Int32 line, M4OSA_Char* file ,
+ M4OSA_Int32 level, M4OSA_Char* format, ...)
+{
+ M4OSA_Char message[MAX_STRING_SIZE];
+ va_list marker; /* pointer to list of arguments */
+
+ /* Format the variable arguments into 'message'.
+ * BUGFIX: vsnprintf (rather than vsprintf) guarantees the stack
+ * buffer cannot be overrun by an oversized trace string; the result
+ * is truncated but always NUL-terminated. */
+ va_start(marker, format);
+ vsnprintf((char *)message, MAX_STRING_SIZE, (const char *)format, marker);
+ va_end(marker);
+
+ /* do the actual print */
+#ifdef NO_FILE
+ __android_log_print(ANDROID_LOG_INFO, "M4OSA_Trace", "%s", (char*)message);
+#else /* NO_FILE */
+ /* BUGFIX: the original call passed "%s" as the format, so the real
+ * format string was printed literally and its arguments discarded;
+ * build the full line with a single matching format instead. */
+ __android_log_print(ANDROID_LOG_INFO, "M4OSA_Trace", "%s at %ld in %s",
+ (char *)message, (long)line, (char *)file);
+#endif /* NO_FILE */
+
+}
+
+/* Entry point used by the M4OSA_TRACE macros: formats the message and
+ * sends it to the Android log. coreID is accepted for API compatibility
+ * but not used by this default implementation. */
+M4OSAL_TRACE_EXPORT_TYPE M4OSA_Void M4OSA_TRACE_traceFunction(M4OSA_UInt32 line,
+ M4OSA_Char* fileName,
+ M4OSA_CoreID coreID,
+ M4OSA_UInt32 level,
+ M4OSA_Char* stringMsg, ...)
+{
+ M4OSA_Char message[MAX_STRING_SIZE];
+ va_list marker; /* pointer to list of arguments */
+
+ /* BUGFIX: vsnprintf (rather than vsprintf) bounds the write to the
+ * stack buffer; the result is truncated but always NUL-terminated. */
+ va_start(marker, stringMsg);
+ vsnprintf((char *)message, MAX_STRING_SIZE, (const char *)stringMsg, marker);
+ va_end(marker);
+
+ /* do the actual print */
+#ifdef NO_FILE
+ __android_log_print(ANDROID_LOG_INFO, "M4OSA_TRACE_traceFunction", "%s", (char*)message);
+#else /* NO_FILE */
+ /* BUGFIX: the original branch referenced an undeclared 'file'
+ * (the parameter is 'fileName'), which would not compile with
+ * NO_FILE undefined, and its "%s" format swallowed the real format
+ * string and discarded the arguments. */
+ __android_log_print(ANDROID_LOG_INFO, "M4OSA_TRACE_traceFunction", "%s at %lu in %s",
+ (char *)message, (unsigned long)line, (char*)fileName);
+#endif /* NO_FILE */
+
+}
+
diff --git a/libvideoeditor/vss/3gpwriter/Android.mk b/libvideoeditor/vss/3gpwriter/Android.mk
new file mode 100755
index 0000000..5053e7d
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h
new file mode 100755
index 0000000..5f9d16b
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4MP4W_Types.h
+ * @brief Definition of types for the core MP4 writer
+ ******************************************************************************
+ */
+
+#ifndef M4MP4W_TYPES_H
+#define M4MP4W_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+/* includes */
+#include "M4OSA_Types.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_FileReader.h"
+#include "M4SYS_Stream.h"
+
+/**
+ ******************************************************************************
+ * structure M4MP4C_FtypBox
+ * @brief Information to build the 'ftyp' atom
+ ******************************************************************************
+ */
+#define M4MPAC_FTYP_TAG 0x66747970 /* 'ftyp' FourCC packed big-endian */
+#define M4MPAC_MAX_COMPATIBLE_BRANDS 10
+typedef struct
+{
+ /* All brand fields are actually char[4] stored in big-endian integer format */
+
+ M4OSA_UInt32 major_brand; /* generally '3gp4' */
+ M4OSA_UInt32 minor_version; /* generally '0000' or 'x.x ' */
+ M4OSA_UInt32 nbCompatibleBrands; /* number of compatible brands */
+ M4OSA_UInt32 compatible_brands[M4MPAC_MAX_COMPATIBLE_BRANDS]; /* array of max compatible
+ brands */
+} M4MP4C_FtypBox;
+
+
+/**
+ ******************************************************************************
+ * structure M4MP4W_memAddr
+ * @brief Buffer structure for the MP4 writer
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4OSA_UInt32 size; /* buffer size */
+ M4OSA_MemAddr32 addr; /* 32-bit aligned start address of the buffer */
+} M4MP4W_memAddr;
+
+/**
+ ******************************************************************************
+ * Time type for the core MP4 writer
+ ******************************************************************************
+ */
+/* Values are expressed in the owning track's timescale units
+ * (e.g. timescale 1000 => milliseconds; see M4MP4W_TrackData). */
+typedef M4OSA_UInt32 M4MP4W_Time32;
+
+/**
+ ******************************************************************************
+ * enumeration M4MP4W_State
+ * @brief This enum defines the core MP4 writer states
+ * @note These states are used internally, but can be retrieved from outside
+ * the writer.
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MP4W_opened = 0x100,
+ M4MP4W_ready = 0x200,
+ M4MP4W_writing = 0x300,
+ /* 0x301 shares the 0x300 prefix of M4MP4W_writing: the enum values
+ * suggest it is a sub-state used while an access unit is open
+ * (see the microState fields in the track structures) — confirm. */
+ M4MP4W_writing_startAU = 0x301,
+ M4MP4W_closed = 0x400
+} M4MP4W_State;
+
+/**
+ ******************************************************************************
+ * enumeration M4MP4W_OptionID
+ * @brief This enum defines the core MP4 writer options
+ * @note These options give parameters for the core MP4 writer
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MP4W_maxAUperChunk = 0xC101,
+ M4MP4W_maxChunkSize = 0xC102,
+ M4MP4W_maxChunkInter = 0xC103,
+ M4MP4W_preWriteCallBack = 0xC104,
+ M4MP4W_postWriteCallBack = 0xC105,
+ M4MP4W_maxAUsize = 0xC106,
+ M4MP4W_IOD = 0xC111,
+ M4MP4W_ESD = 0xC112,
+ M4MP4W_SDP = 0xC113,
+ M4MP4W_trackSize = 0xC114,
+ M4MP4W_MOOVfirst = 0xC121,
+ M4MP4W_V2_MOOF = 0xC131,
+ M4MP4W_V2_tblCompres = 0xC132,
+ /*warning: unspecified options:*/
+ M4MP4W_maxFileSize = 0xC152,
+ M4MP4W_CamcoderVersion = 0xC153, /*000 to 999 !*/
+ M4MP4W_estimateAudioSize = 0xC154, /*audio AUs are processed after the video, */
+ /*this option MUST NOT be set if non constant audio
+ frame size (e.g. if SID)*/
+ M4MP4W_embeddedString = 0xC155,
+ M4MP4W_integrationTag = 0xC156,
+ M4MP4W_maxFileDuration = 0xC157,
+ M4MP4W_setFtypBox = 0xC158,
+ M4MP4W_DSI = 0xC159,
+ /* H.264 trimming */
+ M4MP4W_MUL_PPS_SPS = 0xC160,
+ /* H.264 trimming */
+} M4MP4W_OptionID;
+
+/**
+ ******************************************************************************
+ * Audio & video stream IDs
+ ******************************************************************************
+ */
+/* Fixed stream identifiers: the writer handles at most one audio and
+ * one video track. */
+#define AudioStreamID 1
+#define VideoStreamID 2
+
+/**
+ ******************************************************************************
+ * Default parameters values, that can be modified by M4MP4W_setOption
+ ******************************************************************************
+ */
+#define M4MP4W_DefaultWidth 320
+#define M4MP4W_DefaultHeight 240
+#define M4MP4W_DefaultMaxAuSize 4096 /*bytes*/
+#define M4MP4W_DefaultMaxChunkSize 100000 /*bytes*/
+#define M4MP4W_DefaultInterleaveDur 0 /*bytes*/
+/* NOTE(review): the "/*bytes*" unit above looks like a copy/paste slip —
+ * InterleaveDur is a duration (M4MP4W_Mp4FileData.InterleaveDur is
+ * documented in ms); confirm. */
+
+
+/**
+ ******************************************************************************
+ * structure M4MP4W_StreamIDsize
+ * @brief Video plane size
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4SYS_StreamID streamID;
+ M4OSA_UInt16 height; /* video plane height, in pixels */
+ M4OSA_UInt16 width; /* video plane width, in pixels */
+} M4MP4W_StreamIDsize;
+
+/**
+ ******************************************************************************
+ * structure M4MP4W_TrackData
+ * @brief Internal core MP4 writer track structure
+ ******************************************************************************
+ */
+/* Bookkeeping common to audio and video tracks; the single-letter codes
+ * in the field comments (T, S, J, K...) are shorthands reused by the
+ * table-size comments in the track structures below. */
+typedef struct
+{
+ M4SYS_StreamType trackType;
+ M4OSA_UInt32 timescale; /* T (video=1000), (AMR8=8000), (AMR16=16000)*/
+ M4OSA_UInt32 sampleSize; /* S (video=0)*/
+ M4OSA_UInt32 sttsTableEntryNb; /* J (audio=1)*/
+ M4MP4W_Time32 lastCTS; /* CTS of the previous AU,
+ init to 0.Gives duration at the end.*/
+ M4OSA_UInt32 sampleNb; /* K (audio=F)*/
+} M4MP4W_TrackData;
+
+/**
+ ******************************************************************************
+ * structure M4MP4W_AudioTrackData
+ * @brief Internal core MP4 writer audio specific structure
+ ******************************************************************************
+ */
+/* Per-audio-track state: chunk buffers plus the stsz/stts sample tables
+ * later serialized into the file's metadata. */
+typedef struct
+{
+ M4MP4W_State microState;
+ M4MP4W_TrackData CommonData;
+ M4OSA_UChar** Chunk;
+ M4OSA_UInt32* chunkSizeTable;
+#ifndef _M4MP4W_MOOV_FIRST
+ M4OSA_UInt32* chunkOffsetTable;
+#endif /*_M4MP4W_MOOV_FIRST*/
+ M4OSA_UInt32* chunkSampleNbTable;
+ M4OSA_UInt32* chunkTimeMsTable;
+ M4OSA_UInt32 currentChunk; /* Init to 0*/
+ M4OSA_UInt32 currentPos; /* Init to 0 */
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+ M4OSA_UInt32 currentStsc; /* Init to 0 */
+#endif
+ M4MP4W_Time32 sampleDuration; /* Check (AMR8=160), (AMR16=320)*/
+ M4OSA_UInt32 MaxChunkSize; /* Init to M4MP4W_Mp4FileData.MaxChunkSize*/
+ M4OSA_UInt32 MaxAUSize; /* Init to M4MP4W_Mp4FileData.MaxAUSize*/
+ M4OSA_UInt32 LastAllocatedChunk;
+ /* previously, audio au size was supposed constant,
+ * which is actually not the case if silences (sid).*/
+ /* at first audio au, sampleSize is set. It is later reset to 0 if non constant size.*/
+ /* So sampleSize should be tested to know whether or not there is a TABLE_STSZ. */
+ M4OSA_UInt32* TABLE_STSZ; /* table size is 4K*/
+ M4OSA_UInt32 nbOfAllocatedStszBlocks;
+ M4OSA_UInt32* TABLE_STTS;
+ M4OSA_UInt32 nbOfAllocatedSttsBlocks;
+ M4OSA_UInt32 maxBitrate; /*not used in amr case*/
+ M4OSA_UInt32 avgBitrate; /*not used in amr case*/
+ M4OSA_UChar* DSI; /* Decoder Specific Info: May be M4OSA_NULL
+ (defaulted) for AMR */
+ M4OSA_UInt8 dsiSize; /* DSI size, always 9 bytes for AMR */
+} M4MP4W_AudioTrackData;
+
+
+/**
+ ******************************************************************************
+ * structure M4MP4W_VideoTrackData
+ * @brief Internal core MP4 writer video specific structure
+ ******************************************************************************
+ */
+/* Per-video-track state: chunk buffers plus the stts/stsz/stss sample
+ * tables later serialized into the file's metadata. */
+typedef struct
+{
+ M4MP4W_State microState;
+ M4MP4W_TrackData CommonData;
+ M4OSA_UChar** Chunk;
+ M4OSA_UInt32* chunkSizeTable;
+#ifndef _M4MP4W_MOOV_FIRST
+ M4OSA_UInt32* chunkOffsetTable;
+#endif /*_M4MP4W_MOOV_FIRST*/
+ M4OSA_UInt32* chunkSampleNbTable;
+ M4MP4W_Time32* chunkTimeMsTable;
+ M4OSA_UInt32 currentChunk; /* Init to 0*/
+ M4OSA_UInt32 currentPos ; /* Init to 0*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+ M4OSA_UInt32 currentStsc; /* Init to 0*/
+#endif
+ M4OSA_UInt32 stssTableEntryNb ; /* N*/
+ M4OSA_UInt16 width; /* X*/
+ M4OSA_UInt16 height; /* Y*/
+ M4OSA_UInt32* TABLE_STTS; /* table size is J*/
+ M4OSA_UInt32 nbOfAllocatedSttsBlocks;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+ /* 16-bit entries halve the table footprint on phone builds. */
+ M4OSA_UInt16* TABLE_STSZ; /* table size is 2K*/
+#else
+ M4OSA_UInt32* TABLE_STSZ; /* table size is 4K*/
+#endif
+ M4OSA_UInt32 nbOfAllocatedStszBlocks;
+ M4OSA_UInt32* TABLE_STSS; /* table size is N*/
+ M4OSA_UInt32 nbOfAllocatedStssBlocks;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+ M4OSA_UInt32 MaxAUperChunk; /*Init to 0, i.e. not used*/
+#endif
+ M4OSA_UInt32 MaxChunkSize; /*Init to M4MP4W_Mp4FileData.MaxChunkSize*/
+ M4OSA_UInt32 MaxAUSize; /*Init to M4MP4W_Mp4FileData.MaxAUSize*/
+ M4OSA_UInt32 LastAllocatedChunk;
+ M4OSA_UInt32 maxBitrate;
+ M4OSA_UInt32 avgBitrate;
+ M4OSA_UChar* DSI; /* Decoder Specific Info: May be M4OSA_NULL
+ (defaulted) for H263*/
+ M4OSA_UInt8 dsiSize; /* DSI size, always 7 bytes for H263 */
+} M4MP4W_VideoTrackData;
+
+/**
+ ******************************************************************************
+ * structure M4MP4W_Mp4FileData
+ * @brief Internal core MP4 writer private context structure
+ ******************************************************************************
+ */
+/* Top-level writer context: global state, limits and the two optional
+ * per-track structures (audioTrackPtr / videoTrackPtr). */
+typedef struct
+{
+ M4MP4W_State state;
+ M4OSA_Char* url;
+ M4OSA_UInt32 duration; /* D in ms, max duration of audio&video*/
+ M4OSA_UInt32 filesize; /* actual filesize in bytes*/
+ M4MP4W_AudioTrackData* audioTrackPtr;
+ M4OSA_Bool hasAudio;
+ M4MP4W_VideoTrackData* videoTrackPtr;
+ M4OSA_Bool hasVideo;
+ M4OSA_UInt32 MaxChunkSize; /* Init to 100000*/
+ M4OSA_UInt32 MaxAUSize; /* Init to 4096*/
+ M4OSA_UInt32 MaxFileSize; /* Init to 0, i.e. not used*/
+ M4MP4W_Time32 InterleaveDur; /* Init to 0, i.e. not used, ms*/
+ /* M4MP4W_WriteCallBack PreWriteCallBack;*/ /*Init to M4OSA_NULL*/
+ /* M4MP4W_WriteCallBack PostWriteCallBack;*/ /*Init to M4OSA_NULL*/
+ M4OSA_FileWriterPointer* fileWriterFunctions;
+ M4OSA_FileReadPointer* fileReaderFunctions;
+ M4OSA_UInt32 camcoderVersion;
+ M4OSA_Bool estimateAudioSize; /* default is false*/
+ M4OSA_UInt32 audioMsChunkDur; /* in ms, set only if estimateAudioSize
+ is true*/
+ M4OSA_UInt32 audioMsStopTime; /* time to stop audio, set only if
+ estimateAudioSize is true*/
+ M4OSA_Context fileWriterContext;
+#ifndef _M4MP4W_MOOV_FIRST
+ M4OSA_UInt32 absoluteCurrentPos; /* new field for offset update*/
+#endif /*_M4MP4W_MOOV_FIRST*/
+ M4OSA_UChar* embeddedString; /* 16 bytes string, default value
+ written if NULL*/
+ M4OSA_UChar* integrationTag; /* 60 bytes string, memset to 0 if NULL */
+ M4OSA_UInt32 MaxFileDuration; /* Init to 0, i.e. not used*/
+ M4MP4C_FtypBox ftyp; /* ftyp atom, if not defined set major_brand
+ = 0, will use default box */
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+ M4OSA_Char* safetyFileUrl;
+ M4OSA_Bool cleanSafetyFile;
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+ M4OSA_Bool bMULPPSSPS;
+} M4MP4W_Mp4FileData;
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*M4MP4W_TYPES_H*/
+
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h
new file mode 100755
index 0000000..fbe7abb
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4MP4W_Utils.h
+ * @brief Utilities and private functions declaration for the MP4 writer
+ ******************************************************************************
+ */
+
+#ifndef M4MP4W_UTILS_H
+#define M4MP4W_UTILS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+/* includes */
+#include "M4OSA_Types.h"
+#include "M4OSA_FileWriter.h"
+
+
+/**
+ ******************************************************************************
+ * Utility functions to write data in big endian
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_putByte(M4OSA_UChar c, M4OSA_FileWriterPointer* fileFunction,
+ M4OSA_Context context);
+M4OSA_ERR M4MP4W_putBE16(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+ M4OSA_Context context);
+M4OSA_ERR M4MP4W_putBE24(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+ M4OSA_Context context);
+M4OSA_ERR M4MP4W_putBE32(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+ M4OSA_Context context);
+
+/**
+ ******************************************************************************
+ * Write a bulk of data into the specified file, size is given in bytes
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_putBlock(const M4OSA_UChar* Block, M4OSA_UInt32 size,
+ M4OSA_FileWriterPointer* fileFunction, M4OSA_Context context);
+
+/**
+ ******************************************************************************
+ * Convert the 'nb' unsigned integers in 'tab' table from LE into BE
+ ******************************************************************************
+ */
+void M4MP4W_table32ToBE(M4OSA_UInt32* tab, M4OSA_UInt32 nb);
+
+/**
+ ******************************************************************************
+ * Convert an unsigned 32 bits integer from LE into BE
+ ******************************************************************************
+ */
+void M4MP4W_convertInt32BE(M4OSA_UInt32* valPtr);
+
+/**
+ ******************************************************************************
+ * Re-allocation function
+ ******************************************************************************
+ */
+void* M4MP4W_realloc(M4OSA_MemAddr32 ptr, M4OSA_UInt32 oldSize, M4OSA_UInt32 newSize);
+
+/**
+ ******************************************************************************
+ * De-allocate the context
+ * This method is no longer in the writer external interface, but is called from
+ * the function M4MP4W_closeWrite
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_freeContext(M4OSA_Context context);
+
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+/**
+ ******************************************************************************
+ * Put Hi and Lo u16 part in a u32 variable
+ ******************************************************************************
+ */
+M4OSA_Void M4MP4W_put32_Hi(M4OSA_UInt32* tab, M4OSA_UInt16 Hi);
+M4OSA_Void M4MP4W_put32_Lo(M4OSA_UInt32* tab, M4OSA_UInt16 Lo);
+M4OSA_UInt16 M4MP4W_get32_Hi(M4OSA_UInt32* tab);
+M4OSA_UInt16 M4MP4W_get32_Lo(M4OSA_UInt32* tab);
+#endif
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*M4MP4W_UTILS_H*/
+
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h
new file mode 100755
index 0000000..b73a223
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4MP4W_Writer.h
+ * @brief Core MP4 writer interface
+ * @note This file declares the MP4 writer interface functions.
+ * The MP4 writer specific types are defined in file M4MP4W_Types.h
+ ******************************************************************************
+ */
+#ifndef M4MP4W_WRITER_H
+#define M4MP4W_WRITER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+/* includes */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_FileReader.h"
+#include "M4SYS_AccessUnit.h"
+#include "M4MP4W_Types.h"
+
+/**
+ ******************************************************************************
+ * MP4W Errors & Warnings definition
+ ******************************************************************************
+ */
+#define M4WAR_MP4W_OVERSIZE M4OSA_ERR_CREATE(M4_WAR, M4MP4_WRITER ,0x000001)
+#define M4WAR_MP4W_NOT_EVALUABLE M4OSA_ERR_CREATE(M4_WAR, M4MP4_WRITER ,0x000002)
+
+/**
+ ******************************************************************************
+ * @brief Get MP4W version
+ * @param major (OUT) Pointer to the 'major' version number.
+ * @param minor (OUT) Pointer to the 'minor' version number.
+ * @param revision (OUT) Pointer to the 'revision' number.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_getVersion(M4OSA_UInt8* major,
+ M4OSA_UInt8* minor,
+ M4OSA_UInt8* revision);
+
+/**
+ ******************************************************************************
+ * @brief Initiation of the MP4 file creation
+ * @param contextPtr (OUT) Pointer to the MP4 writer context to create.
+ * @param outputFileDescriptor (IN) Descriptor of the output file to open.
+ * @param fileWriterFunction (IN) Pointer to structure containing the set of
+ * OSAL file write functions.
+ * @param tempFileDescriptor (IN) Descriptor of the temporary file to open.
+ * @param fileReaderFunction (IN) Pointer to structure containing the set of
+ * OSAL file read functions.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ * @return M4ERR_ALLOC: Memory allocation failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_openWrite( M4OSA_Context* contextPtr,
+ void* outputFileDescriptor,
+ M4OSA_FileWriterPointer* fileWriterFunction,
+ void* tempFileDescriptor,
+ M4OSA_FileReadPointer* fileReaderFunction );
+
+/**
+ ******************************************************************************
+ * @brief Add a new track
+ * @param context (IN/OUT) MP4 writer context.
+ * @param streamDescPtr (IN) Pointer to the structure containing the
+ * parameters for the new track.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ * @return M4ERR_ALLOC: Memory allocation failed
+ * @return M4ERR_STATE: Invalid state
+ * @return M4ERR_BAD_CONTEXT: An audio (resp.video) stream has already been added
+ * to this context while attempting to add another one,
+ * which is forbidden.
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_addStream( M4OSA_Context context,
+ M4SYS_StreamDescription* streamDescPtr);
+
+/**
+ ******************************************************************************
+ * @brief Signal to the core MP4 writer that there is no more tracks to add
+ * @param context (IN/OUT) MP4 writer context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ * @return M4ERR_ALLOC: Memory allocation failed
+ * @return M4ERR_STATE: Invalid state
+ * @return M4ERR_BAD_CONTEXT: Audio size estimation is required but the two
+ * streams have not both been added.
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_startWriting( M4OSA_Context context);
+
+/**
+ ******************************************************************************
+ * @brief Asks the core MP4 writer to initiate the access unit creation in
+ * the streamID track
+ * @param context (IN/OUT) MP4 writer context.
+ * @param streamID (IN) Stream ID of the track.
+ * @param auPtr (IN/OUT) Access unit.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ * @return M4ERR_BAD_STREAM_ID:Unknown stream ID
+ * @return M4ERR_ALLOC: Memory allocation failed
+ * @return M4ERR_STATE: Invalid state
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_startAU( M4OSA_Context context,
+ M4SYS_StreamID streamID,
+ M4SYS_AccessUnit* auPtr);
+
+/**
+ ******************************************************************************
+ * @brief Ask the core MP4 writer to write the access unit in the streamID track
+ * @note If M4WAR_MP4W_OVERSIZE is returned, M4MP4W_startAU must not be called anymore,
+ * but directly M4MP4W_closeWrite().
+ * @param context (IN/OUT) MP4 writer context.
+ * @param streamID (IN) Stream ID of the track.
+ * @param auPtr (IN/OUT) Access unit.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ * @return M4ERR_BAD_STREAM_ID: Unknown stream ID
+ * @return M4ERR_ALLOC: Memory allocation failed
+ * @return M4ERR_STATE: Invalid state
+ * @return M4WAR_MP4W_NOT_EVALUABLE: It is not possible to evaluate audio size if audio
+ * samples don't have a constant size.
+ * @return M4WAR_MP4W_OVERSIZE: Max file size was reached
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_processAU( M4OSA_Context context,
+ M4SYS_StreamID streamID,
+ M4SYS_AccessUnit* auPtr);
+
+/**
+ ******************************************************************************
+ * @brief Close the MP4 file
+ * @note In previous versions of the MP4 writer, the M4MP4W_freeContext method
+ * was in the interface, which is not the case anymore.
+ * The context is now always deallocated in the M4MP4W_closeWrite function.
+ * @param context (IN/OUT) MP4 writer context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_closeWrite( M4OSA_Context context);
+
+/**
+ ******************************************************************************
+ * @brief Ask the core MP4 writer to return the value associated with the optionID
+ * @param context (IN) MP4 writer context.
+ * @param option (IN) Option ID.
+ * @param valuePtr (OUT) Pointer to the option value.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ * @return M4ERR_NOT_IMPLEMENTED: Not implemented in the current version
+ * @return M4ERR_BAD_OPTION_ID: Unknown optionID
+ * @return M4ERR_BAD_STREAM_ID: Bad stream ID in the option value
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_getOption( M4OSA_Context context,
+ M4OSA_OptionID option,
+ M4OSA_DataOption *valuePtr);
+
+/**
+ ******************************************************************************
+ * @brief Ask the core MP4 writer to set the value associated with the optionID.
+ * @param context (IN/OUT) MP4 writer context.
+ * @param option (IN) Option ID.
+ * @param value (IN) Option value.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ * @return M4ERR_NOT_IMPLEMENTED: Not implemented in the current version
+ * @return M4ERR_BAD_OPTION_ID: Unknown optionID
+ * @return M4ERR_BAD_STREAM_ID: Bad stream ID in the option value
+ * @return M4ERR_ALLOC: A memory allocation failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_setOption( M4OSA_Context context,
+ M4OSA_OptionID option,
+ M4OSA_DataOption value);
+
+/**
+ ******************************************************************************
+ * @brief Ask the core MP4 writer to return its state.
+ * @note By selecting a specific streamID (not null), the caller can obtain
+ * the state of a specific stream. By using 0 as streamID the returned
+ * state is not stream specific.
+ * @param context (IN/OUT) MP4 writer context.
+ * @param statePtr (OUT) Pointer to the state enumeration.
+ * @param streamID (IN) streamID of the stream to retrieve the
+ * micro-state (0 for global state).
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_BAD_STREAM_ID: Unknown stream ID
+ * @return M4ERR_PARAMETER: At least one parameter is null or incorrect
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_getState( M4OSA_Context context,
+ M4MP4W_State* statePtr,
+ M4SYS_StreamID streamID);
+
+/**
+ ******************************************************************************
+ * @brief Get the currently expected file size
+ * @param context (IN/OUT) MP4 writer context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_getCurrentFileSize( M4OSA_Context context,
+ M4OSA_UInt32* currentFileSize);
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+
+#endif /*M4MP4W_WRITER_H*/
+
diff --git a/libvideoeditor/vss/3gpwriter/src/Android.mk b/libvideoeditor/vss/3gpwriter/src/Android.mk
new file mode 100755
index 0000000..057c348
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/src/Android.mk
@@ -0,0 +1,54 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# lib3gpwriter
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_3gpwriter
+
+LOCAL_SRC_FILES:= \
+ M4MP4W_Interface.c \
+ M4MP4W_Utils.c \
+ M4MP4W_Writer.c
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_STATIC_LIBRARIES := \
+ libvideoeditor_osal
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/libvideoeditor/osal/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/3gpwriter/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/common/inc
+
+LOCAL_SHARED_LIBRARIES += libdl
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+ -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar \
+ -DDUPLICATE_STTS_IN_LAST_AU
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c
new file mode 100755
index 0000000..c2c5250
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c
@@ -0,0 +1,914 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4MP4W_Interface.c
+ * @brief 3GPP file writer interface
+ * @note This implementation follows the common interface defined
+ * in "M4WRITER_common.h".
+ ******************************************************************************
+*/
+
+#include "NXPSW_CompilerSwitches.h"
+
+/**
+ * OSAL includes */
+#include "M4OSA_Types.h" /**< OSAL basic types definition */
+#include "M4OSA_FileWriter.h" /**< Include for OSAL file accesses implementation */
+#include "M4OSA_Memory.h" /**< Include for OSAL memory accesses implementation */
+#include "M4OSA_Debug.h" /**< OSAL debug tools */
+
+/**
+ * Writer includes */
+#include "M4WRITER_common.h" /**< Definition of the writer common interface that
+ this module follows */
+
+#ifdef _M4MP4W_USE_CST_MEMORY_WRITER
+#include "M4MP4W_Types_CstMem.h" /**< MP4/3GP core writer types */
+#include "M4MP4W_Writer_CstMem.h" /**< MP4/3GP core writer functions */
+#else
+#include "M4MP4W_Types.h" /**< MP4/3GP core writer types */
+#include "M4MP4W_Writer.h" /**< MP4/3GP core writer functions */
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
+/**
+ * Specific errors for this module */
+#define M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE \
+ M4OSA_ERR_CREATE(M4_ERR, M4WRITER_3GP, 0x000001)
+
+
+/**
+ ******************************************************************************
+ * structure M4WRITER_3GP_InternalContext
+ * @brief This structure defines the writer context (private)
+ * @note This structure is used for all writer calls to store the context
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Context pMP4Context; /**< MP4 writer context */
+ M4OSA_UInt32 maxAUsizes; /**< the maximum AU size possible */
+} M4WRITER_3GP_InternalContext;
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_openWrite(M4WRITER_Context* pContext, void* outputFileDescriptor,
+ * M4OSA_FileWriterPointer* pFileWriterPointer, void* tempFileDescriptor, M4OSA_FileReadPointer* pFileReaderPointer)
+ * @brief Open a writer session.
+ * @note
+ * @param pContext: (OUT) Execution context of the 3GP writer, allocated by this function.
+ * @param outputFileDescriptor (IN) Descriptor of the output file to create.
+ * @param fileWriterFunction (IN) Pointer to structure containing the set of OSAL
+ * file write functions.
+ * @param tempFileDescriptor (IN) Descriptor of the temporary file to open
+ * (NULL if not used)
+ * @param fileReaderFunction (IN) Pointer to structure containing the set of OSAL file read
+ * functions (NULL if not used)
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: there is no more available memory
+ * @return M4ERR_PARAMETER: pContext or pFilePtrFct is M4OSA_NULL (debug only)
+ * @return any error returned by the MP4 core writer openWrite (Its coreID is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_openWrite( M4WRITER_Context* pContext,
+ void* outputFileDescriptor,
+ M4OSA_FileWriterPointer* pFileWriterPointer,
+ void* tempFileDescriptor,
+ M4OSA_FileReadPointer* pFileReaderPointer )
+{
+ M4WRITER_3GP_InternalContext* apContext;
+ M4OSA_ERR err;
+
+ M4OSA_TRACE1_0("M4WRITER_3GP_openWrite");
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext),M4ERR_PARAMETER,
+ "M4WRITER_3GP_openWrite: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWriterPointer),M4ERR_PARAMETER,
+ "M4WRITER_3GP_openWrite: pFileWriterPointer is M4OSA_NULL");
+
+ /**
+ * Allocate memory for the context */
+ *pContext=M4OSA_NULL;
+ apContext = (M4WRITER_3GP_InternalContext*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4WRITER_3GP_InternalContext),
+ M4WRITER_3GP,
+ (M4OSA_Char *)"M4WRITER_3GP_InternalContext");
+
+ if (M4OSA_NULL == apContext)
+ {
+ M4OSA_TRACE1_0("M4WRITER_3GP_openWrite:\
+ unable to allocate context, returning M4ERR_ALLOC");
+ return (M4OSA_ERR)M4ERR_ALLOC;
+ }
+
+ /**
+ * Reset context variables */
+ apContext->pMP4Context = M4OSA_NULL;
+ apContext->maxAUsizes = 0;
+
+ /**
+ * Return the writer context */
+ *pContext = (M4WRITER_Context *)apContext;
+
+ /**
+ * Launch the openWrite of the MP4 writer */
+ M4OSA_TRACE3_0("M4WRITER_3GP_openWrite: calling M4MP4W_openWrite()");
+
+ err = M4MP4W_openWrite(&apContext->pMP4Context, outputFileDescriptor,
+ pFileWriterPointer, tempFileDescriptor, pFileReaderPointer );
+
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4WRITER_3GP_openWrite: "
+ "M4MP4W_openWrite returns error 0x%x", err);
+ }
+
+ M4OSA_TRACE2_1("M4WRITER_3GP_openWrite: returning 0x%x", err);
+
+ return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_startWriting(M4WRITER_Context pContext)
+ * @brief Indicates to the writer that the setup session is ended and that
+ * we will start to write.
+ * @note
+ * @param pContext: (IN) Execution context of the 3GP writer,
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return any error returned by the MP4 core writer startWriting (Its
+ * coreID is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_startWriting(M4WRITER_Context pContext)
+{
+ M4WRITER_3GP_InternalContext* apContext =
+ (M4WRITER_3GP_InternalContext*)pContext;
+
+ M4OSA_ERR err;
+
+ M4OSA_TRACE1_1("M4WRITER_3GP_startWriting: pContext=0x%x", pContext);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
+ "M4WRITER_3GP_startWriting: pContext is M4OSA_NULL");
+
+ /**
+ * Call the MP4 core writer */
+ M4OSA_TRACE3_0("M4WRITER_3GP_startWriting: calling M4MP4W_startWriting()");
+ err = M4MP4W_startWriting(apContext->pMP4Context);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_startWriting returns error 0x%x", err);
+ }
+
+ M4OSA_TRACE2_1("M4WRITER_3GP_startWriting: returning 0x%x", err);
+ return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_addStream(
+ * M4WRITER_Context pContext,
+ * M4SYS_StreamDescription *pStreamDescription)
+ * @brief Add a stream (audio or video).
+ * @note Decoder specific info properties are correctly set before calling
+ * the core writer add function
+ * @param pContext: (IN) Execution context of the 3GP writer,
+ * @param pStreamDescription: (IN) stream description.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext or pStreamDescription is M4OSA_NULL
+ * (debug only)
+ * @return any error returned by the MP4 core writer addStream
+ * (Its coreID is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_addStream(M4WRITER_Context pContext,
+ M4SYS_StreamDescription* pStreamDescription)
+{
+ M4WRITER_3GP_InternalContext *apContext =
+ (M4WRITER_3GP_InternalContext *)pContext;
+
+ M4OSA_ERR err;
+ M4WRITER_StreamVideoInfos *pVideoInfo = M4OSA_NULL;
+ M4WRITER_StreamAudioInfos *pAudioInfo = M4OSA_NULL;
+ M4MP4W_StreamIDsize sizeValue;
+
+ M4OSA_TRACE1_2("M4WRITER_3GP_addStream: pContext=0x%x, "
+ "pStreamDescription=0x%x",
+ pContext, pStreamDescription);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
+ "M4WRITER_3GP_addStream: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamDescription),M4ERR_PARAMETER,
+ "M4WRITER_3GP_addStream: pStreamDescription is M4OSA_NULL");
+
+ /**
+ * Adapt audio/video stream infos */
+ switch (pStreamDescription->streamType)
+ {
+ case M4SYS_kMPEG_4:
+ case M4SYS_kH264:
+ case M4SYS_kH263:
+ M4OSA_TRACE3_1("M4WRITER_3GP_addStream: "
+ "adding a Video stream (streamType=0x%x)",
+ pStreamDescription->streamType);
+ /**
+ * Common descriptions */
+ pStreamDescription->streamID = VideoStreamID; /**< The only values checked by our
+ core writer are streamID */
+ pStreamDescription->timeScale = 1000; /**< and timeScale */
+
+/* Not recommended for video editing -> write explicitly the 'bitr' box into 'd263' */
+/* Rem : it is REL 5 of 3gpp documentation */
+// /**
+// * Average bit-rate must not be set in H263 to be compatible with Platform4 */
+// if (M4SYS_kH263 == pStreamDescription->streamType)
+// {
+// pStreamDescription->averageBitrate = -1;
+// }
+
+ /**
+ * Decoder specific info */
+ pVideoInfo = (M4WRITER_StreamVideoInfos *)pStreamDescription->decoderSpecificInfo;
+ pStreamDescription->decoderSpecificInfoSize = pVideoInfo->Header.Size;
+ pStreamDescription->decoderSpecificInfo = (M4OSA_MemAddr32)pVideoInfo->Header.pBuf;
+ M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Video: DSI=0x%x, DSIsize=%d",
+ pVideoInfo->Header.pBuf, pVideoInfo->Header.Size);
+ break;
+
+ case M4SYS_kAMR:
+ case M4SYS_kAMR_WB:
+ case M4SYS_kAAC:
+ case M4SYS_kEVRC:
+ M4OSA_TRACE3_1("M4WRITER_3GP_addStream: adding an Audio stream (streamType=0x%x)",
+ pStreamDescription->streamType);
+ /**
+ * Common descriptions */
+ pStreamDescription->streamID = AudioStreamID; /**< The only value checked by our
+ core writer is streamID */
+
+ /**
+ * Decoder specific info */
+ pAudioInfo = (M4WRITER_StreamAudioInfos *)pStreamDescription->decoderSpecificInfo;
+ pStreamDescription->decoderSpecificInfoSize = pAudioInfo->Header.Size;
+ pStreamDescription->decoderSpecificInfo = (M4OSA_MemAddr32)pAudioInfo->Header.pBuf;
+ M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Audio: DSI=0x%x, DSIsize=%d",
+ pAudioInfo->Header.pBuf, pAudioInfo->Header.Size);
+ break;
+
+ default:
+ M4OSA_TRACE1_1("M4WRITER_3GP_addStream:\
+ returning M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE (streamType=0x%x)",
+ pStreamDescription->streamType);
+ return (M4OSA_ERR)M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE;
+ break;
+ }
+
+ /**
+ * Call the MP4 core writer */
+ M4OSA_TRACE3_0("M4WRITER_3GP_addStream: calling M4MP4W_addStream()");
+ err = M4MP4W_addStream(apContext->pMP4Context,pStreamDescription);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4WRITER_3GP_addStream: M4MP4W_addStream returns error 0x%x", err);
+ M4OSA_TRACE1_1("M4WRITER_3GP_addStream: returning 0x%x", err);
+ return (err);
+ }
+
+ /**
+ * For Video, set the M4MP4W_trackSize Option */
+ switch (pStreamDescription->streamType)
+ {
+ case M4SYS_kMPEG_4:
+ case M4SYS_kH264:
+ case M4SYS_kH263:
+ sizeValue.streamID = VideoStreamID;
+ sizeValue.height = (M4OSA_UInt16)(pVideoInfo->height);
+ sizeValue.width = (M4OSA_UInt16)(pVideoInfo->width);
+ M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Video: height=%d, width=%d",
+ sizeValue.height, sizeValue.width);
+
+ M4OSA_TRACE3_0("M4WRITER_3GP_addStream: calling M4MP4W_setOption(M4MP4W_trackSize)");
+ err = M4MP4W_setOption( apContext->pMP4Context, M4MP4W_trackSize,
+ (M4OSA_DataOption)&sizeValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4WRITER_3GP_addStream: M4MP4W_setOption returns error 0x%x",
+ err);
+ }
+ break;
+ default:
+ break;
+ }
+
+ M4OSA_TRACE2_1("M4WRITER_3GP_addStream: returning 0x%x", err);
+ return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_closeWrite(M4WRITER_Context pContext)
+ * @brief Close the writer. The context is freed here.
+ * @note
+ * @param pContext: (IN) Execution context of the 3GP writer,
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return any error returned by the MP4 core writer closeWrite (Its coreID
+ * is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_closeWrite(M4WRITER_Context pContext)
+{
+ M4WRITER_3GP_InternalContext* apContext=(M4WRITER_3GP_InternalContext*)pContext;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4OSA_TRACE1_1("M4WRITER_3GP_closeWrite called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
+ "M4WRITER_3GP_closeWrite: pContext is M4OSA_NULL");
+
+ /**
+ * Call the MP4 core writer */
+ if (M4OSA_NULL != apContext->pMP4Context)
+ {
+ M4OSA_TRACE3_0("M4WRITER_3GP_closeWrite: calling M4MP4W_closeWrite()");
+ err = M4MP4W_closeWrite(apContext->pMP4Context);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4WRITER_3GP_closeWrite: M4MP4W_closeWrite returns error 0x%x", err);
+ }
+ }
+
+ /**
+ * Deallocate our own context */
+ free(apContext);
+
+ M4OSA_TRACE2_1("M4WRITER_3GP_closeWrite: returning 0x%x", err);
+ return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_setOption(
+ * M4WRITER_Context pContext, M4OSA_UInt32 optionID,
+ * M4OSA_DataOption optionValue)
+ * @brief This function asks the writer to set the value associated with
+ * the optionID. The caller is responsible for allocating/
+ * de-allocating the memory of the value field.
+ * @note The options handled by the component depend on the implementation
+ * of the component.
+ * @param pContext: (IN) Execution context of the 3GP writer,
+ * @param optionID: (IN) ID of the option to set.
+ * @param optionValue: (IN) Value of the option to set.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_BAD_OPTION_ID: the ID of the option is not valid.
+ * @return any error returned by the MP4 core writer setOption (Its coreID
+ * is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_setOption(
+ M4WRITER_Context pContext, M4OSA_UInt32 optionID,
+ M4OSA_DataOption optionValue)
+{
+ M4WRITER_3GP_InternalContext* apContext =
+ (M4WRITER_3GP_InternalContext*)pContext;
+
+ M4OSA_ERR err = M4NO_ERROR;
+ M4MP4W_memAddr memval;
+ M4SYS_StreamIDValue optval;
+
+ M4OSA_TRACE2_3("M4WRITER_3GP_setOption: pContext=0x%x, optionID=0x%x,\
+ optionValue=0x%x", pContext, optionID, optionValue);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL==apContext),M4ERR_PARAMETER,
+ "M4WRITER_3GP_setOption: pContext is M4OSA_NULL");
+
+ switch (optionID)
+ {
+ /**
+ * Maximum Access Unit size */
+ case M4WRITER_kMaxAUSize:
+ M4OSA_TRACE2_0("setting M4WRITER_kMaxAUSize option");
+ err = M4MP4W_setOption(
+ apContext->pMP4Context,M4MP4W_maxAUsize, optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxAUsize) "
+ "returns error 0x%x", err);
+ }
+ break;
+ /**
+ * Maximum chunk size */
+ case M4WRITER_kMaxChunckSize:
+ M4OSA_TRACE2_0("setting M4WRITER_kMaxChunckSize option");
+ err = M4MP4W_setOption(
+ apContext->pMP4Context,M4MP4W_maxChunkSize, optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxChunkSize)\
+ returns error 0x%x", err);
+ }
+ break;
+ /**
+ * File string signature */
+ case M4WRITER_kEmbeddedString:
+ M4OSA_TRACE2_0("setting M4WRITER_kEmbeddedString option");
+ /* The given M4OSA_DataOption must actually
+ be a text string */
+ memval.addr = (M4OSA_MemAddr32)optionValue;
+ /**< this is max string size copied by the core */
+ memval.size = 16;
+ err = M4MP4W_setOption(
+ apContext->pMP4Context,M4MP4W_embeddedString, &memval);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_embeddedString)\
+ returns error 0x%x", err);
+ }
+ break;
+ /**
+ * File integration tag */
+ case M4WRITER_kIntegrationTag:
+ M4OSA_TRACE2_0("setting M4WRITER_kIntegrationTag option");
+ /* The given M4OSA_DataOption must actually
+ be a text string */
+ memval.addr = (M4OSA_MemAddr32)optionValue;
+ /**< this is max string size copied by the core */
+ memval.size = strlen((const char *)optionValue);
+ err = M4MP4W_setOption(
+ apContext->pMP4Context,M4MP4W_integrationTag, &memval);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_integrationTag)"
+ " returns error 0x%x", err);
+ }
+ break;
+ /**
+ * File version signature */
+ case M4WRITER_kEmbeddedVersion:
+ M4OSA_TRACE2_0("setting M4WRITER_kEmbeddedVersion option");
+ /* The given M4OSA_DataOption must actually
+ be a version number */
+
+ /**< Here 0 means both streams */
+ optval.streamID = 0;
+ /**< version number */
+ optval.value = *(M4OSA_UInt32*)optionValue;
+ err = M4MP4W_setOption(
+ apContext->pMP4Context,M4MP4W_CamcoderVersion, &optval);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_CamcoderVersion)"
+ " returns error 0x%x", err);
+ }
+ break;
+ /**
+ * Some options are read-only */
+ case M4WRITER_kFileSize:
+ case M4WRITER_kFileSizeAudioEstimated:
+ M4OSA_TRACE2_1("trying to set a read-only option! (ID=0x%x)",
+ optionID);
+ return (M4OSA_ERR)M4ERR_READ_ONLY;
+ break;
+ /**
+ * Maximum filesize limitation */
+ case M4WRITER_kMaxFileSize:
+ M4OSA_TRACE2_0("setting M4WRITER_kMaxFileSize option");
+ err = M4MP4W_setOption(
+ apContext->pMP4Context,M4MP4W_maxFileSize, optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxFileSize)\
+ returns error 0x%x", err);
+ }
+ break;
+
+ /**
+ * Maximum file duration limitation */
+ case M4WRITER_kMaxFileDuration:
+ M4OSA_TRACE2_0("setting M4WRITER_kMaxFileDuration option");
+ err = M4MP4W_setOption(
+ apContext->pMP4Context,M4MP4W_maxFileDuration, optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4WRITER_kMaxFileDuration)"
+ " returns error 0x%x", err);
+ }
+ break;
+
+ /**
+ * Set 'ftyp' atom */
+ case M4WRITER_kSetFtypBox:
+ M4OSA_TRACE2_0("setting M4WRITER_kSetFtypBox option");
+ err = M4MP4W_setOption(
+ apContext->pMP4Context, M4MP4W_setFtypBox, optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_setFtypBox)\
+ returns error 0x%x", err);
+ }
+ break;
+
+ /**
+ * Decoder Specific Info */
+ case M4WRITER_kDSI:
+ M4OSA_TRACE2_0("setting M4WRITER_kDSI option");
+ err = M4MP4W_setOption(
+ apContext->pMP4Context, M4MP4W_DSI, optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_DSI)\
+ returns error 0x%x", err);
+ }
+ break;
+ /*+ H.264 Trimming */
+ case M4WRITER_kMUL_PPS_SPS:
+ M4OSA_TRACE2_0("setting M4WRITER_kMUL_PPS_SPS option");
+ err = M4MP4W_setOption(
+ apContext->pMP4Context, M4MP4W_MUL_PPS_SPS, optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_DSI)\
+ returns error 0x%x", err);
+ }
+ break;
+ /*- H.264 Trimming */
+
+ /**
+ * Unknown option */
+ default:
+ M4OSA_TRACE2_1("trying to set an unknown option!\
+ (optionID=0x%x)", optionID);
+ return (M4OSA_ERR)M4ERR_BAD_OPTION_ID;
+ break;
+ }
+
+ M4OSA_TRACE3_1("M4WRITER_3GP_setOption: returning 0x%x", err);
+ return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_getOption(
+ * M4WRITER_Context pContext, M4OSA_UInt32 optionID,
+ * M4OSA_DataOption optionValue)
+ * @brief This function asks the writer to return the value associated with
+ * the optionID. The caller is responsible for allocating/
+ * de-allocating the memory of the value field.
+ * @note The options handled by the component depend on the implementation
+ * of the component.
+ * @param pContext: (IN) Execution context of the 3GP writer,
+ * @param optionID: (IN) Id of the option to get.
+ * @param optionValue: (OUT) Value of the option to get.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_BAD_OPTION_ID: the ID of the option is not valid.
+ * @return M4ERR_NOT_IMPLEMENTED: This option is not implemented yet.
+ * @return any error returned by the MP4 core writer getOption (Its coreID
+ * is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_getOption(
+ M4WRITER_Context pContext, M4OSA_UInt32 optionID,
+ M4OSA_DataOption optionValue)
+{
+ M4WRITER_3GP_InternalContext* apContext =
+ (M4WRITER_3GP_InternalContext*)pContext;
+
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_3("M4WRITER_3GP_getOption: pContext=0x%x, optionID=0x%x,\
+ optionValue=0x%x", pContext, optionID, optionValue);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
+ "M4WRITER_3GP_getOption: pContext is M4OSA_NULL");
+
+ switch (optionID)
+ {
+ /**
+ * Maximum Access Unit size */
+ case M4WRITER_kMaxAUSize:
+ M4OSA_TRACE2_0("getting M4WRITER_kMaxAUSize option");
+ /* NOTE(review): the core is given &optionValue (the address of
+ this local pointer parameter); it is expected to write the
+ result through the caller-supplied pointer -- confirm against
+ M4MP4W_getOption's contract. */
+ err = M4MP4W_getOption(apContext->pMP4Context,M4MP4W_maxAUsize,
+ (M4OSA_DataOption*)&optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_getOption(M4MP4W_maxAUsize)"
+ " returns error 0x%x", err);
+ }
+ break;
+ /**
+ * Maximum chunck size */
+ case M4WRITER_kMaxChunckSize:
+ M4OSA_TRACE2_0("getting M4WRITER_kMaxChunckSize option");
+ /* Same pointer-to-pointer convention as M4WRITER_kMaxAUSize. */
+ err = M4MP4W_getOption(apContext->pMP4Context,M4MP4W_maxChunkSize,
+ (M4OSA_DataOption*)&optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_getOption(M4MP4W_maxChunkSize)\
+ returns error 0x%x", err);
+ }
+ break;
+ /**
+ * The file size option */
+ case M4WRITER_kFileSize:
+ M4OSA_TRACE2_0("getting M4WRITER_kFileSize option");
+ /* get the current file size; here optionValue is used directly
+ as the output M4OSA_UInt32 pointer */
+ err = M4MP4W_getCurrentFileSize(
+ apContext->pMP4Context, (M4OSA_UInt32*)optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_getCurrentFileSize"
+ " returns error 0x%x", err);
+ }
+ break;
+ /**
+ * The file size with audio option has its own function call
+ in the MP4 core writer */
+ case M4WRITER_kFileSizeAudioEstimated:
+ M4OSA_TRACE2_0("getting M4WRITER_kFileSizeAudioEstimated option");
+ /* get the current file size ... */
+ err = M4MP4W_getCurrentFileSize(
+ apContext->pMP4Context, (M4OSA_UInt32*)optionValue);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_getCurrentFileSize"
+ " returns error 0x%x", err);
+ }
+ //no more needed: the 3gp writer has its own mechanism
+ ///* ... add the estimated next max AU size */
+ //*((M4OSA_UInt32*)optionValue) += apContext->maxAUsizes;
+ break;
+ /**
+ * Unknown option */
+ default:
+ M4OSA_TRACE2_1("trying to get an unknown option!\
+ (optionID=0x%x)", optionID);
+ return (M4OSA_ERR)M4ERR_BAD_OPTION_ID;
+ break;
+ }
+
+ M4OSA_TRACE3_1("M4WRITER_3GP_getOption: returning 0x%x", err);
+ return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_startAU(
+ * M4WRITER_Context pContext, M4SYS_StreamID streamID,
+ * M4SYS_AccessUnit* pAU)
+ * @brief Prepare an Access Unit to be ready to store data
+ * @note
+ * @param pContext: (IN) Execution context of the 3GP writer,
+ * @param streamID: (IN) Id of the stream to which the Access Unit
+ * is related.
+ * @param pAU: (IN/OUT) Access Unit to be prepared.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext or pAU is M4OSA_NULL (debug only)
+ * @return M4ERR_BAD_STREAM_ID: streamID is not VideoStreamID nor
+ * AudioStreamID (debug only)
+ * @return any error returned by the MP4 core writer startAU (Its coreID
+ * is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_startAU(
+ M4WRITER_Context pContext, M4SYS_StreamID streamID,
+ M4SYS_AccessUnit* pAU)
+{
+ M4WRITER_3GP_InternalContext* apContext =
+ (M4WRITER_3GP_InternalContext*)pContext;
+
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_3("M4WRITER_3GP_startAU: pContext=0x%x, streamID=%d, pAU=0x%x",
+ pContext, streamID, pAU);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == apContext), M4ERR_PARAMETER,
+ "M4WRITER_3GP_startAU: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pAU), M4ERR_PARAMETER,
+ "M4WRITER_3GP_startAU: pAU is M4OSA_NULL");
+ /* Fixed copy-paste: debug message previously named processAU */
+ M4OSA_DEBUG_IF2(
+ ((VideoStreamID != streamID) && (AudioStreamID != streamID)),
+ M4ERR_BAD_STREAM_ID,
+ "M4WRITER_3GP_startAU: Wrong streamID");
+
+ /**
+ * Call the MP4 writer */
+ M4OSA_TRACE3_0("M4WRITER_3GP_startAU: calling M4MP4W_startAU()");
+ err = M4MP4W_startAU(apContext->pMP4Context, streamID, pAU);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_startAU returns error 0x%x", err);
+ }
+
+ M4OSA_TRACE3_2("AU: dataAddress=0x%x, size=%d",
+ pAU->dataAddress, pAU->size);
+
+ /* Convert oversize to a request toward VES automaton */
+ if (M4WAR_MP4W_OVERSIZE == err)
+ {
+ err = M4WAR_WRITER_STOP_REQ;
+ }
+
+ M4OSA_TRACE3_1("M4WRITER_3GP_startAU: returning 0x%x", err);
+ return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_processAU(
+ * M4WRITER_Context pContext, M4SYS_StreamID streamID,
+ * M4SYS_AccessUnit* pAU)
+ * @brief Write an Access Unit
+ * @note
+ * @param pContext: (IN) Execution context of the 3GP writer,
+ * @param streamID: (IN) Id of the stream to which the Access Unit
+ * is related.
+ * @param pAU: (IN/OUT) Access Unit to be written
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext or pAU is M4OSA_NULL (debug only)
+ * @return M4ERR_BAD_STREAM_ID: streamID is not VideoStreamID nor
+ * AudioStreamID (debug only)
+ * @return any error returned by the MP4 core writer processAU
+ * (Its coreID is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_processAU(
+ M4WRITER_Context pContext, M4SYS_StreamID streamID,
+ M4SYS_AccessUnit* pAU)
+{
+ M4WRITER_3GP_InternalContext* apContext =
+ (M4WRITER_3GP_InternalContext*)pContext;
+
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_3("M4WRITER_3GP_processAU: "
+ "pContext=0x%x, streamID=%d, pAU=0x%x",
+ pContext, streamID, pAU);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == apContext), M4ERR_PARAMETER,
+ "M4WRITER_3GP_processAU: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pAU), M4ERR_PARAMETER,
+ "M4WRITER_3GP_processAU: pAU is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(
+ ((VideoStreamID != streamID) && (AudioStreamID != streamID)),
+ M4ERR_BAD_STREAM_ID,
+ "M4WRITER_3GP_processAU: Wrong streamID");
+
+ M4OSA_TRACE3_4("M4WRITER_3GP_processAU: AU: "
+ "dataAddress=0x%x, size=%d, CTS=%d, nbFrag=%d",
+ pAU->dataAddress, pAU->size, (M4OSA_UInt32)pAU->CTS, pAU->nbFrag);
+
+ /* Track the largest AU seen so far in the writer context */
+ if(pAU->size > apContext->maxAUsizes)
+ {
+ apContext->maxAUsizes = pAU->size;
+ }
+ /**
+ * Call the MP4 writer */
+ M4OSA_TRACE3_0("M4WRITER_3GP_processAU: calling M4MP4W_processAU()");
+ err = M4MP4W_processAU(apContext->pMP4Context, streamID, pAU);
+ if (M4OSA_ERR_IS_ERROR(err))
+ {
+ M4OSA_TRACE1_1("M4MP4W_processAU returns error 0x%x", err);
+ }
+
+ /* Convert oversize to a request toward VES automaton */
+ if(M4WAR_MP4W_OVERSIZE == err)
+ {
+ err = M4WAR_WRITER_STOP_REQ;
+ }
+
+ M4OSA_TRACE3_1("M4WRITER_3GP_processAU: returning 0x%x", err);
+ return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_getInterfaces(
+ * M4WRITER_OutputFileType* Type,
+ * M4WRITER_GlobalInterface** SrcGlobalInterface,
+ * M4WRITER_DataInterface** SrcDataInterface)
+ * @brief Get the 3GPP writer common interface
+ * @note Retrieves the set of functions needed to use the 3GPP writer.
+ * It follows the common writer interface.
+ * @param Type: (OUT) return the type of this writer. Will always be
+ * M4WRITER_k3GPP.
+ * @param SrcGlobalInterface: (OUT) Main set of function to use this
+ * 3GPP writer
+ * @param SrcDataInterface: (OUT) Set of function related to datas
+ * to use this 3GPP writer
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: there is no more available memory
+ * @return M4ERR_PARAMETER: At least one of the parameters is M4OSA_NULL
+ * (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_getInterfaces(
+ M4WRITER_OutputFileType* Type,
+ M4WRITER_GlobalInterface** SrcGlobalInterface,
+ M4WRITER_DataInterface** SrcDataInterface)
+{
+ M4WRITER_GlobalInterface *pGlobal;
+ M4WRITER_DataInterface *pData;
+
+ M4OSA_TRACE2_3("M4WRITER_3GP_getInterfaces: "
+ "Type=0x%x, SrcGlobalInterface=0x%x,\
+ SrcDataInterface=0x%x", Type, SrcGlobalInterface, SrcDataInterface);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == Type), M4ERR_PARAMETER,
+ "M4WRITER_3GP_getInterfaces: Type is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == SrcGlobalInterface), M4ERR_PARAMETER,
+ "M4WRITER_3GP_getInterfaces: SrcGlobalInterface is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == SrcDataInterface), M4ERR_PARAMETER,
+ "M4WRITER_3GP_getInterfaces: SrcDataInterface is M4OSA_NULL");
+
+ /**
+ * Set the output type */
+ *Type = M4WRITER_k3GPP;
+
+ /**
+ * Allocate the global interface structure;
+ * ownership of both structures passes to the caller on success */
+ pGlobal = (M4WRITER_GlobalInterface*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4WRITER_GlobalInterface),
+ M4WRITER_3GP, (M4OSA_Char *)"M4WRITER_GlobalInterface");
+ if (M4OSA_NULL == pGlobal)
+ {
+ M4OSA_TRACE1_0("unable to allocate M4WRITER_GlobalInterface,\
+ returning M4ERR_ALLOC");
+ *SrcGlobalInterface = M4OSA_NULL;
+ *SrcDataInterface = M4OSA_NULL;
+ return (M4OSA_ERR)M4ERR_ALLOC;
+ }
+
+ /**
+ * Allocate the data interface structure */
+ pData =
+ (M4WRITER_DataInterface *)M4OSA_32bitAlignedMalloc(sizeof(M4WRITER_DataInterface),
+ M4WRITER_3GP, (M4OSA_Char *)"M4WRITER_DataInterface");
+ if (M4OSA_NULL == pData)
+ {
+ M4OSA_TRACE1_0("unable to allocate M4WRITER_DataInterface,\
+ returning M4ERR_ALLOC");
+ /* roll back the first allocation so nothing leaks on failure */
+ free(pGlobal);
+ *SrcGlobalInterface = M4OSA_NULL;
+ *SrcDataInterface = M4OSA_NULL;
+ return (M4OSA_ERR)M4ERR_ALLOC;
+ }
+
+ /**
+ * Fill the global interface structure */
+ pGlobal->pFctOpen = M4WRITER_3GP_openWrite;
+ pGlobal->pFctAddStream = M4WRITER_3GP_addStream;
+ pGlobal->pFctStartWriting = M4WRITER_3GP_startWriting;
+ pGlobal->pFctCloseWrite = M4WRITER_3GP_closeWrite;
+ pGlobal->pFctSetOption = M4WRITER_3GP_setOption;
+ pGlobal->pFctGetOption = M4WRITER_3GP_getOption;
+
+ /**
+ * Fill the data interface structure */
+ pData->pStartAU = M4WRITER_3GP_startAU;
+ pData->pProcessAU = M4WRITER_3GP_processAU;
+
+ /**
+ * Set the return values */
+ *SrcGlobalInterface = pGlobal;
+ *SrcDataInterface = pData;
+
+ M4OSA_TRACE2_0("M4WRITER_3GP_getInterfaces: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c
new file mode 100755
index 0000000..62e2ad0
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+******************************************************************************
+ * @file M4MP4W_Utils.c
+ * @brief Utilities and private functions for the MP4 writer
+******************************************************************************
+*/
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+#include "M4MP4W_Utils.h"
+#include "M4OSA_Error.h"
+#include "M4MP4W_Types.h"
+
+#define ERR_CHECK(exp, err) if (!(exp)) { return err; }
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putByte(M4OSA_UChar c, M4OSA_FileWriterPointer* fileFunction,
+ M4OSA_Context context)
+/*******************************************************************************/
+{
+    /* Write one raw byte through the supplied file-writer function table. */
+    return fileFunction->writeData(context, (M4OSA_MemAddr8)&c, 1);
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putBE16(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+ M4OSA_Context context)
+/*******************************************************************************/
+{
+    /* Write 'val' as a 2-byte big-endian quantity (MSB first). */
+    M4OSA_ERR status = M4MP4W_putByte((M4OSA_UChar)(val >> 8), fileFunction, context);
+    if (status != M4NO_ERROR)
+    {
+        return status;
+    }
+    return M4MP4W_putByte((M4OSA_UChar)val, fileFunction, context);
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putBE24(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+ M4OSA_Context context)
+/*******************************************************************************/
+{
+    /* Write 'val' as a 3-byte big-endian quantity (MSB first),
+       stopping at the first write failure. */
+    int shift;
+    M4OSA_ERR status = M4NO_ERROR;
+    for (shift = 16; shift >= 0; shift -= 8)
+    {
+        status = M4MP4W_putByte((M4OSA_UChar)(val >> shift), fileFunction, context);
+        if (status != M4NO_ERROR)
+        {
+            break;
+        }
+    }
+    return status;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putBE32(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+ M4OSA_Context context)
+/*******************************************************************************/
+{
+    /* Write 'val' as a 4-byte big-endian quantity (MSB first),
+       stopping at the first write failure. */
+    int shift;
+    M4OSA_ERR status = M4NO_ERROR;
+    for (shift = 24; shift >= 0; shift -= 8)
+    {
+        status = M4MP4W_putByte((M4OSA_UChar)(val >> shift), fileFunction, context);
+        if (status != M4NO_ERROR)
+        {
+            break;
+        }
+    }
+    return status;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putBlock(const M4OSA_UChar* Block, M4OSA_UInt32 size,
+ M4OSA_FileWriterPointer* fileFunction, M4OSA_Context context)
+/*******************************************************************************/
+{
+    /* Write 'size' bytes from 'Block' through the file-writer function table. */
+    return fileFunction->writeData(context, (M4OSA_MemAddr8)Block, size);
+}
+
+/*******************************************************************************/
+void M4MP4W_convertInt32BE(M4OSA_UInt32* valPtr)
+/*******************************************************************************/
+{
+    /* Reverse the byte order of the 32-bit value in place
+       (host-endianness <-> big-endian swap). */
+    M4OSA_UChar* bytes = (M4OSA_UChar*)valPtr;
+    M4OSA_UChar tmp;
+
+    tmp = bytes[0];
+    bytes[0] = bytes[3];
+    bytes[3] = tmp;
+
+    tmp = bytes[1];
+    bytes[1] = bytes[2];
+    bytes[2] = tmp;
+}
+
+/*******************************************************************************/
+void M4MP4W_table32ToBE(M4OSA_UInt32* tab, M4OSA_UInt32 nb)
+/*******************************************************************************/
+{
+    /* Byte-swap each of the 'nb' entries of 'tab' in place. */
+    M4OSA_UInt32 idx = 0;
+    while (idx < nb)
+    {
+        M4MP4W_convertInt32BE(tab + idx);
+        idx++;
+    }
+}
+
+/*******************************************************************************/
+void* M4MP4W_realloc(M4OSA_MemAddr32 ptr, M4OSA_UInt32 oldSize, M4OSA_UInt32 newSize)
+/*******************************************************************************/
+{
+    /* Resize 'ptr' to 'newSize' bytes by allocating a fresh buffer and
+     * copying 'oldSize' bytes into it.
+     * NOTE(review): unlike ANSI realloc(), the old buffer is freed even
+     * when the new allocation fails, so the caller's data is lost and
+     * M4OSA_NULL is returned; callers must treat a NULL return as fatal. */
+    M4OSA_MemAddr32 ptr2 = (M4OSA_MemAddr32)M4OSA_32bitAlignedMalloc(newSize, M4MP4_WRITER,
+ (M4OSA_Char *)"realloc");
+    if (M4OSA_NULL != ptr2)
+    {
+        memcpy((void *)ptr2, (void *)ptr, oldSize);
+    }
+    free(ptr);
+    return ptr2;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_freeContext(M4OSA_Context context)
+/*******************************************************************************/
+{
+#ifdef _M4MP4W_MOOV_FIRST
+ M4OSA_UInt32 i;
+#endif /*_M4MP4W_MOOV_FIRST*/
+ /* Release every dynamically allocated member of the writer context,
+  * then the context itself. Must only be called once the writer has
+  * been closed (state == M4MP4W_closed). */
+ M4MP4W_Mp4FileData* mMp4FileDataPtr = (M4MP4W_Mp4FileData*)context;
+ ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+ /*freeContext is now called after closeWrite*/
+ ERR_CHECK( mMp4FileDataPtr->state == M4MP4W_closed, M4ERR_STATE);
+ /* NOTE(review): redundant -- the check above already guarantees the
+  * state is M4MP4W_closed at this point. */
+ mMp4FileDataPtr->state = M4MP4W_closed;
+
+ /* ---- audio track teardown ---- */
+ if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)
+ {
+ /*delete also other chunks if any*/
+ /*for (i=0; i<=mMp4FileDataPtr->audioTrackPtr->currentChunk; i++)*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+ for (i=0; i<=mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk; i++)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->Chunk[i]);
+ }
+#else
+ /* non-MOOV_FIRST mode keeps at most one live chunk buffer (Chunk[0]) */
+ if ((M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk) &&
+ (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk[0]))
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->Chunk[0]);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable);
+ }
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ /*now dynamic*/
+ /* chunk bookkeeping tables */
+ if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->Chunk);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkSizeTable)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->chunkSizeTable);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable);
+ }
+
+ /* sample tables (decoding times / sizes) */
+ if (mMp4FileDataPtr->audioTrackPtr->TABLE_STTS != M4OSA_NULL)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS);
+ }
+
+ if (mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ != M4OSA_NULL)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ);
+ }
+
+ /* decoder specific info buffer */
+ if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
+ {
+ free(mMp4FileDataPtr->audioTrackPtr->DSI);
+ mMp4FileDataPtr->audioTrackPtr->DSI = M4OSA_NULL;
+ }
+
+ free(mMp4FileDataPtr->audioTrackPtr);
+ mMp4FileDataPtr->audioTrackPtr = M4OSA_NULL;
+ }
+ /* ---- video track teardown (mirrors the audio path, plus STSS) ---- */
+ if (mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)
+ {
+ /*delete also other chunks if any*/
+ /*for (i=0; i<=mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+ for (i=0; i<=mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk; i++)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->Chunk[i]);
+ }
+#else
+ if ((M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk) &&
+ (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk[0]))
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->Chunk[0]);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable);
+ }
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ /*now dynamic*/
+ if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->Chunk);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkSizeTable)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->chunkSizeTable);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable);
+ }
+
+ if (mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->DSI);
+ mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+ }
+
+ /*now dynamic*/
+ if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STTS)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS);
+ }
+ if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ);
+ }
+ /* sync-sample (key frame) table, video only */
+ if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STSS)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS);
+ }
+
+ free(mMp4FileDataPtr->videoTrackPtr);
+ mMp4FileDataPtr->videoTrackPtr = M4OSA_NULL;
+ }
+
+ /* optional 'skip'-box signature string */
+ if (mMp4FileDataPtr->embeddedString != M4OSA_NULL)
+ {
+ free(mMp4FileDataPtr->embeddedString);
+ mMp4FileDataPtr->embeddedString = M4OSA_NULL;
+ }
+
+ /* finally the context itself */
+ free(mMp4FileDataPtr);
+
+ return M4NO_ERROR;
+}
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+/*******************************************************************************/
+M4OSA_Void M4MP4W_put32_Hi(M4OSA_UInt32* tab, M4OSA_UInt16 Hi)
+/*******************************************************************************/
+{
+    /* Store 'Hi' in the upper 16 bits of *tab, preserving the lower 16 bits.
+     * The cast is required: a plain M4OSA_UInt16 promotes to (signed) int,
+     * so 'Hi << 16' with Hi >= 0x8000 would shift into the sign bit
+     * (undefined behavior, CERT INT34-C). */
+    *tab &= 0xFFFF;
+    *tab |= ((M4OSA_UInt32)Hi) << 16;
+}
+
+/*******************************************************************************/
+M4OSA_Void M4MP4W_put32_Lo(M4OSA_UInt32* tab, M4OSA_UInt16 Lo)
+/*******************************************************************************/
+{
+    /* Store 'Lo' in the lower 16 bits of *tab, preserving the upper 16 bits. */
+    M4OSA_UInt32 upper = *tab & 0xFFFF0000;
+    *tab = upper | Lo;
+}
+
+/*******************************************************************************/
+M4OSA_UInt16 M4MP4W_get32_Hi(M4OSA_UInt32* tab)
+/*******************************************************************************/
+{
+    /* Return the upper 16 bits of *tab. */
+    return (M4OSA_UInt16)(*tab >> 16);
+}
+
+/*******************************************************************************/
+M4OSA_UInt16 M4MP4W_get32_Lo(M4OSA_UInt32* tab)
+/*******************************************************************************/
+{
+    /* Return the lower 16 bits of *tab. */
+    return (M4OSA_UInt16)(*tab & 0xFFFF);
+}
+#endif
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
new file mode 100755
index 0000000..9ad94e0
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
@@ -0,0 +1,5370 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4MP4W_Writer.c
+ * @brief Implementation of the core MP4 writer
+ ******************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+#include "M4OSA_Error.h"
+#include "M4OSA_Debug.h"
+#include "M4MP4W_Writer.h"
+#include "M4MP4W_Utils.h"
+
+/* Check optimisation flags : BEGIN */
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+#ifdef _M4MP4W_MOOV_FIRST
+#error "_M4MP4W_OPTIMIZE_FOR_PHONE should not be used with _M4MP4W_MOOV_FIRST"
+
+#endif
+
+#endif
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
+#error "_M4MP4W_UNBUFFERED_VIDEO should be used with _M4MP4W_OPTIMIZE_FOR_PHONE"
+
+#endif
+
+#endif
+/* Check optimisation flags : END */
+
+#ifndef _M4MP4W_DONT_USE_TIME_H
+#include <time.h>
+
+#endif /*_M4MP4W_DONT_USE_TIME_H*/
+
+/*MACROS*/
+/* Core MP4 writer version, reported via the 'skip' signature box. */
+#define MAJOR_VERSION 3
+#define MINOR_VERSION 3
+#define REVISION 0
+
+/* Return 'err' from the enclosing function when 'exp' does not hold. */
+#define ERR_CHECK(exp, err) if (!(exp)) { return err; }
+/* Assign func's result to the local 'err' and jump to the 'cleanup' label
+   on failure; requires both 'err' and 'cleanup' in the calling scope. */
+#define CLEANUPonERR(func) if ((err = func) != M4NO_ERROR) goto cleanup
+
+/* NOTE(review): classic macro hazard -- evaluates the winning argument
+   twice; do not call with side-effecting expressions. */
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+
+/***************/
+/*Static blocks*/
+/***************/
+
+/*CommonBlocks*/
+
+/* 'ftyp' box: size 0x18, major brand '3gp7', minor version 0x0300,
+   compatible brands '3gp7' and 'isom' */
+const M4OSA_UChar Default_ftyp [] =
+{
+ 0x00, 0x00, 0x00, 0x18, 'f', 't', 'y', 'p', '3', 'g', 'p', '7', 0x00, 0x00,
+ 0x03, 0x00, '3', 'g', 'p', '7', 'i', 's', 'o', 'm'
+};
+
+/* 'mdat' box tag (media data); size is patched in separately */
+const M4OSA_UChar CommonBlock2 [] =
+{
+ 'm', 'd', 'a', 't'
+};
+
+/* 'moov' tag followed by the start of a 0x6C-byte 'mvhd' (version/flags 0) */
+const M4OSA_UChar CommonBlock3 [] =
+{
+ 'm', 'o', 'o', 'v', 0x00, 0x00, 0x00, 0x6C, 'm', 'v', 'h', 'd', 0x00,
+ 0x00, 0x00, 0x00
+};
+
+/* mvhd timescale: 0x3E8 = 1000 ticks per second */
+const M4OSA_UChar CommonBlock4 [] =
+{
+ 0x00, 0x00, 0x03, 0xE8
+};
+
+/* mvhd tail boilerplate: rate/volume/reserved, identity matrix,
+   pre_defined fields, next-track-ID (ends in ...0x03) */
+const M4OSA_UChar CommonBlock5 [] =
+{
+ 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03
+};
+
+/* 'trak' tag followed by the start of a 0x5C-byte 'tkhd'
+   (flags 0x1 = track enabled) */
+const M4OSA_UChar CommonBlock6 [] =
+{
+ 't', 'r', 'a', 'k', 0x00, 0x00, 0x00, 0x5C, 't', 'k', 'h', 'd', 0x00,
+ 0x00, 0x00, 0x01
+};
+
+/* four zero bytes, reused wherever a zeroed 32-bit field is needed */
+const M4OSA_UChar CommonBlock7 [] =
+{
+ 0x00, 0x00, 0x00, 0x00
+};
+
+/* tkhd tail: reserved fields, layer/alternate-group, volume,
+   identity transformation matrix */
+const M4OSA_UChar CommonBlock7bis [] =
+{
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00
+};
+
+/* 'mdia' tag followed by the start of a 0x20-byte 'mdhd' */
+const M4OSA_UChar CommonBlock8 [] =
+{
+ 'm', 'd', 'i', 'a', 0x00, 0x00, 0x00, 0x20, 'm', 'd', 'h', 'd', 0x00,
+ 0x00, 0x00, 0x00
+};
+
+/* mdhd tail: packed language code 0x55C4 + pre_defined
+   (presumably 'und' -- undetermined language; see ISO 14496-12) */
+const M4OSA_UChar CommonBlock9 [] =
+{
+ 0x55, 0xC4, 0x00, 0x00
+};
+
+/* 'minf' tag + complete 'dinf'/'dref'/'url ' chain declaring
+   self-contained media (flags 0x1 in 'url ') */
+const M4OSA_UChar CommonBlock10 [] =
+{
+ 'm', 'i', 'n', 'f', 0x00, 0x00, 0x00, 0x24, 'd', 'i', 'n', 'f', 0x00,
+ 0x00, 0x00, 0x1C, 'd', 'r', 'e', 'f', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x0C, 'u', 'r', 'l', ' ', 0x00, 0x00, 0x00,
+ 0x01
+};
+
+/* 'stbl' (sample table) tag */
+const M4OSA_UChar CommonBlock11 [] =
+{
+ 's', 't', 'b', 'l'
+};
+
+/* 'stts' (decoding time-to-sample) header, version/flags 0 */
+const M4OSA_UChar CommonBlock12 [] =
+{
+ 's', 't', 't', 's', 0x00, 0x00, 0x00, 0x00
+};
+
+/* 'stsd' (sample description) header with entry_count = 1 */
+const M4OSA_UChar SampleDescriptionHeader [] =
+{
+ 's', 't', 's', 'd', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+/* common start of an stsd entry: reserved bytes + data_reference_index = 1 */
+const M4OSA_UChar SampleDescriptionEntryStart [] =
+{
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+/* 'stsz' (sample size) header, version/flags 0 */
+const M4OSA_UChar CommonBlock15 [] =
+{
+ 's', 't', 's', 'z', 0x00, 0x00, 0x00, 0x00
+};
+
+/* 'stsc' (sample-to-chunk) header, version/flags 0 */
+const M4OSA_UChar CommonBlock16 [] =
+{
+ 's', 't', 's', 'c', 0x00, 0x00, 0x00, 0x00
+};
+
+/* 'stco' (chunk offset) header, version/flags 0 */
+const M4OSA_UChar CommonBlock17 [] =
+{
+ 's', 't', 'c', 'o', 0x00, 0x00, 0x00, 0x00
+};
+
+/* 'skip' box header (size 0x5E) carrying the signature string below */
+const M4OSA_UChar BlockSignatureSkipHeader [] =
+{
+ 0x00, 0x00, 0x00, 0x5E, 's', 'k', 'i', 'p'
+};
+/* due to current limitations, size must be 16 */
+const M4OSA_UChar BlockSignatureSkipDefaultEmbeddedString [] =
+{
+ 'N', 'X', 'P', 'S', 'W', ' ', 'C', 'A', 'M', 'C', 'O', 'R', 'D', 'E',
+ 'R', ' '
+};
+/* follows the version (like " 3.0.2"), then " -- " */
+/* due to current limitations, size must be 60 */
+const M4OSA_UChar BlockSignatureSkipDefaultIntegrationTag [] =
+{
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
/*VideoBlocks*/
/* 320*240, now no longer hardcoded */
/* const M4OSA_UChar VideoBlock1[] =
   { 0x01, 0x40, 0x00, 0x00, 0x00, 0xF0, 0x00, 0x00 }; */

/* Complete 'hdlr' box (size 0x21) declaring handler_type 'vide'
   with an empty name string. */
const M4OSA_UChar VideoBlock1_1 [] =
{
    0x00, 0x00, 0x00, 0x21, 'h', 'd', 'l', 'r', 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 'v', 'i', 'd', 'e', 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/* Reserved/pre-defined bytes inside the visual sample entry. */
const M4OSA_UChar SampleDescriptionEntryVideoBoilerplate1 [] =
{
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/* Visual sample entry tail: frame_count = 1, 32-byte compressorname
   (all zero), depth = 0x18 (24-bit), pre_defined = -1 (0xFFFF). */
const M4OSA_UChar SampleDescriptionEntryVideoBoilerplate2 [] =
{
    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x18, 0xFF, 0xFF
};

/* 'stss' (sync sample) fourcc + version/flags = 0. */
const M4OSA_UChar VideoBlock4 [] =
{
    's', 't', 's', 's', 0x00, 0x00, 0x00, 0x00
}; /*STSS*/

/* Complete 'vmhd' box (size 0x14): graphicsmode and opcolor all zero. */
const M4OSA_UChar VideoBlock5 [] =
{
    0x00, 0x00, 0x00, 0x14, 'v', 'm', 'h', 'd', 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/* Horizontal and vertical resolution, 16.16 fixed point: 72 dpi each. */
const M4OSA_UChar VideoResolutions [] =
{
    0x00, 0x48, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00
};
+
/*Mp4vBlocks*/
/* 'mp4v' sample entry fourcc (MPEG-4 Visual). */
const M4OSA_UChar Mp4vBlock1 [] =
{
    'm', 'p', '4', 'v'
};

/* ESDS objectTypeIndication 0x20 (MPEG-4 Visual) + streamType/flags 0x11. */
const M4OSA_UChar Mp4vBlock3 [] =
{
    0x20, 0x11
};

/*H263Blocks*/
/* 's263' sample entry fourcc (H.263, 3GPP). */
const M4OSA_UChar H263Block1 [] =
{
    's', '2', '6', '3'
};

/* 'd263' box header, size 0x0F (no bitrate box appended). */
const M4OSA_UChar H263Block2 [] =
{
    0x00, 0x00, 0x00, 0x0F, 'd', '2', '6', '3'
};

/* 'd263' box header, size 0x1F (with the 16-byte 'bitr' box appended). */
const M4OSA_UChar H263Block2_bitr [] =
{
    0x00, 0x00, 0x00, 0x1F, 'd', '2', '6', '3'
};

/* Default d263 payload: vendor 'PHLP', decoder version 0,
   level 0x0A, profile 0. */
const M4OSA_UChar H263Block3 [] =
{
    'P', 'H', 'L', 'P', 0x00, 0x0A, 0x00
};

/* 'bitr' box header (size 0x10); avg/max bitrates follow at write time. */
const M4OSA_UChar H263Block4 [] =
{
    0x00, 0x00, 0x00, 0x10, 'b', 'i', 't', 'r'
};

/*H264Blocks*/
/* 'avc1' sample entry fourcc (H.264/AVC). */
const M4OSA_UChar H264Block1 [] =
{
    'a', 'v', 'c', '1'
};

/* Store the avcC field, the version (=1),
   the profile (=66), the compatibility (=0), */

/* the level (=10),111111 + NAL field Size (= 4 - 1),
   111 + number of PPS (=1) */

const M4OSA_UChar H264Block2 [] =
{
    // Remove the hardcoded DSI values of H264Block2
    'a' , 'v' , 'c' , 'C'
};

/*AMRBlocks*/
/* Reserved bytes at the start of the audio sample entry. */
const M4OSA_UChar AMRBlock1 [] =
{
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/* Complete 'hdlr' box (size 0x21) declaring handler_type 'soun'
   with an empty name string. */
const M4OSA_UChar AMRBlock1_1 [] =
{
    0x00, 0x00, 0x00, 0x21, 'h', 'd', 'l', 'r', 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 's', 'o', 'u', 'n', 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/* Audio sample entry tail: channelcount = 2, samplesize = 16, reserved. */
const M4OSA_UChar AudioSampleDescEntryBoilerplate [] =
{
    0x00, 0x02, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00
};

/* 'damr' box header, size 0x11. */
const M4OSA_UChar AMRDSIHeader [] =
{
    0x00, 0x00, 0x00, 0x11, 'd', 'a', 'm', 'r'
};

/* Default damr payload: vendor 'PHLP', decoder version 0,
   mode_set 0x8000, mode_change_period 0, frames_per_sample 1. */
const M4OSA_UChar AMRDefaultDSI [] =
{
    'P', 'H', 'L', 'P', 0x00, 0x00, 0x80, 0x00, 0x01
};

/* Complete 'smhd' box (size 0x10): balance and reserved all zero. */
const M4OSA_UChar AMRBlock4 [] =
{
    0x00, 0x00, 0x00, 0x10, 's', 'm', 'h', 'd', 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00
};

/*AMR8Blocks*/
/* 'samr' sample entry fourcc (AMR narrow-band). */
const M4OSA_UChar AMR8Block1 [] =
{
    's', 'a', 'm', 'r'
};

/*AMR16Blocks*/
/*const M4OSA_UChar AMR16Block1[] = { 's', 'a', 'w', 'b'};*/

/*AACBlocks*/
/* 'mp4a' sample entry fourcc (MPEG-4 Audio). */
const M4OSA_UChar AACBlock1 [] =
{
    'm', 'p', '4', 'a'
};

/* ESDS objectTypeIndication 0x40 (MPEG-4 Audio) + streamType/flags 0x15. */
const M4OSA_UChar AACBlock2 [] =
{
    0x40, 0x15
};

/*MPEGConfigBlocks (AAC & MP4V)*/
/* 'esds' fourcc + version/flags = 0 + ES_DescrTag (0x03). */
const M4OSA_UChar MPEGConfigBlock0 [] =
{
    'e', 's', 'd', 's', 0x00, 0x00, 0x00, 0x00, 0x03
};

/* ES descriptor body fragment preceding the DecoderConfigDescrTag (0x04). */
const M4OSA_UChar MPEGConfigBlock1 [] =
{
    0x00, 0x00, 0x00, 0x04
};

/* DecSpecificInfoTag (0x05). */
const M4OSA_UChar MPEGConfigBlock2 [] = { 0x05 };
/* SLConfigDescrTag (0x06), length 1, predefined = 2 (MP4). */
const M4OSA_UChar MPEGConfigBlock3 [] =
{
    0x06, 0x01, 0x02
};

/*EVRCBlocks*/
/* 'devc' box header, size 0x0E. */
const M4OSA_UChar EVRCBlock3_1 [] =
{
    0x00, 0x00, 0x00, 0x0E, 'd', 'e', 'v', 'c'
};

/* Default devc payload: vendor 'PHLP', decoder version 0,
   frames_per_sample 0. */
const M4OSA_UChar EVRCBlock3_2 [] =
{
    'P', 'H', 'L', 'P', 0x00, 0x00
};

/*EVRC8Blocks*/
/* 'sevc' sample entry fourcc (EVRC). */
const M4OSA_UChar EVRC8Block1 [] =
{
    's', 'e', 'v', 'c'
};
+
+/***********/
+/* Methods */
+/***********/
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_getVersion(M4OSA_UInt8 *major, M4OSA_UInt8 *minor,
+ M4OSA_UInt8 *revision )
+/*******************************************************************************/
+{
+ ERR_CHECK(M4OSA_NULL != major, M4ERR_PARAMETER);
+ ERR_CHECK(M4OSA_NULL != minor, M4ERR_PARAMETER);
+ ERR_CHECK(M4OSA_NULL != revision, M4ERR_PARAMETER);
+
+ *major = MAJOR_VERSION;
+ *minor = MINOR_VERSION;
+ *revision = REVISION;
+
+ return M4NO_ERROR;
+}
+
/* Allocation sizes for the per-track sample tables, computed once in
   M4MP4W_initializeAllocationParameters().  They are file-scope statics,
   so they are shared by every writer instance in the process
   (NOTE(review): not safe if two writers with different budgets run
   concurrently — confirm single-instance usage). */
static M4OSA_UInt32 M4MP4W_STTS_ALLOC_SIZE;       /* video stts table, bytes */
static M4OSA_UInt32 M4MP4W_STSZ_ALLOC_SIZE;       /* video stsz table, bytes */
static M4OSA_UInt32 M4MP4W_STSS_ALLOC_SIZE;       /* video stss table, bytes */
static M4OSA_UInt32 M4MP4W_CHUNK_ALLOC_NB;        /* video chunk table entries */
static M4OSA_UInt32 M4MP4W_STTS_AUDIO_ALLOC_SIZE; /* audio stts table, bytes */
static M4OSA_UInt32 M4MP4W_STSZ_AUDIO_ALLOC_SIZE; /* audio stsz table, bytes */
static M4OSA_UInt32 M4MP4W_CHUNK_AUDIO_ALLOC_NB;  /* audio chunk table entries */

#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
#ifdef _M4MP4W_UNBUFFERED_VIDEO
/* stsc[ ] table is splitted at 12 bits */
#define M4MP4W_VIDEO_MAX_AU_PER_CHUNK 4095 /* 0=notused */

#else
#define M4MP4W_VIDEO_MAX_AU_PER_CHUNK 10   /* 0=notused */

#endif

#endif
+
+/*******************************************************************************/
+
+M4OSA_ERR M4MP4W_initializeAllocationParameters(M4MP4W_Mp4FileData *Ptr )
+/*******************************************************************************/
+{
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ M4OSA_UInt32 maxMemory, vesMemory;
+ M4OSA_UInt32 nbVideoFrames, nbAudioFrames;
+ M4OSA_UInt32 averageVideoChunk;
+
+ /*-----------*/
+ /* NB_FRAMES */
+ /*-----------*/
+
+ /* magical formula : memory = vesMemory + 12 * framerate * duration */
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+ vesMemory = 0x32000; /* 200 kB */
+
+#else
+
+ vesMemory = 0x3E800; /* 250 kB */
+
+#endif
+
+#define VIDEO_POOL_MEMORY 1000000
+
+ maxMemory = VIDEO_POOL_MEMORY;
+
+ if (maxMemory < vesMemory) {
+ return M4ERR_ALLOC;
+ }
+
+ nbVideoFrames = ( maxMemory - vesMemory) / 12;
+
+ M4OSA_TRACE1_1("M4MP4W: %d images max", nbVideoFrames);
+
+ /* VIDEO */
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+ /* assume an average of 25 fpc : reference = 15 fps * 2s * 0.8 */
+
+ averageVideoChunk = 2500;
+
+#else
+
+ if (M4MP4W_VIDEO_MAX_AU_PER_CHUNK > 0)
+ {
+ averageVideoChunk = 100 * M4MP4W_VIDEO_MAX_AU_PER_CHUNK - 20
+ * (M4MP4W_VIDEO_MAX_AU_PER_CHUNK - 1); /* margin 20% */
+ }
+ else
+ {
+ /* assume an average of 50 fpc */
+ averageVideoChunk = 5000;
+ }
+
+#endif
+
+ M4MP4W_STTS_ALLOC_SIZE = nbVideoFrames * sizeof(M4OSA_UInt32);
+ M4MP4W_STSZ_ALLOC_SIZE = nbVideoFrames * sizeof(M4OSA_UInt16);
+ M4MP4W_STSS_ALLOC_SIZE = nbVideoFrames * sizeof(
+ M4OSA_UInt32); /* very conservative (all images are intra) */
+
+ M4MP4W_CHUNK_ALLOC_NB = ( nbVideoFrames * 100) / averageVideoChunk + 1;
+
+ /* AUDIO */
+
+ nbAudioFrames = nbVideoFrames;
+ /* audio is 5 fps, which is the smallest framerate for video */
+
+ M4MP4W_STTS_AUDIO_ALLOC_SIZE = 100; /* compressed */
+ M4MP4W_STSZ_AUDIO_ALLOC_SIZE = 100; /* compressed */
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+ M4MP4W_CHUNK_AUDIO_ALLOC_NB = nbAudioFrames / 10 + 1;
+
+#else
+
+ M4MP4W_CHUNK_AUDIO_ALLOC_NB = nbAudioFrames / 38 + 1;
+
+#endif
+
+ return M4NO_ERROR;
+
+#else
+
+ /* VIDEO 5 min at 25 fps null-enc */
+
+ M4MP4W_STTS_ALLOC_SIZE = 20000;
+ M4MP4W_STSZ_ALLOC_SIZE = 18000;
+ M4MP4W_STSS_ALLOC_SIZE = 5000;
+ M4MP4W_CHUNK_ALLOC_NB = 500;
+
+ /* AUDIO 2 min aac+ null-enc */
+
+ M4MP4W_STTS_AUDIO_ALLOC_SIZE = 32000;
+ M4MP4W_STSZ_AUDIO_ALLOC_SIZE = 20000;
+ M4MP4W_CHUNK_AUDIO_ALLOC_NB = 1000;
+
+ return M4NO_ERROR;
+
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_openWrite(M4OSA_Context *contextPtr,
+ void *outputFileDescriptor,
+ M4OSA_FileWriterPointer *fileWriterFunction,
+ void *tempFileDescriptor,
+ M4OSA_FileReadPointer *fileReaderFunction )
+/*******************************************************************************/
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4MP4W_Mp4FileData *mMp4FileDataPtr = M4OSA_NULL;
+
+ ERR_CHECK(M4OSA_NULL != contextPtr, M4ERR_PARAMETER);
+ ERR_CHECK(M4OSA_NULL != outputFileDescriptor, M4ERR_PARAMETER);
+ ERR_CHECK(M4OSA_NULL != fileWriterFunction, M4ERR_PARAMETER);
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+ /* Optional, feature won't be used if NULL */
+
+ M4OSA_TRACE2_1("tempFileDescriptor = %p", tempFileDescriptor);
+
+ if (M4OSA_NULL == tempFileDescriptor)
+ {
+ M4OSA_TRACE1_0(
+ "tempFileDescriptor is NULL, RESERVED_MOOV_DISK_SPACE feature not used");
+ }
+
+#else /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+ /* Not used : ERR_CHECK(M4OSA_NULL != tempFileDescriptor, M4ERR_PARAMETER); */
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+ /* Not used : ERR_CHECK(M4OSA_NULL != fileReaderFunction, M4ERR_PARAMETER); */
+
+ /* The context reuse mode was suppressed*/
+
+ mMp4FileDataPtr =
+ (M4MP4W_Mp4FileData *)M4OSA_32bitAlignedMalloc(sizeof(M4MP4W_Mp4FileData),
+ M4MP4_WRITER, (M4OSA_Char *)"MP4 writer context");
+ ERR_CHECK(mMp4FileDataPtr != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->url = outputFileDescriptor;
+ mMp4FileDataPtr->audioTrackPtr = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr = M4OSA_NULL;
+ mMp4FileDataPtr->MaxChunkSize = M4MP4W_DefaultMaxChunkSize; /*default */
+ mMp4FileDataPtr->MaxAUSize = M4MP4W_DefaultMaxAuSize; /*default */
+ mMp4FileDataPtr->InterleaveDur =
+ M4MP4W_DefaultInterleaveDur; /*default = 0, i.e. not used*/
+ mMp4FileDataPtr->MaxFileSize = 0; /*default = 0, i.e. not used*/
+ mMp4FileDataPtr->camcoderVersion = 0; /*default is " 0.0.0"*/
+ mMp4FileDataPtr->embeddedString =
+ M4OSA_NULL; /*default is in BlockSignatureSkipDefaultEmbeddedString */
+ mMp4FileDataPtr->integrationTag = M4OSA_NULL; /*default is 0 */
+ mMp4FileDataPtr->MaxFileDuration = 0; /*default = 0, i.e. not used*/
+
+ mMp4FileDataPtr->fileWriterFunctions = fileWriterFunction;
+ mMp4FileDataPtr->hasAudio = M4OSA_FALSE;
+ mMp4FileDataPtr->hasVideo = M4OSA_FALSE;
+ mMp4FileDataPtr->state = M4MP4W_opened;
+ mMp4FileDataPtr->duration = 0; /*i*/
+ /*patch for integrationTag 174 -> 238 (+64)*/
+ mMp4FileDataPtr->filesize =
+ 238; /*initialization with constant part in ftyp+mdat+moov+skip*/
+
+ mMp4FileDataPtr->estimateAudioSize = M4OSA_FALSE;
+ mMp4FileDataPtr->audioMsChunkDur =
+ 0; /*set and used only when estimateAudioSize is true*/
+ mMp4FileDataPtr->audioMsStopTime =
+ 0; /*set and used only when estimateAudioSize is true*/
+
+ mMp4FileDataPtr->fileWriterContext = M4OSA_NULL;
+ /* + CRLV6775 -H.264 trimming */
+ mMp4FileDataPtr->bMULPPSSPS = M4OSA_FALSE;
+ /* - CRLV6775 -H.264 trimming */
+
+#ifndef _M4MP4W_MOOV_FIRST
+
+ mMp4FileDataPtr->absoluteCurrentPos =
+ 32; /*init with ftyp + beginning of mdat size*/
+
+#endif
+
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+
+ mMp4FileDataPtr->safetyFileUrl = tempFileDescriptor;
+ mMp4FileDataPtr->cleanSafetyFile =
+ M4OSA_FALSE; /* No need to clean it just yet. */
+
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+
+ /* ftyp atom */
+
+ memset((void *) &mMp4FileDataPtr->ftyp,0,
+ sizeof(mMp4FileDataPtr->ftyp));
+
+ *contextPtr = mMp4FileDataPtr;
+
+ M4MP4W_initializeAllocationParameters(mMp4FileDataPtr);
+
+ return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_addStream(M4OSA_Context context,
+ M4SYS_StreamDescription *streamDescPtr )
+/*******************************************************************************/
+{
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+
+ ERR_CHECK(M4OSA_NULL != context, M4ERR_PARAMETER);
+
+ ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
+ || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
+ mMp4FileDataPtr->state = M4MP4W_ready;
+
+ switch (streamDescPtr->streamType)
+ {
+ case M4SYS_kAMR:
+ case M4SYS_kAAC:
+ case M4SYS_kEVRC:
+ /*Audio*/
+ ERR_CHECK(streamDescPtr->streamID == AudioStreamID,
+ M4ERR_PARAMETER);
+
+ /*check if an audio track has already been added*/
+ ERR_CHECK(mMp4FileDataPtr->hasAudio == M4OSA_FALSE,
+ M4ERR_BAD_CONTEXT);
+
+ /*check if alloc need to be done*/
+ if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)
+ {
+ mMp4FileDataPtr->audioTrackPtr = (M4MP4W_AudioTrackData
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4MP4W_AudioTrackData),
+ M4MP4_WRITER, (M4OSA_Char *)"M4MP4W_AudioTrackData");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL,
+ M4ERR_ALLOC);
+
+ /**
+ * We must init these pointers in case an alloc bellow fails */
+ mMp4FileDataPtr->audioTrackPtr->Chunk = M4OSA_NULL;
+ mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable = M4OSA_NULL;
+ mMp4FileDataPtr->audioTrackPtr->chunkSizeTable = M4OSA_NULL;
+ mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable = M4OSA_NULL;
+ mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable = M4OSA_NULL;
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS = M4OSA_NULL;
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ = M4OSA_NULL;
+ mMp4FileDataPtr->audioTrackPtr->DSI = M4OSA_NULL;
+
+ /*now dynamic*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+ mMp4FileDataPtr->audioTrackPtr->Chunk =
+ (M4OSA_UChar ** )M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+ * sizeof(M4OSA_UChar *),
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
+ M4ERR_ALLOC);
+
+#else
+
+ mMp4FileDataPtr->audioTrackPtr->Chunk =
+ (M4OSA_UChar ** )M4OSA_32bitAlignedMalloc(sizeof(M4OSA_UChar *),
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
+ M4ERR_ALLOC);
+ mMp4FileDataPtr->audioTrackPtr->Chunk[0] = M4OSA_NULL;
+
+ mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+ * sizeof(M4OSA_UInt32),
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkOffsetTable");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable
+ != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_STTS_AUDIO_ALLOC_SIZE,
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->TABLE_STTS");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS
+ != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks = 1;
+
+ mMp4FileDataPtr->audioTrackPtr->chunkSizeTable =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+ * sizeof(M4OSA_UInt32),
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkSizeTable");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSizeTable
+ != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+ * sizeof(M4OSA_UInt32),
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkSampleNbTable");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable
+ != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+ * sizeof(M4OSA_UInt32),
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkTimeMsTable");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable
+ != M4OSA_NULL, M4ERR_ALLOC);
+
+ mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk = 0;
+ }
+ mMp4FileDataPtr->hasAudio = M4OSA_TRUE;
+ mMp4FileDataPtr->filesize += 402;
+ mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
+ mMp4FileDataPtr->MaxChunkSize; /* init value */
+ mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
+ mMp4FileDataPtr->MaxAUSize;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS = 0;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb = 0;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize = 0;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb = 1;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.timescale =
+ streamDescPtr->timeScale;
+ mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[0] = 0; /*init*/
+ mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[0] = 0; /*init*/
+ mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable[0] = 0; /*init*/
+ mMp4FileDataPtr->audioTrackPtr->currentChunk =
+ 0; /*1st chunk is Chunk[0]*/
+ mMp4FileDataPtr->audioTrackPtr->currentPos = 0;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ mMp4FileDataPtr->audioTrackPtr->currentStsc = 0;
+
+#endif
+
+ mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_ready;
+ mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks = 0;
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ = M4OSA_NULL;
+
+ mMp4FileDataPtr->audioTrackPtr->avgBitrate =
+ streamDescPtr->averageBitrate;
+ mMp4FileDataPtr->audioTrackPtr->maxBitrate =
+ streamDescPtr->maxBitrate;
+
+ if (streamDescPtr->streamType == M4SYS_kAMR)
+ {
+
+ mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
+ M4SYS_kAMR;
+ ERR_CHECK(streamDescPtr->timeScale == 8000, M4ERR_PARAMETER);
+ mMp4FileDataPtr->audioTrackPtr->sampleDuration =
+ 160; /*AMR8+timescale=8000 => sample duration 160 constant*/
+
+ /*Use given DSI if passed, else use default value*/
+ if (streamDescPtr->decoderSpecificInfoSize != 0)
+ {
+ /*amr DSI is 9 bytes long !*/
+ mMp4FileDataPtr->audioTrackPtr->dsiSize =
+ 9; /*always 9 for amr*/
+ ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 9,
+ M4ERR_PARAMETER);
+ mMp4FileDataPtr->audioTrackPtr->DSI =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(9, M4MP4_WRITER,
+ (M4OSA_Char *)"audioTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
+ M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->audioTrackPtr->DSI,
+ (void *)streamDescPtr->decoderSpecificInfo,
+ 9);
+ }
+ else
+ {
+ mMp4FileDataPtr->audioTrackPtr->DSI =
+ M4OSA_NULL; /*default static block will be used*/
+ mMp4FileDataPtr->audioTrackPtr->dsiSize =
+ 0; /*but the actual static dsi is 9 bytes !*/
+ }
+ }
+ else if (streamDescPtr->streamType == M4SYS_kEVRC)
+ {
+
+ mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
+ M4SYS_kEVRC;
+ ERR_CHECK(streamDescPtr->timeScale == 8000, M4ERR_PARAMETER);
+ mMp4FileDataPtr->audioTrackPtr->sampleDuration =
+ 160; /*EVRC+timescale=8000 => sample duration 160 constant*/
+
+ /*Use given DSI if passed, else use default value*/
+ if (streamDescPtr->decoderSpecificInfoSize != 0)
+ {
+ /*evrc DSI is 6 bytes long !*/
+ mMp4FileDataPtr->audioTrackPtr->dsiSize =
+ 6; /*always 6 for evrc*/
+ ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 6,
+ M4ERR_PARAMETER);
+ mMp4FileDataPtr->audioTrackPtr->DSI =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(6, M4MP4_WRITER,
+ (M4OSA_Char *)"audioTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
+ M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->audioTrackPtr->DSI,
+ (void *)streamDescPtr->decoderSpecificInfo,
+ 6);
+ }
+ else
+ {
+ mMp4FileDataPtr->audioTrackPtr->DSI =
+ M4OSA_NULL; /*default static block will be used*/
+ mMp4FileDataPtr->audioTrackPtr->dsiSize =
+ 0; /*but the actual static dsi is 6 bytes !*/
+ }
+ }
+ else /*M4SYS_kAAC*/
+ {
+ /*avg bitrate should be set*/
+ ERR_CHECK(streamDescPtr->averageBitrate != -1, M4ERR_PARAMETER);
+ ERR_CHECK(streamDescPtr->maxBitrate != -1, M4ERR_PARAMETER);
+
+ mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
+ M4SYS_kAAC;
+ mMp4FileDataPtr->audioTrackPtr->sampleDuration =
+ 0; /*don't know for aac, so set 0*/
+
+ mMp4FileDataPtr->audioTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
+
+ if (mMp4FileDataPtr->audioTrackPtr->dsiSize != 0)
+ {
+ mMp4FileDataPtr->audioTrackPtr->DSI =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
+ streamDescPtr->decoderSpecificInfoSize,
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
+ M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->audioTrackPtr->DSI,
+ (void *)streamDescPtr->decoderSpecificInfo,
+ streamDescPtr->decoderSpecificInfoSize);
+ }
+ else
+ {
+ /*no dsi: return bad parameter ?*/
+ return M4ERR_PARAMETER;
+ }
+ }
+
+ break;
+
+ case (M4SYS_kMPEG_4):
+ case (M4SYS_kH264):
+ case (M4SYS_kH263):
+ /*Video*/
+ ERR_CHECK(streamDescPtr->streamID == VideoStreamID,
+ M4ERR_PARAMETER);
+
+ /*check if a video track has already been added*/
+ ERR_CHECK(mMp4FileDataPtr->hasVideo == M4OSA_FALSE,
+ M4ERR_BAD_CONTEXT);
+
+ /*check if alloc need to be done*/
+ if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)
+ {
+ mMp4FileDataPtr->videoTrackPtr = (M4MP4W_VideoTrackData
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4MP4W_VideoTrackData),
+ M4MP4_WRITER, (M4OSA_Char *)"M4MP4W_VideoTrackData");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL,
+ M4ERR_ALLOC);
+
+ /**
+ * We must init these pointers in case an alloc bellow fails */
+ mMp4FileDataPtr->videoTrackPtr->Chunk = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->chunkSizeTable = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STSS = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+
+ /*now dynamic*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+ mMp4FileDataPtr->videoTrackPtr->Chunk =
+ (M4OSA_UChar ** )M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
+ * sizeof(M4OSA_UChar *),
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
+ M4ERR_ALLOC);
+
+#else
+ /*re-use the same chunk and flush it when full*/
+
+ mMp4FileDataPtr->videoTrackPtr->Chunk =
+ (M4OSA_UChar ** )M4OSA_32bitAlignedMalloc(sizeof(M4OSA_UChar *),
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
+ M4ERR_ALLOC);
+ mMp4FileDataPtr->videoTrackPtr->Chunk[0] = M4OSA_NULL;
+
+ mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
+ * sizeof(M4OSA_UInt32),
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkOffsetTable");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable
+ != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
+ M4ERR_ALLOC);
+ mMp4FileDataPtr->videoTrackPtr->chunkSizeTable =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
+ * sizeof(M4OSA_UInt32),
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkSizeTable");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSizeTable
+ != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
+ * sizeof(M4OSA_UInt32),
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkSampleNbTable");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable
+ != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable =
+ (M4MP4W_Time32 *)M4OSA_32bitAlignedMalloc(M4MP4W_CHUNK_ALLOC_NB
+ * sizeof(M4MP4W_Time32),
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkTimeMsTable");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable
+ != M4OSA_NULL, M4ERR_ALLOC);
+
+ mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk = 0;
+ /*tables are now dynamic*/
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_STTS_ALLOC_SIZE,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STTS");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS
+ != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks = 1;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
+ (M4OSA_UInt16 *)M4OSA_32bitAlignedMalloc(M4MP4W_STSZ_ALLOC_SIZE,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSZ");
+
+#else
+
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_STSZ_ALLOC_SIZE,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSZ");
+
+#endif
+
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ
+ != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks = 1;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STSS =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(M4MP4W_STSS_ALLOC_SIZE,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSS");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS
+ != M4OSA_NULL, M4ERR_ALLOC);
+ mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks = 1;
+ }
+ mMp4FileDataPtr->hasVideo = M4OSA_TRUE;
+ mMp4FileDataPtr->filesize += 462;
+ mMp4FileDataPtr->videoTrackPtr->width = M4MP4W_DefaultWidth;
+ mMp4FileDataPtr->videoTrackPtr->height = M4MP4W_DefaultHeight;
+ mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
+ mMp4FileDataPtr->MaxAUSize;
+ mMp4FileDataPtr->videoTrackPtr->CommonData.trackType =
+ streamDescPtr->streamType;
+ mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
+ mMp4FileDataPtr->MaxChunkSize; /* init value */
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk =
+ M4MP4W_VIDEO_MAX_AU_PER_CHUNK;
+
+#endif
+
+ ERR_CHECK(streamDescPtr->timeScale == 1000, M4ERR_PARAMETER);
+ mMp4FileDataPtr->videoTrackPtr->CommonData.timescale = 1000;
+ mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS = 0;
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb = 0;
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sampleSize = 0;
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb = 1;
+ mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[0] = 0; /*init*/
+ mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[0] = 0; /*init*/
+ mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable[0] = 0; /*init*/
+ mMp4FileDataPtr->videoTrackPtr->currentChunk =
+ 0; /*1st chunk is Chunk[0]*/
+ mMp4FileDataPtr->videoTrackPtr->currentPos = 0;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ mMp4FileDataPtr->videoTrackPtr->currentStsc = 0;
+
+#endif
+
+ mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb = 0;
+ mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_ready;
+
+ if (streamDescPtr->streamType == M4SYS_kH263)
+ {
+ if (( streamDescPtr->averageBitrate == -1)
+ || (streamDescPtr->maxBitrate == -1))
+ {
+ /*the bitrate will not be written if the bitrate information
+ is not fully set */
+ mMp4FileDataPtr->videoTrackPtr->avgBitrate = -1;
+ mMp4FileDataPtr->videoTrackPtr->maxBitrate = -1;
+ }
+ else
+ {
+ /*proprietary storage of h263 bitrate.
+ Warning: not the actual bitrate (bit set to 1).*/
+ mMp4FileDataPtr->videoTrackPtr->avgBitrate =
+ streamDescPtr->averageBitrate;
+ mMp4FileDataPtr->videoTrackPtr->maxBitrate =
+ streamDescPtr->maxBitrate;
+ }
+
+ if (( 0 != streamDescPtr->decoderSpecificInfoSize)
+ && (M4OSA_NULL != streamDescPtr->decoderSpecificInfo))
+ {
+ /*decoder specific info size is supposed to be always 7 bytes long */
+ ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 7,
+ M4ERR_PARAMETER);
+ mMp4FileDataPtr->videoTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
+ mMp4FileDataPtr->videoTrackPtr->DSI =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
+ streamDescPtr->decoderSpecificInfoSize,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL,
+ M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
+ (void *)streamDescPtr->decoderSpecificInfo,
+ streamDescPtr->decoderSpecificInfoSize);
+ }
+ else
+ {
+ /*use the default dsi*/
+ mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
+ }
+ }
+
+ if (streamDescPtr->streamType == M4SYS_kMPEG_4)
+ {
+ mMp4FileDataPtr->filesize += 22; /*extra bytes (from h263)*/
+ /* allow DSI to be M4OSA_NULL, in which case the actual DSI will be
+ set by setOption. */
+ if (( 0 == streamDescPtr->decoderSpecificInfoSize)
+ || (M4OSA_NULL == streamDescPtr->decoderSpecificInfo))
+ {
+ mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
+ }
+ else
+ {
+ /*MP4V specific*/
+ /*decoder specific info size is supposed to be always <
+ 105 so that ESD size can be coded with 1 byte*/
+ /*(this should not be restrictive because dsi is always shorter !)*/
+ ERR_CHECK(streamDescPtr->decoderSpecificInfoSize < 105,
+ M4ERR_PARAMETER);
+ mMp4FileDataPtr->videoTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
+ mMp4FileDataPtr->videoTrackPtr->DSI =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
+ streamDescPtr->decoderSpecificInfoSize,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL,
+ M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
+ (void *)streamDescPtr->decoderSpecificInfo,
+ streamDescPtr->decoderSpecificInfoSize);
+ mMp4FileDataPtr->filesize +=
+ streamDescPtr->decoderSpecificInfoSize;
+ }
+ /*avg bitrate should be set*/
+ ERR_CHECK(streamDescPtr->averageBitrate != -1, M4ERR_PARAMETER);
+ mMp4FileDataPtr->videoTrackPtr->avgBitrate =
+ streamDescPtr->averageBitrate;
+ mMp4FileDataPtr->videoTrackPtr->maxBitrate =
+ streamDescPtr->averageBitrate;
+ }
+
+ if (streamDescPtr->streamType == M4SYS_kH264)
+ {
+ /* H264 specific information */
+ mMp4FileDataPtr->videoTrackPtr->avgBitrate =
+ streamDescPtr->averageBitrate;
+ mMp4FileDataPtr->videoTrackPtr->maxBitrate =
+ streamDescPtr->maxBitrate;
+
+ if ((0 != streamDescPtr->decoderSpecificInfoSize)
+ && (M4OSA_NULL != streamDescPtr->decoderSpecificInfo))
+ {
+ /* + H.264 trimming */
+ if (M4OSA_TRUE == mMp4FileDataPtr->bMULPPSSPS)
+ {
+ M4OSA_UInt16 SPSLength, PPSLength;
+ M4OSA_UInt16 *DSI;
+ /* Store the DSI size */
+ mMp4FileDataPtr->videoTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize
+ - 24;
+
+ /* Copy the DSI (SPS + PPS) */
+ mMp4FileDataPtr->videoTrackPtr->DSI =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
+ streamDescPtr->decoderSpecificInfoSize,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+ != M4OSA_NULL, M4ERR_ALLOC);
+
+ DSI =
+ (M4OSA_UInt16 *)streamDescPtr->decoderSpecificInfo;
+ SPSLength = DSI[6];
+ PPSLength = DSI[10];
+ memcpy(
+ (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
+ (void *)((streamDescPtr->
+ decoderSpecificInfo)+12), 2);
+ memcpy(
+ (void *)((mMp4FileDataPtr->videoTrackPtr->
+ DSI)+2), (void *)((streamDescPtr->
+ decoderSpecificInfo)+28), SPSLength);
+
+ memcpy(
+ (void *)((mMp4FileDataPtr->videoTrackPtr->
+ DSI)+2 + SPSLength),
+ (void *)((streamDescPtr->
+ decoderSpecificInfo)+20), 2);
+ memcpy(
+ (void *)((mMp4FileDataPtr->videoTrackPtr->
+ DSI)+4 + SPSLength),
+ (void *)((streamDescPtr->
+ decoderSpecificInfo)+28 + SPSLength),
+ PPSLength);
+ /* - H.264 trimming */
+ }
+ else
+ {
+ /* Store the DSI size */
+ mMp4FileDataPtr->videoTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
+
+ /* Copy the DSI (SPS + PPS) */
+ mMp4FileDataPtr->videoTrackPtr->DSI =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(
+ streamDescPtr->decoderSpecificInfoSize,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+ != M4OSA_NULL, M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
+ (void *)streamDescPtr->
+ decoderSpecificInfo,
+ streamDescPtr->decoderSpecificInfoSize);
+ }
+ }
+ else
+ {
+ /*use the default dsi*/
+ mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+ mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
+ }
+ }
+ break;
+
+ default:
+ err = M4ERR_PARAMETER;
+ }
+
+ return err;
+}
+
+/*******************************************************************************/
+/* Moves the writer from the 'ready' state to the 'writing' state.
+   Allocates the first chunk buffer of each declared track, pre-computes the
+   audio chunk duration used when audio size estimation is enabled, and (in
+   non-MOOV-first builds) opens the output file and writes the 'ftyp' box plus
+   a zero-sized 'mdat' placeholder that is patched when the file is closed.
+   @param context  Writer context created earlier (M4MP4W_Mp4FileData*).
+   @return M4NO_ERROR on success; M4ERR_PARAMETER, M4ERR_STATE, M4ERR_ALLOC,
+           M4ERR_BAD_CONTEXT, or a file-writer error otherwise. */
+M4OSA_ERR M4MP4W_startWriting( M4OSA_Context context )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 fileModeAccess = M4OSA_kFileWrite | M4OSA_kFileCreate;
+    M4OSA_UInt32 i;
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    /* Macro state transition: only legal from 'ready'. */
+    ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
+    mMp4FileDataPtr->state = M4MP4W_writing;
+
+    /*audio microstate */
+    /* if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)*/
+    if (mMp4FileDataPtr->hasAudio)
+    {
+        ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState == M4MP4W_ready),
+            M4ERR_STATE);
+        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing;
+
+        /* First audio chunk allocation */
+        mMp4FileDataPtr->audioTrackPtr->Chunk[0] = (M4OSA_UChar
+            *)M4OSA_32bitAlignedMalloc(mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
+            M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk[0]");
+        ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk[0] != M4OSA_NULL,
+            M4ERR_ALLOC);
+    }
+
+    /*video microstate*/
+    /* if (mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)*/
+    if (mMp4FileDataPtr->hasVideo)
+    {
+        ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState == M4MP4W_ready),
+            M4ERR_STATE);
+        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing;
+
+        /* First video chunk allocation */
+        mMp4FileDataPtr->videoTrackPtr->Chunk[0] = (M4OSA_UChar
+            *)M4OSA_32bitAlignedMalloc(mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
+            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk[0]");
+        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk[0] != M4OSA_NULL,
+            M4ERR_ALLOC);
+    }
+
+    if (mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
+    {
+        /*set audioMsChunkDur (duration in ms before a new chunk is created)
+        for audio size estimation*/
+        /* Size estimation requires both tracks to be present. */
+        ERR_CHECK(mMp4FileDataPtr->hasVideo, M4ERR_BAD_CONTEXT);
+        ERR_CHECK(mMp4FileDataPtr->hasAudio, M4ERR_BAD_CONTEXT);
+
+        /* The factor 20 appears to assume ~20 ms per audio AU
+           (e.g. AMR frames) -- TODO confirm against the audio encoder used. */
+        mMp4FileDataPtr->audioMsChunkDur =
+            20 * mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
+            / mMp4FileDataPtr->audioTrackPtr->MaxAUSize;
+
+        /* A non-zero interleave duration caps the chunk duration. */
+        if (( mMp4FileDataPtr->InterleaveDur != 0)
+            && (mMp4FileDataPtr->InterleaveDur
+            < 20 *mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
+            / mMp4FileDataPtr->audioTrackPtr->MaxAUSize))
+        {
+            mMp4FileDataPtr->audioMsChunkDur = mMp4FileDataPtr->InterleaveDur;
+        }
+    }
+
+#ifndef _M4MP4W_MOOV_FIRST
+
+    /*open file in write binary mode*/
+
+    err = mMp4FileDataPtr->fileWriterFunctions->openWrite(
+        &mMp4FileDataPtr->fileWriterContext,
+        mMp4FileDataPtr->url, fileModeAccess);
+    ERR_CHECK((M4NO_ERROR == err), err);
+
+    /*ftyp atom*/
+    if (mMp4FileDataPtr->ftyp.major_brand != 0)
+    {
+        /* Put customized ftyp box */
+        /* Box size = 8 (header) + 4 (major) + 4 (minor) + 4 per brand. */
+        err =
+            M4MP4W_putBE32(16 + (mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4),
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+        err = M4MP4W_putBE32(M4MPAC_FTYP_TAG,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+        err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.major_brand,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+        err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.minor_version,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+
+        for ( i = 0; i < mMp4FileDataPtr->ftyp.nbCompatibleBrands; i++ )
+        {
+            err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.compatible_brands[i],
+                mMp4FileDataPtr->fileWriterFunctions,
+                mMp4FileDataPtr->fileWriterContext);
+            ERR_CHECK((M4NO_ERROR == err), err);
+        }
+    }
+    else
+    {
+        /* Put default ftyp box */
+        err = M4MP4W_putBlock(Default_ftyp, sizeof(Default_ftyp),
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+    }
+
+    /*init mdat value with 0 but the right value is set just before the file is closed*/
+    err = M4MP4W_putBE32(0, mMp4FileDataPtr->fileWriterFunctions,
+        mMp4FileDataPtr->fileWriterContext);
+    ERR_CHECK((M4NO_ERROR == err), err);
+    err = M4MP4W_putBlock(CommonBlock2, sizeof(CommonBlock2),
+        mMp4FileDataPtr->fileWriterFunctions,
+        mMp4FileDataPtr->fileWriterContext);
+    ERR_CHECK((M4NO_ERROR == err), err);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+
+    /* Pre-reserve disk space in a scratch 'safety file' so that the moov box
+       (and any pending chunk flushed in closeWrite) can still be written
+       when the filesystem fills up during recording. */
+    if (0 != mMp4FileDataPtr->MaxFileSize
+        && M4OSA_NULL != mMp4FileDataPtr->safetyFileUrl)
+    {
+        M4OSA_ERR err2 = M4NO_ERROR;
+        M4OSA_Context safetyFileContext = M4OSA_NULL;
+        M4OSA_UInt32 safetyFileSize = 0, addendum = 0;
+        M4OSA_UChar dummyData[100]; /* To fill the safety file with */
+
+        err =
+            mMp4FileDataPtr->fileWriterFunctions->openWrite(&safetyFileContext,
+            mMp4FileDataPtr->safetyFileUrl, fileModeAccess);
+        ERR_CHECK((M4NO_ERROR == err), err);
+
+        mMp4FileDataPtr->cleanSafetyFile = M4OSA_TRUE;
+
+        /* 10% seems to be a reasonable worst case, but also provision for 1kb of moov overhead.*/
+        safetyFileSize = 1000 + (mMp4FileDataPtr->MaxFileSize * 10 + 99) / 100;
+
+        /* Here we add space to take into account the fact we have to flush any pending
+        chunk in closeWrite, this space is the sum of the maximum chunk sizes, for each
+        track. */
+
+#ifndef _M4MP4W_UNBUFFERED_VIDEO
+
+        if (mMp4FileDataPtr->hasVideo)
+        {
+            safetyFileSize += mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
+        }
+
+#endif
+
+        if (mMp4FileDataPtr->hasAudio)
+        {
+            safetyFileSize += mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
+        }
+
+        memset((void *)dummyData, 0xCA,sizeof(dummyData)); /* For extra safety. */
+
+        /* Fill the safety file in 100-byte writes until safetyFileSize
+           bytes (rounded up) have been reserved. */
+        for ( i = 0;
+            i < (safetyFileSize + sizeof(dummyData) - 1) / sizeof(dummyData);
+            i++ )
+        {
+            err = mMp4FileDataPtr->fileWriterFunctions->writeData(
+                safetyFileContext, dummyData, sizeof(dummyData));
+
+            if (M4NO_ERROR != err)
+                break;
+            /* Don't return from the function yet, as we need to close the file first. */
+        }
+
+        /* I don't need to keep it open. */
+        err2 =
+            mMp4FileDataPtr->fileWriterFunctions->closeWrite(safetyFileContext);
+
+        /* Report the write error in priority; otherwise the close error. */
+        if (M4NO_ERROR != err)
+        {
+            return err;
+        }
+        else
+            ERR_CHECK((M4NO_ERROR == err2), err2);
+
+        M4OSA_TRACE1_0("Safety file correctly created");
+    }
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+
+    return err;
+}
+
+/*******************************************************************************/
+/* Closes the current audio chunk and opens a new one.
+   In non-MOOV-first builds the filled chunk buffer is first flushed to the
+   output file; if that write fails, the file position is restored and
+   M4WAR_MP4W_OVERSIZE is returned so the unflushed chunk can still be written
+   by closeWrite. The chunk bookkeeping tables (size, time, sample count, and
+   offset or buffer pointer depending on build flags) are updated and, if
+   needed, grown or bounds-checked.
+   @param context          Writer context (M4MP4W_Mp4FileData*).
+   @param leftSpaceInChunk Out: bytes available in the freshly opened chunk.
+   @return M4NO_ERROR; M4ERR_ALLOC on allocation failure; M4WAR_MP4W_OVERSIZE
+           when the chunk/stsc tables are full or the file cannot grow. */
+M4OSA_ERR M4MP4W_newAudioChunk( M4OSA_Context context,
+    M4OSA_UInt32 *leftSpaceInChunk )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    M4OSA_Double scale_audio;
+
+#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+    M4OSA_UInt32 reallocNb;
+
+#endif
+
+    /* video only */
+    /* (nothing to do when the file carries no audio track) */
+
+    if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)
+        return M4NO_ERROR;
+
+    M4OSA_TRACE1_0(" M4MP4W_newAudioChunk - flush audio");
+    M4OSA_TRACE1_2("current chunk = %d  offset = 0x%x",
+        mMp4FileDataPtr->audioTrackPtr->currentChunk,
+        mMp4FileDataPtr->absoluteCurrentPos);
+
+    /* Conversion factor: track timescale ticks -> milliseconds. */
+    scale_audio = 1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
+
+#ifndef _M4MP4W_MOOV_FIRST
+    /*flush chunk*/
+
+    err = M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[0],
+        mMp4FileDataPtr->audioTrackPtr->currentPos,
+        mMp4FileDataPtr->fileWriterFunctions,
+        mMp4FileDataPtr->fileWriterContext);
+
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos;
+        M4OSA_TRACE2_1(
+            "M4MP4W_newAudioChunk: putBlock error when flushing chunk: %#X",
+            err);
+        /* Ouch, we got an error writing to the file, but we need to properly react so that the
+        state is still consistent and we can properly close the file so that what has been
+        recorded so far is not lost. Yay error recovery! */
+
+        /* First, we do not know where we are in the file. Put us back at where we were before
+        attempting to write the data. That way, we're consistent with the chunk state data. */
+        err = mMp4FileDataPtr->fileWriterFunctions->seek(
+            mMp4FileDataPtr->fileWriterContext,
+            M4OSA_kFileSeekBeginning, &temp);
+
+        M4OSA_TRACE2_3(
+            "Backtracking to position 0x%08X, seek returned %d and position %08X",
+            mMp4FileDataPtr->absoluteCurrentPos, err, temp);
+
+        /* Then, do not update any info whatsoever in the writing state. This will have the
+        consequence that it will be as if the chunk has not been flushed yet, and therefore
+        it will be done as part of closeWrite (where there could be room to do so,
+        if some emergency room is freed for that purpose). */
+
+        /* And lastly (for here), return that we've reached the limit of available space. */
+
+        return M4WAR_MP4W_OVERSIZE;
+    }
+
+    /*update chunk offset*/
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkOffsetTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+        mMp4FileDataPtr->absoluteCurrentPos;
+
+    /*add chunk size to absoluteCurrentPos*/
+    mMp4FileDataPtr->absoluteCurrentPos +=
+        mMp4FileDataPtr->audioTrackPtr->currentPos;
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    /*update chunk info */
+
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkSizeTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+        mMp4FileDataPtr->audioTrackPtr->currentPos;
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+        mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS;
+
+    mMp4FileDataPtr->audioTrackPtr->currentChunk += 1;
+    /*if audio amount of data is not estimated*/
+    /* 16 extra bytes of moov metadata per chunk -- presumably one stco plus
+       one stsc entry -- TODO confirm against the moov writing code. */
+    if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+        mMp4FileDataPtr->filesize += 16;
+
+    /*alloc new chunk*/
+    /*only if not already allocated*/
+    if (mMp4FileDataPtr->audioTrackPtr->currentChunk
+        > mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk)
+    {
+        /*update LastAllocatedChunk ( -> = currentChunk)*/
+        mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk += 1;
+
+        /*max nb of chunk is now dynamic*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        /* Fixed-size tables on phone builds: the +3 keeps headroom near the
+           end of the table -- TODO confirm the exact margin rationale. */
+        if (mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk
+            + 3 > M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+        {
+            M4OSA_TRACE1_0("M4MP4W_newAudioChunk : audio chunk table is full");
+            return M4WAR_MP4W_OVERSIZE;
+        }
+
+#else
+
+        /* Grow all per-chunk tables by M4MP4W_CHUNK_AUDIO_ALLOC_NB entries
+           each time the table boundary is reached.
+           NOTE(review): M4MP4W_realloc's result overwrites the old pointer
+           directly; if it returns NULL on failure the original table leaks --
+           confirm M4MP4W_realloc's failure semantics. */
+        if (((mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk)
+            % M4MP4W_CHUNK_AUDIO_ALLOC_NB) == 0)
+        {
+            reallocNb = mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk
+                + M4MP4W_CHUNK_AUDIO_ALLOC_NB;
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+            mMp4FileDataPtr->audioTrackPtr->Chunk =
+                (M4OSA_UChar ** )M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->Chunk,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4OSA_UChar *),
+                reallocNb * sizeof(M4OSA_UChar *));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
+                M4ERR_ALLOC);
+
+#else
+
+            mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                chunkOffsetTable,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+            mMp4FileDataPtr->audioTrackPtr->chunkSizeTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                chunkSizeTable,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSizeTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+            mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                chunkSampleNbTable,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+            mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable =
+                (M4MP4W_Time32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                chunkTimeMsTable,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4MP4W_Time32),
+                reallocNb * sizeof(M4MP4W_Time32));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+        }
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+        /* MOOV-first builds keep every chunk in memory: allocate a fresh
+           buffer for the new chunk. */
+        mMp4FileDataPtr->audioTrackPtr->
+            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk] = (M4OSA_UChar
+            *)M4OSA_32bitAlignedMalloc(mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
+            M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->currentChunk");
+        ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->
+            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk]
+            != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    }
+
+    /*update leftSpaceInChunk, currentPos and currentChunkDur*/
+    *leftSpaceInChunk = mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
+    mMp4FileDataPtr->audioTrackPtr->currentPos = 0;
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+    /* check wether to use a new stsc or not */
+    /* chunkSampleNbTable entries pack (chunk index << 12) | sample count;
+       the low 12 bits hold the per-chunk sample count, so two consecutive
+       entries with equal low bits can share one stsc run. */
+
+    if (mMp4FileDataPtr->audioTrackPtr->currentStsc > 0)
+    {
+        if (( mMp4FileDataPtr->audioTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->
+            currentStsc] & 0xFFF) != (mMp4FileDataPtr->audioTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc
+            - 1] & 0xFFF))
+            mMp4FileDataPtr->audioTrackPtr->currentStsc += 1;
+    }
+    else
+        mMp4FileDataPtr->audioTrackPtr->currentStsc += 1;
+
+    /* max nb of chunk is now dynamic */
+    if (mMp4FileDataPtr->audioTrackPtr->currentStsc
+        + 3 > M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+    {
+        M4OSA_TRACE1_0("M4MP4W_newAudioChunk : audio stsc table is full");
+        return M4WAR_MP4W_OVERSIZE;
+    }
+
+    /* set nb of samples in the new chunk to 0 */
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc] =
+        0 + (mMp4FileDataPtr->audioTrackPtr->currentChunk << 12);
+
+#else
+    /*set nb of samples in the new chunk to 0*/
+
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] = 0;
+
+#endif
+
+    /*set time of the new chunk to lastCTS (for initialization, but updated further to the
+    CTS of the last sample in the chunk)*/
+
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+        (M4OSA_UInt32)(mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
+        * scale_audio);
+
+    return err;
+}
+
+/*******************************************************************************/
+/* Closes the current video chunk and opens a new one.
+   Mirrors M4MP4W_newAudioChunk for the video track: in non-MOOV-first,
+   buffered builds the filled chunk is flushed to the output file (with the
+   same seek-back error recovery returning M4WAR_MP4W_OVERSIZE on failure);
+   in _M4MP4W_UNBUFFERED_VIDEO builds the samples are already on disk so no
+   flush is done. Chunk bookkeeping tables are then updated/grown.
+   @param context          Writer context (M4MP4W_Mp4FileData*).
+   @param leftSpaceInChunk Out: bytes available in the freshly opened chunk.
+   @return M4NO_ERROR; M4ERR_ALLOC on allocation failure; M4WAR_MP4W_OVERSIZE
+           when the chunk/stsc tables are full or the file cannot grow. */
+M4OSA_ERR M4MP4W_newVideoChunk( M4OSA_Context context,
+    M4OSA_UInt32 *leftSpaceInChunk )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    M4OSA_Double scale_video;
+
+#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+    M4OSA_UInt32 reallocNb;
+
+#endif
+
+    /* audio only */
+    /* (nothing to do when the file carries no video track) */
+
+    if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)
+        return M4NO_ERROR;
+
+    M4OSA_TRACE1_0("M4MP4W_newVideoChunk - flush video");
+    M4OSA_TRACE1_2("current chunk = %d  offset = 0x%x",
+        mMp4FileDataPtr->videoTrackPtr->currentChunk,
+        mMp4FileDataPtr->absoluteCurrentPos);
+
+    /* Conversion factor: track timescale ticks -> milliseconds. */
+    scale_video = 1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
+
+#ifndef _M4MP4W_MOOV_FIRST
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+    /* samples are already written to file */
+#else
+    /*flush chunk*/
+
+    err = M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[0],
+        mMp4FileDataPtr->videoTrackPtr->currentPos,
+        mMp4FileDataPtr->fileWriterFunctions,
+        mMp4FileDataPtr->fileWriterContext);
+
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos;
+        M4OSA_TRACE2_1(
+            "M4MP4W_newVideoChunk: putBlock error when flushing chunk: %#X",
+            err);
+        /* Ouch, we got an error writing to the file, but we need to properly react so that the
+        state is still consistent and we can properly close the file so that what has been
+        recorded so far is not lost. Yay error recovery! */
+
+        /* First, we do not know where we are in the file. Put us back at where we were before
+        attempting to write the data. That way, we're consistent with the chunk state data. */
+        err = mMp4FileDataPtr->fileWriterFunctions->seek(
+            mMp4FileDataPtr->fileWriterContext,
+            M4OSA_kFileSeekBeginning, &temp);
+
+        M4OSA_TRACE2_3(
+            "Backtracking to position 0x%08X, seek returned %d and position %08X",
+            mMp4FileDataPtr->absoluteCurrentPos, err, temp);
+        /* Then, do not update any info whatsoever in the writing state. This will have the
+        consequence that it will be as if the chunk has not been flushed yet, and therefore it
+        will be done as part of closeWrite (where there could be room to do so, if some
+        emergency room is freed for that purpose). */
+
+        /* And lastly (for here), return that we've reached the limit of available space.
+        We don't care about the error originally returned by putBlock. */
+
+        return M4WAR_MP4W_OVERSIZE;
+    }
+
+#endif
+
+    /*update chunk offset*/
+
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkOffsetTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+        mMp4FileDataPtr->absoluteCurrentPos;
+
+    /*add chunk size to absoluteCurrentPos*/
+    mMp4FileDataPtr->absoluteCurrentPos +=
+        mMp4FileDataPtr->videoTrackPtr->currentPos;
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    /*update chunk info before to go for a new one*/
+
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkSizeTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+        mMp4FileDataPtr->videoTrackPtr->currentPos;
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+        (M4OSA_UInt32)(mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
+        * scale_video);
+
+    mMp4FileDataPtr->videoTrackPtr->currentChunk += 1;
+    /* 16 extra bytes of moov metadata per chunk -- presumably one stco plus
+       one stsc entry -- TODO confirm against the moov writing code. */
+    mMp4FileDataPtr->filesize += 16;
+
+    /*alloc new chunk*/
+    /*only if not already allocated*/
+    if (mMp4FileDataPtr->videoTrackPtr->currentChunk
+        > mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk)
+    {
+        /*update LastAllocatedChunk ( -> = currentChunk)*/
+        mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk += 1;
+
+        /*max nb of chunk is now dynamic*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        /* Fixed-size tables on phone builds: the +3 keeps headroom near the
+           end of the table -- TODO confirm the exact margin rationale. */
+        if ( mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk
+            + 3 > M4MP4W_CHUNK_ALLOC_NB)
+        {
+            M4OSA_TRACE1_0("M4MP4W_newVideoChunk : video chunk table is full");
+            return M4WAR_MP4W_OVERSIZE;
+        }
+
+#else
+
+        /* Grow all per-chunk tables by M4MP4W_CHUNK_ALLOC_NB entries each
+           time the table boundary is reached.
+           NOTE(review): M4MP4W_realloc's result overwrites the old pointer
+           directly; if it returns NULL on failure the original table leaks --
+           confirm M4MP4W_realloc's failure semantics. */
+        if (((mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk)
+            % M4MP4W_CHUNK_ALLOC_NB) == 0)
+        {
+            reallocNb = mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk
+                + M4MP4W_CHUNK_ALLOC_NB;
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+            mMp4FileDataPtr->videoTrackPtr->Chunk =
+                (M4OSA_UChar ** )M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->Chunk,
+                ( reallocNb
+                - M4MP4W_CHUNK_ALLOC_NB) * sizeof(M4OSA_UChar *),
+                reallocNb * sizeof(M4OSA_UChar *));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
+                M4ERR_ALLOC);
+
+#else
+
+            mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                chunkOffsetTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+            mMp4FileDataPtr->videoTrackPtr->chunkSizeTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                chunkSizeTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSizeTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+            mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                chunkSampleNbTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+            mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable =
+                (M4MP4W_Time32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                chunkTimeMsTable, ( reallocNb
+                - M4MP4W_CHUNK_ALLOC_NB) * sizeof(M4MP4W_Time32),
+                reallocNb * sizeof(M4MP4W_Time32));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+        }
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+        /* MOOV-first builds keep every chunk in memory: allocate a fresh
+           buffer for the new chunk. */
+        mMp4FileDataPtr->videoTrackPtr->
+            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk] = (M4OSA_UChar
+            *)M4OSA_32bitAlignedMalloc(mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
+            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->MaxChunkSize");
+        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->
+            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
+            != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    }
+
+    /*update leftSpaceInChunk, currentPos and currentChunkDur*/
+    *leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
+    mMp4FileDataPtr->videoTrackPtr->currentPos = 0;
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+    /* check wether to use a new stsc or not */
+    /* chunkSampleNbTable entries pack (chunk index << 12) | sample count;
+       the low 12 bits hold the per-chunk sample count, so two consecutive
+       entries with equal low bits can share one stsc run. */
+
+    if (mMp4FileDataPtr->videoTrackPtr->currentStsc > 0)
+    {
+        if ((mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
+            currentStsc] & 0xFFF) != (mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc
+            - 1] & 0xFFF))
+            mMp4FileDataPtr->videoTrackPtr->currentStsc += 1;
+    }
+    else
+        mMp4FileDataPtr->videoTrackPtr->currentStsc += 1;
+
+    /* max nb of chunk is now dynamic */
+    if (mMp4FileDataPtr->videoTrackPtr->currentStsc
+        + 3 > M4MP4W_CHUNK_ALLOC_NB)
+    {
+        M4OSA_TRACE1_0("M4MP4W_newVideoChunk : video stsc table is full");
+        return M4WAR_MP4W_OVERSIZE;
+    }
+
+    /* set nb of samples in the new chunk to 0 */
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc] =
+        0 + (mMp4FileDataPtr->videoTrackPtr->currentChunk << 12);
+
+#else
+    /*set nb of samples in the new chunk to 0*/
+
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] = 0;
+
+#endif
+
+    /*set time of the new chunk to lastCTS (for initialization, but updated further to the
+    CTS of the last sample in the chunk)*/
+
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+        (M4OSA_UInt32)(mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
+        * scale_video);
+
+    return err;
+}
+
+/*******************************************************************************/
+/* Begins a new access unit (AU) for the given stream.
+   Hands the caller a pointer (auPtr->dataAddress) into the current chunk
+   buffer where the AU payload must be written, and the space available there
+   (auPtr->size). A new chunk is opened first when the remaining space is
+   smaller than the track's MaxAUSize, when the interleave duration is
+   exceeded, or (phone builds) when the per-chunk AU limit is reached.
+   For H.264 the returned pointer is advanced by 4 bytes so the NALU length
+   field can be prepended later.
+   @param context  Writer context (M4MP4W_Mp4FileData*), state must be 'writing'.
+   @param streamID AudioStreamID or VideoStreamID.
+   @param auPtr    In/out AU descriptor; size and dataAddress are set here.
+   @return M4NO_ERROR; M4ERR_PARAMETER/M4ERR_STATE on misuse;
+           M4ERR_BAD_STREAM_ID for an unknown stream; or any error
+           propagated from M4MP4W_newAudioChunk/M4MP4W_newVideoChunk. */
+M4OSA_ERR M4MP4W_startAU( M4OSA_Context context, M4SYS_StreamID streamID,
+    M4SYS_AccessUnit *auPtr )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+
+    M4OSA_UInt32 leftSpaceInChunk;
+    M4MP4W_Time32 chunkDurMs;
+
+    M4OSA_Double scale_audio;
+    M4OSA_Double scale_video;
+
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+    ERR_CHECK(auPtr != M4OSA_NULL, M4ERR_PARAMETER);
+
+    M4OSA_TRACE2_0("----- M4MP4W_startAU -----");
+
+    /*check macro state*/
+    ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_writing), M4ERR_STATE);
+
+    if (streamID == AudioStreamID) /*audio stream*/
+    {
+        M4OSA_TRACE2_0("M4MP4W_startAU -> audio");
+
+        /* Conversion factor: audio timescale ticks -> milliseconds. */
+        scale_audio =
+            1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
+
+        /*audio microstate*/
+        /* startAU/processAU must alternate; enforce via the microstate. */
+        ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState
+            == M4MP4W_writing), M4ERR_STATE);
+        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing_startAU;
+
+        leftSpaceInChunk = mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
+            - mMp4FileDataPtr->audioTrackPtr->currentPos;
+
+        M4OSA_TRACE2_2("audio %d  %d",
+            mMp4FileDataPtr->audioTrackPtr->currentPos, leftSpaceInChunk);
+
+        /* Elapsed time (ms) since the current chunk was opened. */
+        chunkDurMs =
+            (M4OSA_UInt32)(( mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
+            * scale_audio) - mMp4FileDataPtr->audioTrackPtr->
+            chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->
+            currentChunk]);
+
+        if ((leftSpaceInChunk < mMp4FileDataPtr->audioTrackPtr->MaxAUSize)
+            || (( mMp4FileDataPtr->InterleaveDur != 0)
+            && (chunkDurMs >= mMp4FileDataPtr->InterleaveDur)))
+        {
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+            /* only if there is at least 1 video sample in the chunk */
+
+            if ((mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)
+                && (mMp4FileDataPtr->videoTrackPtr->currentPos > 0))
+            {
+                /* close the opened video chunk before creating a new audio one */
+                err = M4MP4W_newVideoChunk(context, &leftSpaceInChunk);
+
+                if (err != M4NO_ERROR)
+                    return err;
+            }
+
+#endif
+            /* not enough space in current chunk: create a new one */
+
+            err = M4MP4W_newAudioChunk(context, &leftSpaceInChunk);
+
+            if (err != M4NO_ERROR)
+                return err;
+        }
+
+        /* Caller gets the whole remaining space; processAU records the
+           actual AU size afterwards. */
+        auPtr->size = leftSpaceInChunk;
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+        auPtr->dataAddress = (M4OSA_MemAddr32)(mMp4FileDataPtr->audioTrackPtr->
+            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk]
+            + mMp4FileDataPtr->audioTrackPtr->currentPos);
+
+#else
+
+        auPtr->dataAddress =
+            (M4OSA_MemAddr32)(mMp4FileDataPtr->audioTrackPtr->Chunk[0]
+            + mMp4FileDataPtr->audioTrackPtr->currentPos);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    }
+    else if (streamID == VideoStreamID) /*video stream*/
+    {
+        M4OSA_TRACE2_0("M4MP4W_startAU -> video");
+
+        /* Conversion factor: video timescale ticks -> milliseconds. */
+        scale_video =
+            1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
+
+        /*video microstate*/
+        ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState
+            == M4MP4W_writing), M4ERR_STATE);
+        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing_startAU;
+
+        leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize
+            - mMp4FileDataPtr->videoTrackPtr->currentPos;
+
+        /* Elapsed time (ms) since the current chunk was opened. */
+        chunkDurMs =
+            (M4OSA_UInt32)(( mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
+            * scale_video) - mMp4FileDataPtr->videoTrackPtr->
+            chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->
+            currentChunk]);
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+        /* Unbuffered video always offers the full chunk size (samples go
+           straight to file). */
+        leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
+
+#endif
+
+        M4OSA_TRACE2_2("video %d  %d",
+            mMp4FileDataPtr->videoTrackPtr->currentPos, leftSpaceInChunk);
+
+        if (( leftSpaceInChunk < mMp4FileDataPtr->videoTrackPtr->MaxAUSize)
+            || (( mMp4FileDataPtr->InterleaveDur != 0)
+            && (chunkDurMs >= mMp4FileDataPtr->InterleaveDur))
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            /* Low 12 bits of chunkSampleNbTable hold the per-chunk sample
+               count; also split when MaxAUperChunk is reached. */
+            || (( mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk != 0)
+            && (( mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
+            currentStsc] & 0xFFF)
+            == mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk))
+
+#endif
+
+            )
+        {
+            /*not enough space in current chunk: create a new one*/
+            err = M4MP4W_newVideoChunk(context, &leftSpaceInChunk);
+
+            if (err != M4NO_ERROR)
+                return err;
+        }
+
+        /* NOTE(review): the traces below log auPtr->size and
+           auPtr->dataAddress BEFORE they are assigned in this call, so they
+           show the caller's stale values -- confirm this is intentional
+           debug output of the previous AU. */
+        M4OSA_TRACE2_3("startAU: size 0x%x pos 0x%x chunk %u", auPtr->size,
+            mMp4FileDataPtr->videoTrackPtr->currentPos,
+            mMp4FileDataPtr->videoTrackPtr->currentChunk);
+
+        M4OSA_TRACE3_1("adr = 0x%p", auPtr->dataAddress);
+
+        if (auPtr->dataAddress)
+        {
+            M4OSA_TRACE3_3(" data = %08X %08X %08X", auPtr->dataAddress[0],
+                auPtr->dataAddress[1], auPtr->dataAddress[2]);
+        }
+
+        /* Caller gets the whole remaining space; processAU records the
+           actual AU size afterwards. */
+        auPtr->size = leftSpaceInChunk;
+#ifdef _M4MP4W_MOOV_FIRST
+
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH264)
+            auPtr->dataAddress =
+                (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->
+                Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
+                + mMp4FileDataPtr->videoTrackPtr->currentPos + 4);
+        else
+            auPtr->dataAddress =
+                (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->
+                Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
+                + mMp4FileDataPtr->videoTrackPtr->currentPos);
+
+#else
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH264)
+            auPtr->dataAddress =
+                (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0] + 4);
+        else
+            auPtr->dataAddress =
+                (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]);
+
+#else
+
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH264)
+            auPtr->dataAddress =
+                (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]
+                + mMp4FileDataPtr->videoTrackPtr->currentPos
+                + 4); /* In H264, we must start by the length of the NALU, coded in 4 bytes */
+        else
+            auPtr->dataAddress =
+                (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]
+                + mMp4FileDataPtr->videoTrackPtr->currentPos);
+
+#endif /*_M4MP4W_UNBUFFERED_VIDEO*/
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    }
+    else
+        return M4ERR_BAD_STREAM_ID;
+
+    M4OSA_TRACE1_3("M4MPW_startAU: start address:%p, size:%lu, stream:%d",
+        auPtr->dataAddress, auPtr->size, streamID);
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_processAU( M4OSA_Context context, M4SYS_StreamID streamID,
+ M4SYS_AccessUnit *auPtr )
+/*******************************************************************************/
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4MP4W_Time32 delta;
+ M4MP4W_Time32 lastSampleDur;
+ M4OSA_UInt32 i;
+ /*expectedSize is the max filesize to forecast when adding a new AU:*/
+ M4OSA_UInt32 expectedSize =
+ 32; /*initialized with an estimation of the max metadata space needed for an AU.*/
+ M4OSA_Double scale_audio = 0.0;
+ M4OSA_Double scale_video = 0.0;
+
+ M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+ ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+ /*check macro state*/
+ ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_writing), M4ERR_STATE);
+
+ M4OSA_TRACE2_0("M4MP4W_processAU");
+
+ if (streamID == AudioStreamID)
+ scale_audio =
+ 1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
+
+ if (streamID == VideoStreamID)
+ scale_video =
+ 1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
+
+ /* PL 27/10/2008: after the resurgence of the AAC 128 bug, I added a debug check that
+ the encoded data didn't overflow the available space in the AU */
+
+ switch( streamID )
+ {
+ case AudioStreamID:
+ M4OSA_DEBUG_IF1(auPtr->size
+ + mMp4FileDataPtr->audioTrackPtr->currentPos
+ > mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
+ M4ERR_CONTEXT_FAILED,
+ "Uh oh. Buffer overflow in the writer. Abandon ship!");
+ M4OSA_DEBUG_IF2(auPtr->size
+ > mMp4FileDataPtr->audioTrackPtr->MaxAUSize,
+ M4ERR_CONTEXT_FAILED,
+ "Oops. An AU went over the declared Max AU size.\
+ You might wish to investigate that.");
+ break;
+
+ case VideoStreamID:
+ M4OSA_DEBUG_IF1(auPtr->size
+ + mMp4FileDataPtr->videoTrackPtr->currentPos
+ > mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
+ M4ERR_CONTEXT_FAILED,
+ "Uh oh. Buffer overflow in the writer. Abandon ship!");
+ M4OSA_DEBUG_IF2(auPtr->size
+ > mMp4FileDataPtr->videoTrackPtr->MaxAUSize,
+ M4ERR_CONTEXT_FAILED,
+ "Oops. An AU went over the declared Max AU size.\
+ You might wish to investigate that.");
+ break;
+ }
+
+ /*only if not in the case audio with estimateAudioSize
+ (else, size already estimated at this point)*/
+ if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+ || (streamID == VideoStreamID))
+ {
+ /*check filesize if needed*/
+ if (mMp4FileDataPtr->MaxFileSize != 0)
+ {
+ expectedSize += mMp4FileDataPtr->filesize + auPtr->size;
+
+ if ((streamID == VideoStreamID)
+ && (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+ == M4SYS_kH264))
+ {
+ expectedSize += 4;
+ }
+
+ if (expectedSize > mMp4FileDataPtr->MaxFileSize)
+ {
+ M4OSA_TRACE1_0("processAU : !! FILESIZE EXCEEDED !!");
+
+ /* patch for autostop is MaxFileSize exceeded */
+ M4OSA_TRACE1_0("M4MP4W_processAU : stop at targeted filesize");
+ return M4WAR_MP4W_OVERSIZE;
+ }
+ }
+ }
+
+ /*case audioMsStopTime has already been set during video processing,
+ and now check it for audio*/
+ if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
+ && (streamID == AudioStreamID))
+ {
+ if (mMp4FileDataPtr->audioMsStopTime <= (auPtr->CTS *scale_audio))
+ {
+ /* bugfix: if a new chunk was just created, cancel it before to close */
+ if ((mMp4FileDataPtr->audioTrackPtr->currentChunk != 0)
+ && (mMp4FileDataPtr->audioTrackPtr->currentPos == 0))
+ {
+ mMp4FileDataPtr->audioTrackPtr->currentChunk--;
+ }
+ M4OSA_TRACE1_0("M4MP4W_processAU : audio stop time reached");
+ return M4WAR_MP4W_OVERSIZE;
+ }
+ }
+
+ if (streamID == AudioStreamID) /*audio stream*/
+ {
+ M4OSA_TRACE2_0("M4MP4W_processAU -> audio");
+
+ /*audio microstate*/
+ ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState
+ == M4MP4W_writing_startAU), M4ERR_STATE);
+ mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing;
+
+ mMp4FileDataPtr->audioTrackPtr->currentPos += auPtr->size;
+ /* Warning: time conversion cast 64to32! */
+ delta = (M4MP4W_Time32)auPtr->CTS
+ - mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS;
+
+ /* DEBUG stts entries which are equal to 0 */
+ M4OSA_TRACE2_1("A_DELTA = %ld\n", delta);
+
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+ == 0) /*test if first AU*/
+ {
+ /*set au size*/
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize = auPtr->size;
+
+ /*sample duration is a priori constant in audio case, */
+ /*but if an Au at least has different size, a stsz table will be created */
+
+ /*mMp4FileDataPtr->audioTrackPtr->sampleDuration = delta; */
+ /*TODO test sample duration? (should be 20ms in AMR8, 160 tics with timescale 8000) */
+ }
+ else
+ {
+ /*check if au size is constant (audio) */
+ /*0 sample size means non constant size*/
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize != 0)
+ {
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize
+ != auPtr->size)
+ {
+ /*first AU with different size => non constant size => STSZ table needed*/
+ /*computation of the nb of block of size M4MP4W_STSZ_ALLOC_SIZE to allocate*/
+ mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks =
+ 1 + mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sampleNb
+ * 4 / M4MP4W_STSZ_AUDIO_ALLOC_SIZE;
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ =
+ (M4OSA_UInt32 *)M4OSA_32bitAlignedMalloc(
+ mMp4FileDataPtr->audioTrackPtr->
+ nbOfAllocatedStszBlocks
+ * M4MP4W_STSZ_AUDIO_ALLOC_SIZE,
+ M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->TABLE_STSZ");
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ
+ != M4OSA_NULL, M4ERR_ALLOC);
+
+ for ( i = 0;
+ i < mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+ i++ )
+ {
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ[i] =
+ mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sampleSize;
+ }
+ mMp4FileDataPtr->audioTrackPtr->
+ TABLE_STSZ[mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sampleNb] = auPtr->size;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize =
+ 0; /*used as a flag in that case*/
+ /*more bytes in the file in that case:*/
+ if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+ mMp4FileDataPtr->filesize +=
+ 4 * mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sampleNb;
+ }
+ }
+ /*else table already exists*/
+ else
+ {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ if (4 *(mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb + 3)
+ >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks
+ *M4MP4W_STSZ_AUDIO_ALLOC_SIZE)
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_processAU : audio stsz table is full");
+ return M4WAR_MP4W_OVERSIZE;
+ }
+
+#else
+
+ if (4 *mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+ >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks
+ *M4MP4W_STSZ_AUDIO_ALLOC_SIZE)
+ {
+ mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks +=
+ 1;
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ =
+ (M4OSA_UInt32 *)M4MP4W_realloc(
+ (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+ TABLE_STSZ, ( mMp4FileDataPtr->audioTrackPtr->
+ nbOfAllocatedStszBlocks - 1)
+ * M4MP4W_STSZ_AUDIO_ALLOC_SIZE,
+ mMp4FileDataPtr->audioTrackPtr->
+ nbOfAllocatedStszBlocks
+ * M4MP4W_STSZ_AUDIO_ALLOC_SIZE);
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ
+ != M4OSA_NULL, M4ERR_ALLOC);
+ }
+
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+ mMp4FileDataPtr->audioTrackPtr->
+ TABLE_STSZ[mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sampleNb] = auPtr->size;
+
+ if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+ mMp4FileDataPtr->filesize += 4;
+ }
+ }
+
+ if (delta > mMp4FileDataPtr->audioTrackPtr->sampleDuration)
+ {
+ /* keep track of real sample duration*/
+ mMp4FileDataPtr->audioTrackPtr->sampleDuration = delta;
+ }
+
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+ == 0) /*test if first AU*/
+ {
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[0] = 1;
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[1] = 0;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb = 1;
+ mMp4FileDataPtr->filesize += 8;
+ }
+ else if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+ == 1) /*test if second AU*/
+ {
+#ifndef DUPLICATE_STTS_IN_LAST_AU
+
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[0] += 1;
+
+#endif /*DUPLICATE_STTS_IN_LAST_AU*/
+
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[1] = delta;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb += 1;
+ mMp4FileDataPtr->filesize += 8;
+ }
+ else
+ {
+ /*retrieve last sample delta*/
+ lastSampleDur = mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
+ * (mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sttsTableEntryNb - 1) - 1];
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ if (8 *(mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
+ + 3) >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks
+ *M4MP4W_STTS_AUDIO_ALLOC_SIZE)
+ {
+ M4OSA_TRACE1_0("M4MP4W_processAU : audio stts table is full");
+ return M4WAR_MP4W_OVERSIZE;
+ }
+
+#else
+
+ if (8 *mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
+ >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks
+ *M4MP4W_STTS_AUDIO_ALLOC_SIZE)
+ {
+ mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks += 1;
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS =
+ (M4OSA_UInt32 *)M4MP4W_realloc(
+ (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+ TABLE_STTS, ( mMp4FileDataPtr->audioTrackPtr->
+ nbOfAllocatedSttsBlocks
+ - 1) * M4MP4W_STTS_AUDIO_ALLOC_SIZE,
+ mMp4FileDataPtr->audioTrackPtr->
+ nbOfAllocatedSttsBlocks
+ * M4MP4W_STTS_AUDIO_ALLOC_SIZE);
+ ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS
+ != M4OSA_NULL, M4ERR_ALLOC);
+ }
+
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+ if (delta != lastSampleDur) /*new entry in the table*/
+ {
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
+ mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sttsTableEntryNb - 1)] = 1;
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
+ mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sttsTableEntryNb - 1) + 1] = delta;
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb +=
+ 1;
+ mMp4FileDataPtr->filesize += 8;
+ }
+ else
+ {
+ /*increase of 1 the number of consecutive AUs with same duration*/
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
+ mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sttsTableEntryNb - 1) - 2] += 1;
+ }
+ }
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb += 1;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ mMp4FileDataPtr->audioTrackPtr->
+ chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc] +=
+ 1;
+
+#else
+
+ mMp4FileDataPtr->audioTrackPtr->
+ chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] +=
+ 1;
+
+#endif
+ /* Warning: time conversion cast 64to32! */
+
+ mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS =
+ (M4MP4W_Time32)auPtr->CTS;
+ }
+ else if (streamID == VideoStreamID) /*video stream*/
+ {
+ M4OSA_TRACE2_0("M4MP4W_processAU -> video");
+
+ /* In h264, the size of the AU must be added to the data */
+ if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+ == M4SYS_kH264)
+ {
+ /* Add the size of the NALU in BE */
+ M4OSA_MemAddr8 pTmpDataAddress = M4OSA_NULL;
+ auPtr->dataAddress -= 1;
+ pTmpDataAddress = (M4OSA_MemAddr8)auPtr->dataAddress;
+
+ // bit manipulation
+ *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 24) & 0x000000FF);
+ *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 16) & 0x000000FF);
+ *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 8) & 0x000000FF);
+ *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size) & 0x000000FF);
+
+ auPtr->size += 4;
+ }
+
+ /*video microstate*/
+ ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState
+ == M4MP4W_writing_startAU), M4ERR_STATE);
+ mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing;
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+ /* samples are written to file now */
+
+ err = M4MP4W_putBlock((M4OSA_UChar *)auPtr->dataAddress, auPtr->size,
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext);
+
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos
+ + mMp4FileDataPtr->videoTrackPtr->currentPos;
+ M4OSA_TRACE2_1(
+ "M4MP4W_processAU: putBlock error when writing unbuffered video sample: %#X",
+ err);
+ /* Ouch, we got an error writing to the file, but we need to properly react so that
+ the state is still consistent and we can properly close the file so that what has
+ been recorded so far is not lost. Yay error recovery! */
+
+ /* First, we do not know where we are in the file. Put us back at where we were before
+ attempting to write the data. That way, we're consistent with the chunk and sample
+ state data.absoluteCurrentPos is only updated for chunks, it points to the beginning
+ of the chunk,therefore we need to add videoTrackPtr->currentPos to know where we
+ were in the file. */
+ err = mMp4FileDataPtr->fileWriterFunctions->seek(
+ mMp4FileDataPtr->fileWriterContext,
+ M4OSA_kFileSeekBeginning, &temp);
+
+ M4OSA_TRACE2_3(
+ "Backtracking to position 0x%08X, seek returned %d and position %08X",
+ mMp4FileDataPtr->absoluteCurrentPos
+ + mMp4FileDataPtr->videoTrackPtr->currentPos, err, temp);
+
+ /* Then, do not update any info whatsoever in the writing state. This will have the
+ consequence that it will be as if the sample has never been written, so the chunk
+ will be merely closed after the previous sample (the sample we attempted to write
+ here is lost). */
+
+ /* And lastly (for here), return that we've reached the limit of available space.
+ We don't care about the error originally returned by putBlock. */
+
+ return M4WAR_MP4W_OVERSIZE;
+ }
+
+#endif
+
+ mMp4FileDataPtr->videoTrackPtr->currentPos += auPtr->size;
+
+ /* Warning: time conversion cast 64to32! */
+ delta = (M4MP4W_Time32)auPtr->CTS
+ - mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS;
+
+ /* DEBUG stts entries which are equal to 0 */
+ M4OSA_TRACE2_1("V_DELTA = %ld\n", delta);
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ if (2 *(mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb + 3)
+ >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
+ *M4MP4W_STSZ_ALLOC_SIZE)
+ {
+ M4OSA_TRACE1_0("M4MP4W_processAU : video stsz table is full");
+ return M4WAR_MP4W_OVERSIZE;
+ }
+
+ mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STSZ[mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb] =
+ (M4OSA_UInt16)auPtr->size;
+ mMp4FileDataPtr->filesize += 4;
+
+#else
+
+ if (4 *mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
+ >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
+ *M4MP4W_STSZ_ALLOC_SIZE)
+ {
+ mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks += 1;
+
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
+ (M4OSA_UInt32 *)M4MP4W_realloc(
+ (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
+ ( mMp4FileDataPtr->videoTrackPtr->
+ nbOfAllocatedStszBlocks
+ - 1) * M4MP4W_STSZ_ALLOC_SIZE,
+ mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
+ * M4MP4W_STSZ_ALLOC_SIZE);
+
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ != M4OSA_NULL,
+ M4ERR_ALLOC);
+ }
+
+ mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STSZ[mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb] =
+ auPtr->size;
+ mMp4FileDataPtr->filesize += 4;
+
+#endif
+
+ if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
+ == 0) /*test if first AU*/
+ {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0], 1);
+ M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0], 0);
+
+#else
+
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0] = 1;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = 0;
+
+#endif
+
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb = 1;
+ mMp4FileDataPtr->filesize += 8;
+ }
+ else if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
+ == 1 ) /*test if second AU*/
+ {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0],
+ (M4OSA_UInt16)delta);
+
+#else
+
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = delta;
+
+#endif
+
+ }
+ else
+ {
+ /*retrieve last sample delta*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ lastSampleDur = M4MP4W_get32_Hi(&mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1]);
+
+ if (4 *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
+ + 3) >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks
+ *M4MP4W_STTS_ALLOC_SIZE)
+ {
+ M4OSA_TRACE1_0("M4MP4W_processAU : video stts table is full");
+ return M4WAR_MP4W_OVERSIZE;
+ }
+
+#else
+
+ lastSampleDur = mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+ * (mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1) + 1];
+
+ if (8 *mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
+ >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks
+ *M4MP4W_STTS_ALLOC_SIZE)
+ {
+ mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks += 1;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS =
+ (M4OSA_UInt32 *)M4MP4W_realloc(
+ (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS, ( mMp4FileDataPtr->videoTrackPtr->
+ nbOfAllocatedSttsBlocks
+ - 1) * M4MP4W_STTS_ALLOC_SIZE,
+ mMp4FileDataPtr->videoTrackPtr->
+ nbOfAllocatedSttsBlocks
+ * M4MP4W_STTS_ALLOC_SIZE);
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS
+ != M4OSA_NULL, M4ERR_ALLOC);
+ }
+
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+ if (delta != lastSampleDur) /*new entry in the table*/
+ {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb], 1);
+ M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb], (M4OSA_UInt16)delta);
+
+#else
+
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
+ mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb)] = 1;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+ *(mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb)+1] = delta;
+
+#endif
+
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb +=
+ 1;
+ mMp4FileDataPtr->filesize += 8;
+ }
+ else
+ {
+ /*increase of 1 the number of consecutive AUs with same duration*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1] += 1;
+
+#else
+
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
+ mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1)] += 1;
+
+#endif
+
+ }
+ }
+
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb += 1;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ mMp4FileDataPtr->videoTrackPtr->
+ chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc] +=
+ 1;
+
+#else
+
+ mMp4FileDataPtr->videoTrackPtr->
+ chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] +=
+ 1;
+
+#endif
+
+ if (auPtr->attribute == AU_RAP)
+ {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ if (4 *(mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb + 3)
+ >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks
+ *M4MP4W_STSS_ALLOC_SIZE)
+ {
+ M4OSA_TRACE1_0("M4MP4W_processAU : video stss table is full");
+ return M4WAR_MP4W_OVERSIZE;
+ }
+
+#else
+
+ if (4 *mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb
+ >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks
+ *M4MP4W_STSS_ALLOC_SIZE)
+ {
+ mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks += 1;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STSS =
+ (M4OSA_UInt32 *)M4MP4W_realloc(
+ (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STSS, ( mMp4FileDataPtr->videoTrackPtr->
+ nbOfAllocatedStssBlocks
+ - 1) * M4MP4W_STSS_ALLOC_SIZE,
+ mMp4FileDataPtr->videoTrackPtr->
+ nbOfAllocatedStssBlocks
+ * M4MP4W_STSS_ALLOC_SIZE);
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS
+ != M4OSA_NULL, M4ERR_ALLOC);
+ }
+
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+ mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STSS[mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb] =
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb;
+ mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb += 1;
+ mMp4FileDataPtr->filesize += 4;
+ }
+
+ /* Warning: time conversion cast 64to32! */
+ mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS =
+ (M4MP4W_Time32)auPtr->CTS;
+ }
+ else
+ return M4ERR_BAD_STREAM_ID;
+
+ /* I moved some state modification to after we know the sample has been written correctly. */
+ if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
+ && (streamID == VideoStreamID))
+ {
+ mMp4FileDataPtr->audioMsStopTime =
+ (M4MP4W_Time32)(auPtr->CTS * scale_video);
+ }
+
+ if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+ || (streamID == VideoStreamID))
+ {
+ /*update fileSize*/
+ mMp4FileDataPtr->filesize += auPtr->size;
+ }
+
+ if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
+ && (streamID == VideoStreamID))
+ {
+ /*update filesize with estimated audio data that will be added later. */
+ /*Warning: Assumption is made that: */
+ /* - audio samples have constant size (e.g. no sid). */
+ /* - max audio sample size has been set, and is the actual sample size. */
+
+ ERR_CHECK(mMp4FileDataPtr->audioMsChunkDur != 0,
+ M4WAR_MP4W_NOT_EVALUABLE);
+ mMp4FileDataPtr->filesize -=
+ (M4OSA_UInt32)(( mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
+ * scale_video) * (0.05/*always 50 AMR samples per second*/
+ *(M4OSA_Double)mMp4FileDataPtr->audioTrackPtr->MaxAUSize
+ + 16/*additional data for a new chunk*/
+ / (M4OSA_Double)mMp4FileDataPtr->audioMsChunkDur));
+
+ mMp4FileDataPtr->filesize += (M4OSA_UInt32)(( auPtr->CTS * scale_video)
+ * (0.05/*always 50 AMR samples per second*/
+ *(M4OSA_Double)mMp4FileDataPtr->audioTrackPtr->MaxAUSize
+ + 16/*additional data for a new chunk*/
+ / (M4OSA_Double)mMp4FileDataPtr->audioMsChunkDur));
+ }
+
+ M4OSA_TRACE1_4("processAU : size 0x%x mode %d filesize %lu limit %lu",
+ auPtr->size, auPtr->attribute, mMp4FileDataPtr->filesize,
+ mMp4FileDataPtr->MaxFileSize);
+
+ return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_closeWrite( M4OSA_Context context )
+/*******************************************************************************/
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_ERR err2 = M4NO_ERROR, err3 = M4NO_ERROR;
+
+ /*Warning: test should be done here to ensure context->pContext is not M4OSA_NULL,
+ but C is not C++...*/
+ M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+
+ M4OSA_UChar camcoder_maj, camcoder_min, camcoder_rev; /*camcoder version*/
+ M4OSA_Bool bAudio =
+ (( mMp4FileDataPtr->hasAudio)
+ && (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+ != 0)); /*((mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL) &&
+ (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb != 0));*/
+ M4OSA_Bool bVideo =
+ (( mMp4FileDataPtr->hasVideo)
+ && (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
+ != 0)); /*((mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL) &&
+ (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb != 0));*/
+ M4OSA_Bool bH263 = M4OSA_FALSE;
+ M4OSA_Bool bH264 = M4OSA_FALSE;
+ M4OSA_Bool bMP4V = M4OSA_FALSE;
+ M4OSA_Bool bAAC = M4OSA_FALSE;
+ M4OSA_Bool bEVRC = M4OSA_FALSE;
+
+ /*intermediate variables*/
+ M4OSA_UInt32 A, B, N, AB4N;
+
+ /*Trak variables*/
+ M4OSA_UInt32 a_trakId = AudioStreamID; /* (audio=1)*/
+ /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
+ M4OSA_UInt32 a_trakOffset = 32;
+ M4OSA_UInt32 a_sttsSize = 24; /* A (audio=24)*/
+ M4OSA_UInt32 a_stszSize = 20; /* B (audio=20)*/
+ M4OSA_UInt32 a_trakSize = 402; /* (audio=402)*/
+ M4OSA_UInt32 a_mdiaSize = 302; /* (audio=302)*/
+ M4OSA_UInt32 a_minfSize = 229; /* (audio=229)*/
+ M4OSA_UInt32 a_stblSize = 169; /* (audio=169)*/
+ M4OSA_UInt32 a_stsdSize = 69; /* (audio=69 )*/
+ M4OSA_UInt32 a_esdSize = 53; /* (audio=53 )*/
+ M4OSA_UInt32 a_dataSize = 0; /* temp: At the end, = currentPos*/
+ M4MP4W_Time32 a_trakDuration = 0; /* equals lastCTS*/
+ M4MP4W_Time32 a_msTrakDuration = 0;
+ M4OSA_UInt32 a_stscSize = 28; /* 16+12*nbchunksaudio*/
+ M4OSA_UInt32 a_stcoSize = 20; /* 16+4*nbchunksaudio*/
+
+ M4OSA_UInt32 v_trakId = VideoStreamID; /* (video=2)*/
+ /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
+ M4OSA_UInt32 v_trakOffset = 32;
+ M4OSA_UInt32 v_sttsSize = 0; /* A (video=16+8J)*/
+ M4OSA_UInt32 v_stszSize = 0; /* B (video=20+4K)*/
+ M4OSA_UInt32 v_trakSize = 0; /* (h263=A+B+4N+426), (mp4v=A+B+dsi+4N+448) */
+ M4OSA_UInt32 v_mdiaSize = 0; /* (h263=A+B+4N+326), (mp4v=A+B+dsi+4N+348) */
+ M4OSA_UInt32 v_minfSize = 0; /* (h263=A+B+4N+253), (mp4v=A+B+dsi+4N+275) */
+ M4OSA_UInt32 v_stblSize = 0; /* (h263=A+B+4N+189), (mp4v=A+B+dsi+4N+211) */
+ M4OSA_UInt32 v_stsdSize = 0; /* (h263=117) , (mp4v=139+dsi )*/
+ M4OSA_UInt32 v_esdSize = 0; /* (h263=101) , (mp4v=153+dsi )*/
+ M4OSA_UInt32 v_dataSize = 0; /* temp: At the end, = currentPos*/
+ M4MP4W_Time32 v_trakDuration = 0; /* equals lastCTS*/
+ M4MP4W_Time32 v_msTrakDuration = 0;
+ M4OSA_UInt32 v_stscSize = 28; /* 16+12*nbchunksvideo*/
+ M4OSA_UInt32 v_stcoSize = 20; /* 16+4*nbchunksvideo*/
+
+ /*video variables*/
+ M4OSA_UInt32 v_stssSize = 0; /* 4*N+16 STSS*/
+
+ /*aac & mp4v temp variable*/
+ M4OSA_UInt8 dsi = 0;
+
+ /*H264 variables*/
+ M4OSA_UInt32 v_avcCSize = 0; /* dsi+15*/
+
+ /*MP4V variables*/
+ M4OSA_UInt32 v_esdsSize = 0; /* dsi+37*/
+ M4OSA_UInt8 v_ESDescriptorSize =
+ 0; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
+ M4OSA_UInt8 v_DCDescriptorSize = 0; /* dsi+15*/
+
+ /*AAC variables*/
+ M4OSA_UInt32 a_esdsSize = 0; /* dsi+37*/
+ M4OSA_UInt8 a_ESDescriptorSize =
+ 0; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
+ M4OSA_UInt8 a_DCDescriptorSize = 0; /* dsi+15*/
+
+ /*General variables*/
+
+ /* audio chunk size + video chunk size*/
+ M4OSA_UInt32 mdatSize = 8;
+ M4OSA_UInt32 moovSize = 116; /* 116 + 402(audio) + (A+B+4N+426)(h263) or */
+ /* (A+B+dsi+4N+448)(mp4v) */
+ M4OSA_UInt32 creationTime; /* C */
+
+ /*flag to set up the chunk interleave strategy*/
+ M4OSA_Bool bInterleaveAV =
+ (bAudio && bVideo && (mMp4FileDataPtr->InterleaveDur != 0));
+
+ M4OSA_Context fileWriterContext = mMp4FileDataPtr->fileWriterContext;
+
+ M4OSA_UInt32 i;
+
+ M4OSA_Double scale_audio = 0.0;
+ M4OSA_Double scale_video = 0.0;
+ M4MP4W_Time32 delta;
+
+#ifndef _M4MP4W_MOOV_FIRST
+
+ M4OSA_FilePosition moovPos, mdatPos;
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+ /*macro state */
+ mMp4FileDataPtr->state = M4MP4W_closed;
+
+ /*if no data !*/
+ if ((!bAudio) && (!bVideo))
+ {
+ err = M4NO_ERROR; /*would be better to return a warning ?*/
+ goto cleanup;
+ }
+
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+ /* Remove safety file to make room for what needs to be written out here
+ (chunk flushing and moov). */
+
+ if (M4OSA_TRUE == mMp4FileDataPtr->cleanSafetyFile)
+ {
+ M4OSA_Context tempContext;
+ err = mMp4FileDataPtr->fileWriterFunctions->openWrite(&tempContext,
+ mMp4FileDataPtr->safetyFileUrl,
+ M4OSA_kFileWrite | M4OSA_kFileCreate);
+
+ if (M4NO_ERROR != err)
+ goto cleanup;
+ err = mMp4FileDataPtr->fileWriterFunctions->closeWrite(tempContext);
+
+ if (M4NO_ERROR != err)
+ goto cleanup;
+ mMp4FileDataPtr->safetyFileUrl = M4OSA_NULL;
+ mMp4FileDataPtr->cleanSafetyFile = M4OSA_FALSE;
+ }
+
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+
+ if (bVideo)
+ {
+ if ((M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable)
+ || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->chunkSizeTable)
+ || (M4OSA_NULL
+ == mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable)
+ || (M4OSA_NULL
+ == mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable)
+ || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ)
+ || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STTS)
+ || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STSS))
+ {
+ mMp4FileDataPtr->fileWriterFunctions->closeWrite(
+ fileWriterContext); /**< close the stream anyway */
+ M4MP4W_freeContext(context); /**< Free the context content */
+ return M4ERR_ALLOC;
+ }
+
+ /*video microstate*/
+ mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_closed;
+
+ /*current chunk is the last one and gives the total number of video chunks (-1)*/
+ for ( i = 0; i < mMp4FileDataPtr->videoTrackPtr->currentChunk; i++ )
+ {
+ v_dataSize += mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
+ }
+
+#ifndef _M4MP4W_MOOV_FIRST
+#ifndef _M4MP4W_UNBUFFERED_VIDEO
+ /*flush chunk*/
+
+ if (mMp4FileDataPtr->videoTrackPtr->currentPos > 0)
+ {
+ err = M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[0],
+ mMp4FileDataPtr->videoTrackPtr->currentPos,
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext);
+
+ if (M4NO_ERROR != err)
+ goto cleanup;
+ }
+
+#endif
+
+ M4OSA_TRACE1_0("flush video | CLOSE");
+ M4OSA_TRACE1_3("current chunk = %d offset = 0x%x size = 0x%08X",
+ mMp4FileDataPtr->videoTrackPtr->currentChunk,
+ mMp4FileDataPtr->absoluteCurrentPos,
+ mMp4FileDataPtr->videoTrackPtr->currentPos);
+
+ /*update chunk offset*/
+ mMp4FileDataPtr->videoTrackPtr->
+ chunkOffsetTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+ mMp4FileDataPtr->absoluteCurrentPos;
+
+ /*add chunk size to absoluteCurrentPos*/
+ mMp4FileDataPtr->absoluteCurrentPos +=
+ mMp4FileDataPtr->videoTrackPtr->currentPos;
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ /*update last chunk size, and add this value to v_dataSize*/
+
+ mMp4FileDataPtr->videoTrackPtr->
+ chunkSizeTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+ mMp4FileDataPtr->videoTrackPtr->currentPos;
+ v_dataSize +=
+ mMp4FileDataPtr->videoTrackPtr->currentPos; /*add last chunk size*/
+
+ v_trakDuration = mMp4FileDataPtr->videoTrackPtr->
+ CommonData.lastCTS; /* equals lastCTS*/
+
+ /* bugfix: if a new chunk was just created, cancel it before closing */
+ if ((mMp4FileDataPtr->videoTrackPtr->currentChunk != 0)
+ && (mMp4FileDataPtr->videoTrackPtr->currentPos == 0))
+ {
+ mMp4FileDataPtr->videoTrackPtr->currentChunk--;
+ }
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+ if ((mMp4FileDataPtr->videoTrackPtr->
+ chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
+ currentStsc] & 0xFFF) == 0)
+ {
+ mMp4FileDataPtr->videoTrackPtr->currentStsc--;
+ }
+
+#endif /*_M4MP4W_UNBUFFERED_VIDEO*/
+
+ /* Last sample duration */
+ /* If we have the file duration we use it, else we duplicate the last AU */
+
+ if (mMp4FileDataPtr->MaxFileDuration > 0)
+ {
+ /* use max file duration to calculate delta of last AU */
+ delta = mMp4FileDataPtr->MaxFileDuration
+ - mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS;
+ v_trakDuration = mMp4FileDataPtr->MaxFileDuration;
+
+ if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb > 1)
+ {
+ /* if more than 1 frame, create a new stts entry (else already created) */
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb++;
+ }
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1], 1);
+ M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1], delta);
+
+#else
+
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+ *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
+ - 1)] = 1;
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+ *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
+ - 1) + 1] = delta;
+
+#endif
+
+ }
+ else
+ {
+ /* duplicate the delta of the previous frame */
+ if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb > 1)
+ {
+ /* if more than 1 frame, duplicate the stts entry (else already exists) */
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ v_trakDuration +=
+ M4MP4W_get32_Hi(&mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1]);
+ mMp4FileDataPtr->videoTrackPtr->
+ TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1] += 1;
+
+#else
+
+ v_trakDuration += mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+ * (mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1) + 1];
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
+ mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb - 1)] += 1;
+
+#endif
+
+ }
+ else
+ {
+ M4OSA_TRACE1_0("M4MP4W_closeWrite : ! videoTrackPtr,\
+ cannot know the duration of the unique AU !");
+ /* If there is an audio track, we use it as a file duration
+ (and so, as AU duration...) */
+ if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_closeWrite : ! Let's use the audio track duration !");
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] =
+ (M4OSA_UInt32)(
+ mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
+ * (1000.0 / mMp4FileDataPtr->audioTrackPtr->
+ CommonData.timescale));
+ v_trakDuration =
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1];
+ }
+ /* Else, we use a MAGICAL value (66 ms) */
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_closeWrite : ! No audio track -> use magical value (66) !"); /* */
+ mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = 66;
+ v_trakDuration = 66;
+ }
+ }
+ }
+
+ /* Calculate table sizes */
+ A = v_sttsSize = 16 + 8 * mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sttsTableEntryNb; /* A (video=16+8J)*/
+ B = v_stszSize = 20 + 4 * mMp4FileDataPtr->videoTrackPtr->
+ CommonData.sampleNb; /* B (video=20+4K)*/
+ N = mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb;
+ AB4N = A + B + 4 * N;
+
+ scale_video =
+ 1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
+ v_msTrakDuration = (M4OSA_UInt32)(v_trakDuration * scale_video);
+
+ /*Convert integers in the table from LE into BE*/
+#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb);
+ M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS,
+ 2 * (mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb));
+
+#endif
+
+ M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS,
+ mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb);
+
+ if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+ == M4SYS_kH263)
+ {
+ bH263 = M4OSA_TRUE;
+ v_trakSize = AB4N + 426; /* (h263=A+B+4N+426)*/
+ v_mdiaSize = AB4N + 326; /* (h263=A+B+4N+326)*/
+ v_minfSize = AB4N + 253; /* (h263=A+B+4N+253)*/
+ v_stblSize = AB4N + 189; /* (h263=A+B+4N+189)*/
+ v_stsdSize = 117; /* (h263=117)*/
+ v_esdSize = 101; /* (h263=101)*/
+
+ moovSize += AB4N + 426;
+
+ if (((M4OSA_Int32)mMp4FileDataPtr->videoTrackPtr->avgBitrate) != -1)
+ {
+ /*the optional 'bitr' atom is appended to the dsi,so filesize is 16 bytes bigger*/
+ v_trakSize += 16;
+ v_mdiaSize += 16;
+ v_minfSize += 16;
+ v_stblSize += 16;
+ v_stsdSize += 16;
+ v_esdSize += 16;
+ moovSize += 16;
+ }
+ }
+ else if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+ == M4SYS_kH264)
+ {
+ bH264 = M4OSA_TRUE;
+ /* For H264 there is no default DSI, and its presence is mandatory,
+ so check the DSI has been set*/
+ if (0 == mMp4FileDataPtr->videoTrackPtr->dsiSize
+ || M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_closeWrite: error, no H264 DSI has been set!");
+ err = M4ERR_STATE;
+ goto cleanup;
+ }
+
+ /*H264 sizes of the atom*/
+
+ // Remove the hardcoded DSI values of H264Block2
+ // TODO: check bMULPPSSPS case
+ v_avcCSize = sizeof(M4OSA_UInt32) + sizeof(H264Block2) +
+ mMp4FileDataPtr->videoTrackPtr->dsiSize;
+
+ v_trakSize = AB4N + v_avcCSize + 411;
+ v_mdiaSize = AB4N + v_avcCSize + 311;
+ v_minfSize = AB4N + v_avcCSize + 238;
+ v_stblSize = AB4N + v_avcCSize + 174;
+ v_stsdSize = v_avcCSize + 102;
+ v_esdSize = v_avcCSize + 86;
+
+ moovSize += AB4N + v_avcCSize + 411;
+
+ }
+ else if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+ == M4SYS_kMPEG_4)
+ {
+ bMP4V = M4OSA_TRUE;
+ /* For MPEG4 there is no default DSI, and its presence is mandatory,
+ so check the DSI has been set*/
+ if (0 == mMp4FileDataPtr->videoTrackPtr->dsiSize
+ || M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_closeWrite: error, no MPEG4 DSI has been set!");
+ err = M4ERR_STATE;
+ goto cleanup;
+ }
+
+ /*MP4V variables*/
+ dsi = mMp4FileDataPtr->videoTrackPtr->dsiSize;
+ v_esdsSize = 37 + dsi; /* dsi+37*/
+ v_ESDescriptorSize =
+ 23
+ + dsi; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
+ v_DCDescriptorSize = 15 + dsi; /* dsi+15*/
+
+ v_trakSize = AB4N + dsi + 448; /* (mp4v=A+B+dsi+4N+448) */
+ v_mdiaSize = AB4N + dsi + 348; /* (mp4v=A+B+dsi+4N+348) */
+ v_minfSize = AB4N + dsi + 275; /* (mp4v=A+B+dsi+4N+275) */
+ v_stblSize = AB4N + dsi + 211; /* (mp4v=A+B+dsi+4N+211) */
+ v_stsdSize = dsi + 139; /* (mp4v=139+dsi)*/
+ v_esdSize = dsi + 123; /* (mp4v=123+dsi)*/
+
+ moovSize += AB4N + dsi + 448;
+ }
+
+ /*video variables*/
+ v_stssSize = 16 + 4 * N; /* 4*N+16 STSS*/
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+ /* stsc update */
+
+ v_stscSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+ v_stblSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+ v_minfSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+ v_mdiaSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+ v_trakSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+ moovSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+
+ /* stco update */
+ v_stcoSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_stblSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_minfSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_mdiaSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_trakSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ moovSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+
+#else
+ /*stsc/stco update*/
+
+ v_stscSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_stcoSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_stblSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_minfSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_mdiaSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ v_trakSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ moovSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+
+#endif
+
+ /*update last chunk time*/
+
+ mMp4FileDataPtr->videoTrackPtr->
+ chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+ v_msTrakDuration;
+ }
+
+ if (bAudio)
+ {
+ if ((M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable)
+ || (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->chunkSizeTable)
+ || (M4OSA_NULL
+ == mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable)
+ || (M4OSA_NULL
+ == mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable)
+ || (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->TABLE_STTS))
+ {
+ mMp4FileDataPtr->fileWriterFunctions->closeWrite(
+ fileWriterContext); /**< close the stream anyway */
+ M4MP4W_freeContext(context); /**< Free the context content */
+ return M4ERR_ALLOC;
+ }
+
+ /*audio microstate*/
+ mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_closed;
+
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType == M4SYS_kAAC)
+ {
+            bAAC =
+                M4OSA_TRUE; /*else, audio is implicitly amr in the following*/
+ dsi = mMp4FileDataPtr->audioTrackPtr->dsiSize; /*variable size*/
+
+ a_esdsSize = 37 + dsi; /* dsi+37*/
+ a_ESDescriptorSize =
+ 23
+ + dsi; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
+ a_DCDescriptorSize = 15 + dsi; /* dsi+15*/
+
+ a_esdSize = dsi + 73; /*overwrite a_esdSize with aac value*/
+ /*add dif. between amr & aac sizes: (- 53 + dsi + 37)*/
+ a_stsdSize += dsi + 20;
+ a_stblSize += dsi + 20;
+ a_minfSize += dsi + 20;
+ a_mdiaSize += dsi + 20;
+ a_trakSize += dsi + 20;
+ moovSize += dsi + 20;
+ }
+
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType
+ == M4SYS_kEVRC)
+ {
+            bEVRC =
+                M4OSA_TRUE; /*else, audio is implicitly amr in the following*/
+
+ /* evrc dsi is only 6 bytes while amr dsi is 9 bytes,all other blocks are unchanged */
+ a_esdSize -= 3;
+ a_stsdSize -= 3;
+ a_stblSize -= 3;
+ a_minfSize -= 3;
+ a_mdiaSize -= 3;
+ a_trakSize -= 3;
+ moovSize -= 3;
+ }
+
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize == 0)
+ {
+ if (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ)
+ {
+ mMp4FileDataPtr->fileWriterFunctions->closeWrite(
+ fileWriterContext); /**< close the stream anyway */
+ M4MP4W_freeContext(context); /**< Free the context content */
+ return M4ERR_ALLOC;
+ }
+ /*Convert integers in the table from LE into BE*/
+ M4MP4W_table32ToBE(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ,
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb);
+ a_stszSize +=
+ 4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+ a_stblSize +=
+ 4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+ a_minfSize +=
+ 4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+ a_mdiaSize +=
+ 4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+ a_trakSize +=
+ 4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+ moovSize += 4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+ }
+
+ moovSize += 402;
+
+ /*current chunk is the last one and gives the total number of audio chunks (-1)*/
+ for ( i = 0; i < mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
+ {
+ a_dataSize += mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
+ }
+
+#ifndef _M4MP4W_MOOV_FIRST
+ /*flush chunk*/
+
+ if (mMp4FileDataPtr->audioTrackPtr->currentPos > 0)
+ {
+ err = M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[0],
+ mMp4FileDataPtr->audioTrackPtr->currentPos,
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext);
+
+ if (M4NO_ERROR != err)
+ goto cleanup;
+ }
+
+ M4OSA_TRACE1_0("flush audio | CLOSE");
+ M4OSA_TRACE1_2("current chunk = %d offset = 0x%x",
+ mMp4FileDataPtr->audioTrackPtr->currentChunk,
+ mMp4FileDataPtr->absoluteCurrentPos);
+
+ /*update chunk offset*/
+ mMp4FileDataPtr->audioTrackPtr->
+ chunkOffsetTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+ mMp4FileDataPtr->absoluteCurrentPos;
+
+ /*add chunk size to absoluteCurrentPos*/
+ mMp4FileDataPtr->absoluteCurrentPos +=
+ mMp4FileDataPtr->audioTrackPtr->currentPos;
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ /*update last chunk size, and add this value to a_dataSize*/
+
+ mMp4FileDataPtr->audioTrackPtr->
+ chunkSizeTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+ mMp4FileDataPtr->audioTrackPtr->currentPos;
+ a_dataSize +=
+ mMp4FileDataPtr->audioTrackPtr->currentPos; /*add last chunk size*/
+
+ /* bugfix: if a new chunk was just created, cancel it before to close */
+ if ((mMp4FileDataPtr->audioTrackPtr->currentChunk != 0)
+ && (mMp4FileDataPtr->audioTrackPtr->currentPos == 0))
+ {
+ mMp4FileDataPtr->audioTrackPtr->currentChunk--;
+ }
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+ if ((mMp4FileDataPtr->audioTrackPtr->
+ chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->
+ currentStsc] & 0xFFF) == 0)
+ {
+ mMp4FileDataPtr->audioTrackPtr->currentStsc--;
+ }
+
+#endif /*_M4MP4W_UNBUFFERED_VIDEO*/
+
+ a_trakDuration = mMp4FileDataPtr->audioTrackPtr->
+ CommonData.lastCTS; /* equals lastCTS*/
+ /* add last sample dur */
+
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb != 1)
+ {
+#ifdef DUPLICATE_STTS_IN_LAST_AU
+ /*increase of 1 the number of consecutive AUs with same duration*/
+
+ mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
+ *(mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
+ - 1) - 2] += 1;
+
+#endif /*DUPLICATE_STTS_IN_LAST_AU*/
+
+ a_trakDuration += mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
+ * (mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sttsTableEntryNb - 1) - 1];
+ }
+ else if (0 == mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS)
+ {
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType
+ == M4SYS_kAMR)
+ {
+ if (12200 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+ {
+ a_trakDuration = a_dataSize / 32
+ * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+ }
+ else if (10200 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+ {
+ a_trakDuration = a_dataSize / 27
+ * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+ }
+ else if (7950 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+ {
+ a_trakDuration = a_dataSize / 21
+ * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+ }
+ else if (7400 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+ {
+ a_trakDuration = a_dataSize / 20
+ * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+ }
+ else if (6700 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+ {
+ a_trakDuration = a_dataSize / 18
+ * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+ }
+ else if (5900 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+ {
+ a_trakDuration = a_dataSize / 16
+ * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+ }
+ else if (5150 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+ {
+ a_trakDuration = a_dataSize / 14
+ * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+ }
+ else if (4750 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+ {
+ a_trakDuration = a_dataSize / 13
+ * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+ }
+ }
+ }
+
+ scale_audio =
+ 1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
+ a_msTrakDuration = (M4OSA_UInt32)(a_trakDuration * scale_audio);
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+ /* stsc update */
+
+ a_stscSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+ a_stblSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+ a_minfSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+ a_mdiaSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+ a_trakSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+ moovSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+
+ /* stso update */
+ a_stcoSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_stblSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_minfSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_mdiaSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_trakSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ moovSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+
+#else
+ /*stsc/stco update*/
+
+ a_stscSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_stcoSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_stblSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_minfSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_mdiaSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ a_trakSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ moovSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+
+#endif
+
+ /* compute the new size of stts*/
+
+ a_sttsSize = 16 + 8 * (mMp4FileDataPtr->audioTrackPtr->
+ CommonData.sttsTableEntryNb - 1);
+
+ moovSize += a_sttsSize - 24;
+ a_mdiaSize += a_sttsSize - 24;
+ a_minfSize += a_sttsSize - 24;
+ a_stblSize += a_sttsSize - 24;
+ a_trakSize += a_sttsSize - 24;
+
+ /*update last chunk time*/
+ mMp4FileDataPtr->audioTrackPtr->
+ chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+ a_msTrakDuration;
+ }
+
+ /* changing the way the mdat size is computed.
+ The real purpose of the mdat size is to know the amount to skip to get to the next
+ atom, which is the moov; the size of media in the mdat is almost secondary. Therefore,
+ it is of utmost importance that the mdat size "points" to where the moov actually
+ begins. Now, the moov begins right after the last data we wrote, so how could the sum
+ of all chunk sizes be different from the total size of what has been written? Well, it
+ can happen when the writing was unexpectedly stopped (because of lack of disk space,
+ for instance), in this case a chunk may be partially written (the partial write is not
+ necessarily erased) but it may not be reflected in the chunk size list (which may
+ believe it hasn't been written or on the contrary that it has been fully written). In
+ the case of such a mismatch, there is either unused data in the mdat (not very good,
+ but tolerable) or when reading the last chunk it will read the beginning of the moov
+ as part of the chunk (which means the last chunk won't be correctly decoded), both of
+ which are still better than losing the whole recording. In the long run it'll probably
+ be attempted to always clean up back to a consistent state, but at any rate it is
+ always safer to have the mdat size be computed using the position where the moov
+ actually begins, rather than using the size it is thought the mdat has.
+
+ Therefore, I will record where we are just before writing the moov, to serve when
+ updating the mdat size. */
+
+ /* mdatSize += a_dataSize + v_dataSize; *//*TODO allow for multiple chunks*/
+
+ /* End of Pierre Lebeaupin 19/12/2007: changing the way the mdat size is computed. */
+
+ /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
+ a_trakOffset += moovSize;
+ v_trakOffset += moovSize/*+ a_dataSize*/;
+
+ if (bInterleaveAV == M4OSA_FALSE)
+ v_trakOffset += a_dataSize;
+
+ /*system time since 1970 */
+#ifndef _M4MP4W_DONT_USE_TIME_H
+
+ time((time_t *)&creationTime);
+ /*convert into time since 1/1/1904 00h00 (normative)*/
+ creationTime += 2082841761; /*nb of sec between 1904 and 1970*/
+
+#else /*_M4MP4W_DONT_USE_TIME_H*/
+
+ creationTime =
+ 0xBBD09100; /* = 7/11/2003 00h00 ; in hexa because of code scrambler limitation with
+ large integers */
+
+#endif /*_M4MP4W_DONT_USE_TIME_H*/
+
+ mMp4FileDataPtr->duration =
+ max(a_msTrakDuration, v_msTrakDuration); /*max audio/video*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+ /*open file in write binary mode*/
+
+ err = mMp4FileDataPtr->fileWriterFunctions->openWrite(&fileWriterContext,
+ mMp4FileDataPtr->url, 0x22);
+ ERR_CHECK(err == M4NO_ERROR, err);
+
+ /*ftyp atom*/
+ if (mMp4FileDataPtr->ftyp.major_brand != 0)
+ {
+ M4OSA_UInt32 i;
+
+ /* Put customized ftyp box */
+ CLEANUPonERR(M4MP4W_putBE32(16
+ + (mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4),
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(M4MPAC_FTYP_TAG,
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->ftyp.major_brand,
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->ftyp.minor_version,
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext));
+
+ for ( i = 0; i < mMp4FileDataPtr->ftyp.nbCompatibleBrands; i++ )
+ {
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->ftyp.compatible_brands[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext));
+ }
+ }
+ else
+ {
+ /* Put default ftyp box */
+ CLEANUPonERR(M4MP4W_putBlock(Default_ftyp, sizeof(Default_ftyp),
+ mMp4FileDataPtr->fileWriterFunctions,
+ mMp4FileDataPtr->fileWriterContext));
+ }
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+#ifndef _M4MP4W_MOOV_FIRST
+    /* seek is used to get the current position relative to the start of the file. */
+    /* ... or rather, seek used to be used for that, but this functionality turned out to be
+    unreliably implemented — or sometimes not implemented at all — in the various OSALs, so we
+    now avoid using it. */
+ /* Notice this new method assumes we're at the end of the file, this will break if ever we
+ are overwriting a larger file. */
+
+ CLEANUPonERR(mMp4FileDataPtr->fileWriterFunctions->getOption(
+ mMp4FileDataPtr->fileWriterContext,
+ M4OSA_kFileWriteGetFileSize, (M4OSA_DataOption *) &moovPos));
+ /* moovPos will be used after writing the moov. */
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ CLEANUPonERR(M4MP4W_putBE32(moovSize, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock3, sizeof(CommonBlock3),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock4, sizeof(CommonBlock4),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->duration,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock5, sizeof(CommonBlock5),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ if (bAudio)
+ {
+ CLEANUPonERR(M4MP4W_putBE32(a_trakSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock6, sizeof(CommonBlock6),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(a_trakId,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock7, sizeof(CommonBlock7),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(a_msTrakDuration,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock7bis, sizeof(CommonBlock7bis),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(AMRBlock1, sizeof(AMRBlock1),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+ CLEANUPonERR(M4MP4W_putBE32(a_mdiaSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock8, sizeof(CommonBlock8),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.timescale,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(a_trakDuration,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock9, sizeof(CommonBlock9),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(AMRBlock1_1, sizeof(AMRBlock1_1),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+ CLEANUPonERR(M4MP4W_putBE32(a_minfSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock10, sizeof(CommonBlock10),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(a_stblSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock11, sizeof(CommonBlock11),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(a_sttsSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock12, sizeof(CommonBlock12),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ CLEANUPonERR(M4MP4W_putBE32(
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb - 1,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ /*invert the table data to bigendian*/
+ M4MP4W_table32ToBE(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS,
+ 2 * (mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
+ - 1));
+ CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+ *)mMp4FileDataPtr->audioTrackPtr->TABLE_STTS,
+ ( mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb - 1)
+ * 8,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+
+ /* stsd */
+ CLEANUPonERR(M4MP4W_putBE32(a_stsdSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionHeader,
+ sizeof(SampleDescriptionHeader),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(a_esdSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ /* sample desc entry inside stsd */
+ if (bAAC)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(AACBlock1, sizeof(AACBlock1),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ }
+ else if (bEVRC)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(EVRC8Block1, sizeof(EVRC8Block1),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*evrc*/
+ }
+ else /*AMR8*/
+ {
+ CLEANUPonERR(M4MP4W_putBlock(AMR8Block1, sizeof(AMR8Block1),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*amr8*/
+ }
+ CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryStart,
+ sizeof(SampleDescriptionEntryStart),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(AudioSampleDescEntryBoilerplate,
+ sizeof(AudioSampleDescEntryBoilerplate),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.timescale
+ << 16,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ /* DSI inside sample desc entry */
+ if (bAAC)
+ {
+ CLEANUPonERR(M4MP4W_putBE32(a_esdsSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock0,
+ sizeof(MPEGConfigBlock0), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putByte(a_ESDescriptorSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock1,
+ sizeof(MPEGConfigBlock1), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putByte(a_DCDescriptorSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putBlock(AACBlock2, sizeof(AACBlock2),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(
+ M4MP4W_putBE24(mMp4FileDataPtr->audioTrackPtr->avgBitrate * 5,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->maxBitrate,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->avgBitrate,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock2,
+ sizeof(MPEGConfigBlock2), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putByte(mMp4FileDataPtr->audioTrackPtr->dsiSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->DSI,
+ mMp4FileDataPtr->audioTrackPtr->dsiSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock3,
+ sizeof(MPEGConfigBlock3), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*aac*/
+ }
+ else if (bEVRC)
+ {
+ M4OSA_UInt8 localDsi[6];
+ M4OSA_UInt32 localI;
+
+ CLEANUPonERR(M4MP4W_putBlock(EVRCBlock3_1, sizeof(EVRCBlock3_1),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*audio*/
+
+ /* copy the default block in a local variable*/
+ for ( localI = 0; localI < 6; localI++ )
+ {
+ localDsi[localI] = EVRCBlock3_2[localI];
+ }
+ /* computes the number of sample per au */
+ /* and stores it in the DSI*/
+ /* assumes a char is enough to store the data*/
+ localDsi[5] =
+ (M4OSA_UInt8)(mMp4FileDataPtr->audioTrackPtr->sampleDuration
+ / 160)/*EVRC 1 frame duration*/;
+
+ if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
+ {
+ /* copy vendor name */
+ for ( localI = 0; localI < 4; localI++ )
+ {
+ localDsi[localI] = (M4OSA_UInt8)(
+ mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
+ }
+ }
+ CLEANUPonERR(M4MP4W_putBlock(localDsi, 6,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*audio*/
+ }
+ else /*AMR8*/
+ {
+ M4OSA_UInt8 localDsi[9];
+ M4OSA_UInt32 localI;
+
+ CLEANUPonERR(M4MP4W_putBlock(AMRDSIHeader, sizeof(AMRDSIHeader),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ /* copy the default block in a local variable*/
+ for ( localI = 0; localI < 9; localI++ )
+ {
+ localDsi[localI] = AMRDefaultDSI[localI];
+ }
+ /* computes the number of sample per au */
+ /* and stores it in the DSI*/
+ /* assumes a char is enough to store the data*/
+ /* ALERT! The potential of the following line of code to explode in our face
+ is enormous when anything (sample rate or whatever) will change. This
+ calculation would be MUCH better handled by the VES or whatever deals with
+ the encoder more directly. */
+ localDsi[8] =
+ (M4OSA_UInt8)(mMp4FileDataPtr->audioTrackPtr->sampleDuration
+ / 160)/*AMR NB 1 frame duration*/;
+
+ if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
+ {
+ /* copy vendor name */
+ for ( localI = 0; localI < 4; localI++ )
+ {
+ localDsi[localI] = (M4OSA_UInt8)(
+ mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
+ }
+
+ /* copy the Mode Set */
+ for ( localI = 5; localI < 7; localI++ )
+ {
+ localDsi[localI] = (M4OSA_UInt8)(
+ mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
+ }
+ }
+ CLEANUPonERR(M4MP4W_putBlock(localDsi, 9,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*audio*/
+ }
+
+ /*end trak*/
+ CLEANUPonERR(M4MP4W_putBE32(a_stszSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock15, sizeof(CommonBlock15),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ /*0 value for samplesize means not constant AU size*/
+ if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize == 0)
+ {
+ CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+ *)mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ,
+ mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb * 4,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ }
+
+ CLEANUPonERR(M4MP4W_putBE32(a_stscSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock16, sizeof(CommonBlock16),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentStsc
+ + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentStsc; i++ )
+ {
+ CLEANUPonERR(M4MP4W_putBE32(
+ ( mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[i]
+ >> 12) + 1, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32((mMp4FileDataPtr->audioTrackPtr->
+ chunkSampleNbTable[i] & 0xFFF),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ }
+
+#else
+
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentChunk
+ + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
+ {
+ CLEANUPonERR(M4MP4W_putBE32(i + 1,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(
+ mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ }
+
+#endif
+
+ CLEANUPonERR(M4MP4W_putBE32(a_stcoSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock17, sizeof(CommonBlock17),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentChunk
+ + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+ for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
+ {
+ CLEANUPonERR(M4MP4W_putBE32(a_trakOffset,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ a_trakOffset += mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
+
+ if (( bInterleaveAV == M4OSA_TRUE)
+ && (mMp4FileDataPtr->videoTrackPtr->currentChunk >= i))
+ {
+ a_trakOffset +=
+ mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
+ }
+ }
+
+#else
+
+ for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
+ {
+ CLEANUPonERR(M4MP4W_putBE32(
+ mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ }
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ CLEANUPonERR(M4MP4W_putBlock(AMRBlock4, sizeof(AMRBlock4),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+ }
+
+ if (bVideo)
+ {
+ /*trak*/
+ CLEANUPonERR(M4MP4W_putBE32(v_trakSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock6, sizeof(CommonBlock6),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(v_trakId,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock7, sizeof(CommonBlock7),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(v_msTrakDuration,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock7bis, sizeof(CommonBlock7bis),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ /* In the track header width and height are 16.16 fixed point values,
+ so shift left the regular integer value by 16. */
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->width << 16,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->height
+ << 16,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+
+ CLEANUPonERR(M4MP4W_putBE32(v_mdiaSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock8, sizeof(CommonBlock8),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(creationTime,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->CommonData.timescale,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(v_trakDuration,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock9, sizeof(CommonBlock9),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(VideoBlock1_1, sizeof(VideoBlock1_1),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBE32(v_minfSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock10, sizeof(CommonBlock10),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(v_stblSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock11, sizeof(CommonBlock11),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(v_sttsSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock12, sizeof(CommonBlock12),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ for ( i = 0;
+ i < mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb;
+ i++ )
+ {
+ CLEANUPonERR(M4MP4W_putBE32(M4MP4W_get32_Lo(
+ &mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[i]),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBE32(M4MP4W_get32_Hi(
+ &mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[i]),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*video*/
+ }
+
+#else
+
+ CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+ *)mMp4FileDataPtr->videoTrackPtr->TABLE_STTS,
+ ( mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb) * 8,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+
+#endif
+
+ /* stsd */
+
+ CLEANUPonERR(M4MP4W_putBE32(v_stsdSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionHeader,
+ sizeof(SampleDescriptionHeader),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(v_esdSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ /* sample desc entry inside stsd */
+ if (bMP4V)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(Mp4vBlock1, sizeof(Mp4vBlock1),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ }
+
+ if (bH263)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(H263Block1, sizeof(H263Block1),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h263*/
+ }
+
+ if (bH264)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(H264Block1, sizeof(H264Block1),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h264*/
+ }
+ CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryStart,
+ sizeof(SampleDescriptionEntryStart),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryVideoBoilerplate1,
+ sizeof(SampleDescriptionEntryVideoBoilerplate1),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBE16(mMp4FileDataPtr->videoTrackPtr->width,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBE16(mMp4FileDataPtr->videoTrackPtr->height,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBlock(VideoResolutions, sizeof(VideoResolutions),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryVideoBoilerplate2,
+ sizeof(SampleDescriptionEntryVideoBoilerplate2),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+
+ /* DSI inside sample desc entry */
+ if (bH263)
+ {
+ /* The h263 dsi given through the api must be 7 bytes, that is, it shall not include
+ the optional bitrate box. However, if the bitrate information is set in the stream
+ handler, a bitrate box is appended here to the dsi */
+ if (((M4OSA_Int32)mMp4FileDataPtr->videoTrackPtr->avgBitrate) != -1)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(H263Block2_bitr,
+ sizeof(H263Block2_bitr),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /* d263 box with bitr atom */
+
+ if (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(H263Block3, sizeof(H263Block3),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h263*/
+ }
+ else
+ {
+ CLEANUPonERR(
+ M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
+ mMp4FileDataPtr->videoTrackPtr->dsiSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ }
+
+ CLEANUPonERR(M4MP4W_putBlock(H263Block4, sizeof(H263Block4),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h263*/
+ /* Pierre Lebeaupin 2008/04/29: the two following lines used to be swapped;
+ I changed to this order in order to conform to 3GPP. */
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->avgBitrate,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h263*/
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->maxBitrate,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h263*/
+ }
+ else
+ {
+ CLEANUPonERR(M4MP4W_putBlock(H263Block2, sizeof(H263Block2),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /* d263 box */
+
+ if (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(H263Block3, sizeof(H263Block3),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h263*/
+ }
+ else
+ {
+ CLEANUPonERR(
+ M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
+ mMp4FileDataPtr->videoTrackPtr->dsiSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ }
+ }
+ }
+
+ if (bMP4V)
+ {
+ M4OSA_UInt32 bufferSizeDB = 5 * mMp4FileDataPtr->videoTrackPtr->
+ avgBitrate; /*bufferSizeDB set to 5 times the bitrate*/
+
+ CLEANUPonERR(M4MP4W_putBE32(v_esdsSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock0,
+ sizeof(MPEGConfigBlock0), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putByte(v_ESDescriptorSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock1,
+ sizeof(MPEGConfigBlock1), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putByte(v_DCDescriptorSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putBlock(Mp4vBlock3, sizeof(Mp4vBlock3),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putBE24(bufferSizeDB,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->maxBitrate,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->avgBitrate,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock2,
+ sizeof(MPEGConfigBlock2), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putByte(mMp4FileDataPtr->videoTrackPtr->dsiSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
+ mMp4FileDataPtr->videoTrackPtr->dsiSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock3,
+ sizeof(MPEGConfigBlock3), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*mp4v*/
+ }
+
+ if (bH264)
+ {
+ M4OSA_UInt16 ppsLentgh = 0; /* PPS length */
+ M4OSA_UInt16 spsLentgh = 0; /* SPS length */
+ M4OSA_UChar *tmpDSI = mMp4FileDataPtr->videoTrackPtr->DSI; /* DSI */
+ M4OSA_UInt16 NumberOfPPS;
+ M4OSA_UInt16 lCntPPS;
+
+ /* Put the avcC (header + DSI) size */
+ CLEANUPonERR(M4MP4W_putBE32(v_avcCSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h264*/
+ /* Put the avcC header */
+ CLEANUPonERR(M4MP4W_putBlock(H264Block2, sizeof(H264Block2),
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*h264*/
+ /* Put the DSI (SPS + PPS) int the 3gp format*/
+ /* SPS length in BE */
+
+ if ((0x01 != mMp4FileDataPtr->videoTrackPtr->DSI[0]) ||
+ (0x42 != mMp4FileDataPtr->videoTrackPtr->DSI[1]))
+ {
+ M4OSA_TRACE1_2("!!! M4MP4W_closeWrite ERROR : invalid AVCC 0x%X 0x%X",
+ mMp4FileDataPtr->videoTrackPtr->DSI[0],
+ mMp4FileDataPtr->videoTrackPtr->DSI[1]);
+ return M4ERR_PARAMETER;
+ }
+ // Do not strip the DSI
+ CLEANUPonERR( M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
+ mMp4FileDataPtr->videoTrackPtr->dsiSize,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext) );/*h264*/
+
+ }
+
+ /*end trak*/
+ CLEANUPonERR(M4MP4W_putBE32(v_stszSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock15, sizeof(CommonBlock15),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sampleSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ for ( i = 0; i < mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb;
+ i++ )
+ {
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*video*/
+ }
+
+#else
+
+ CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+ *)mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
+ mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb * 4,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+
+#endif
+
+ CLEANUPonERR(M4MP4W_putBE32(v_stscSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock16, sizeof(CommonBlock16),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentStsc
+ + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentStsc; i++ )
+ {
+ CLEANUPonERR(M4MP4W_putBE32(
+ ( mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[i]
+ >> 12) + 1, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32((mMp4FileDataPtr->videoTrackPtr->
+ chunkSampleNbTable[i] & 0xFFF),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ }
+
+#else
+
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentChunk
+ + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ for (i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)
+ {
+ CLEANUPonERR(M4MP4W_putBE32(i + 1,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(
+ mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ }
+
+#endif
+
+ CLEANUPonERR(M4MP4W_putBE32(v_stcoSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock17, sizeof(CommonBlock17),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentChunk
+ + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+ for (i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)
+ {
+ if (( bInterleaveAV == M4OSA_TRUE)
+ && (mMp4FileDataPtr->audioTrackPtr->currentChunk >= i))
+ {
+ v_trakOffset +=
+ mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
+ }
+ CLEANUPonERR(M4MP4W_putBE32(v_trakOffset,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ v_trakOffset += mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
+ }
+
+#else
+
+ for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++ )
+ {
+ CLEANUPonERR(M4MP4W_putBE32(
+ mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ }
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ CLEANUPonERR(M4MP4W_putBE32(v_stssSize,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBlock(VideoBlock4, sizeof(VideoBlock4),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ CLEANUPonERR(
+ M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb,
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+ *)mMp4FileDataPtr->videoTrackPtr->TABLE_STSS,
+ mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb * 4,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ CLEANUPonERR(M4MP4W_putBlock(VideoBlock5, sizeof(VideoBlock5),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+ }
+#ifdef _M4MP4W_MOOV_FIRST
+ /*mdat*/
+
+ CLEANUPonERR(M4MP4W_putBE32(mdatSize, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putBlock(CommonBlock2, sizeof(CommonBlock2),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ /*write data, according to the interleave mode (default is not interleaved)*/
+ if (bInterleaveAV == M4OSA_FALSE)
+ {
+ if (bAudio)
+ {
+ for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk;
+ i++ )
+ {
+ CLEANUPonERR(
+ M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[i],
+ mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*audio (previously a_dataSize)*/
+ }
+ }
+
+ if (bVideo)
+ {
+ for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk;
+ i++ )
+ {
+ CLEANUPonERR(
+ M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[i],
+ mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*video (previously a_dataSize)*/
+ }
+ }
+ }
+ else /*in this mode, we have audio and video to interleave*/
+ {
+ for ( i = 0; i <= max(mMp4FileDataPtr->audioTrackPtr->currentChunk,
+ mMp4FileDataPtr->videoTrackPtr->currentChunk); i++ )
+ {
+ if (i <= mMp4FileDataPtr->audioTrackPtr->currentChunk)
+ {
+ CLEANUPonERR(
+ M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[i],
+ mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*audio (previously a_dataSize)*/
+ }
+
+ if (i <= mMp4FileDataPtr->videoTrackPtr->currentChunk)
+ {
+ CLEANUPonERR(
+ M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[i],
+ mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i],
+ mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext)); /*video (previously a_dataSize)*/
+ }
+ }
+ }
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+ /*skip*/
+
+ CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipHeader,
+ sizeof(BlockSignatureSkipHeader), mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+
+ /* Write embedded string */
+ if (mMp4FileDataPtr->embeddedString == M4OSA_NULL)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipDefaultEmbeddedString,
+ sizeof(BlockSignatureSkipDefaultEmbeddedString),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ }
+ else
+ {
+ CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->embeddedString, 16,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ }
+
+ /* Write ves core version */
+ camcoder_maj = (M4OSA_UChar)(mMp4FileDataPtr->camcoderVersion / 100);
+ camcoder_min =
+ (M4OSA_UChar)(( mMp4FileDataPtr->camcoderVersion - 100 * camcoder_maj)
+ / 10);
+ camcoder_rev =
+ (M4OSA_UChar)(mMp4FileDataPtr->camcoderVersion - 100 * camcoder_maj - 10
+ * camcoder_min);
+
+ CLEANUPonERR(M4MP4W_putByte(' ', mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_maj + '0'),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putByte('.', mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_min + '0'),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ CLEANUPonERR(M4MP4W_putByte('.', mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+ CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_rev + '0'),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ /* Write integration tag */
+ CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar *)" -- ", 4,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+ if (mMp4FileDataPtr->integrationTag == M4OSA_NULL)
+ {
+ CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipDefaultIntegrationTag,
+ sizeof(BlockSignatureSkipDefaultIntegrationTag),
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ }
+ else
+ {
+ CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->integrationTag, 60,
+ mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+ }
+
+#ifndef _M4MP4W_MOOV_FIRST
+ /*overwrite mdat size*/
+
+ if (mMp4FileDataPtr->ftyp.major_brand != 0)
+ mdatPos= 16 + mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4;
+ else
+ mdatPos = 24;
+
+ moovPos = moovPos - mdatPos;
+ mdatSize = moovPos;
+
+ CLEANUPonERR(mMp4FileDataPtr->fileWriterFunctions->seek(fileWriterContext,
+ M4OSA_kFileSeekBeginning, &mdatPos)); /*seek after ftyp...*/
+ CLEANUPonERR(M4MP4W_putBE32(mdatSize, mMp4FileDataPtr->fileWriterFunctions,
+ fileWriterContext));
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+cleanup:
+
+ /**
+ * Close the file even if an error occured */
+ if (M4OSA_NULL != mMp4FileDataPtr->fileWriterContext)
+ {
+ err2 =
+ mMp4FileDataPtr->fileWriterFunctions->closeWrite(mMp4FileDataPtr->
+ fileWriterContext); /**< close the stream anyway */
+
+ if (M4NO_ERROR != err2)
+ {
+ M4OSA_TRACE1_1(
+ "M4MP4W_closeWrite: fileWriterFunctions->closeWrite returns 0x%x",
+ err2);
+ }
+ mMp4FileDataPtr->fileWriterContext = M4OSA_NULL;
+ }
+
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+ /* Remove safety file if still present (here it is cleanup in case of error and NOT the normal
+ removal of the safety file to free emergency space for the moov). */
+
+ if (M4OSA_TRUE == mMp4FileDataPtr->cleanSafetyFile)
+ {
+ M4OSA_Context tempContext;
+ err3 = mMp4FileDataPtr->fileWriterFunctions->openWrite(&tempContext,
+ mMp4FileDataPtr->safetyFileUrl,
+ M4OSA_kFileWrite | M4OSA_kFileCreate);
+
+ if (M4NO_ERROR != err2)
+ err2 = err3;
+
+ if (M4NO_ERROR
+ != err3) /* No sense closing if we couldn't open in the first place. */
+ {
+ err3 =
+ mMp4FileDataPtr->fileWriterFunctions->closeWrite(tempContext);
+
+ if (M4NO_ERROR != err2)
+ err2 = err3;
+ }
+ mMp4FileDataPtr->safetyFileUrl = M4OSA_NULL;
+ mMp4FileDataPtr->cleanSafetyFile = M4OSA_FALSE;
+ }
+
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+
+ /* Delete embedded string */
+
+ if (M4OSA_NULL != mMp4FileDataPtr->embeddedString)
+ {
+ free(mMp4FileDataPtr->embeddedString);
+ mMp4FileDataPtr->embeddedString = M4OSA_NULL;
+ }
+
+ /* Delete integration tag */
+ if (M4OSA_NULL != mMp4FileDataPtr->integrationTag)
+ {
+ free(mMp4FileDataPtr->integrationTag);
+ mMp4FileDataPtr->integrationTag = M4OSA_NULL;
+ }
+
+ /**
+ * M4MP4W_freeContext() is now a private method, called only from here*/
+ err3 = M4MP4W_freeContext(context);
+
+ if (M4NO_ERROR != err3)
+ {
+ M4OSA_TRACE1_1("M4MP4W_closeWrite: M4MP4W_freeContext returns 0x%x",
+ err3);
+ }
+
+ /**
+ * Choose which error code to return */
+ if (M4NO_ERROR != err)
+ {
+ /**
+ * We give priority to main error */
+ M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err=0x%x", err);
+ return err;
+ }
+ else if (M4NO_ERROR != err2)
+ {
+ /**
+ * Error from closeWrite is returned if there is no main error */
+ M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err2=0x%x", err2);
+ return err2;
+ }
+ else
+ {
+ /**
+ * Error from M4MP4W_freeContext is returned only if there is no main error and
+ no close error */
+ M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err3=0x%x", err3);
+ return err3;
+ }
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_getOption( M4OSA_Context context, M4OSA_OptionID option,
+ M4OSA_DataOption *valuePtr )
+/*******************************************************************************/
+{
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4SYS_StreamIDValue *streamIDvaluePtr = M4OSA_NULL;
+ M4MP4W_StreamIDsize *streamIDsizePtr = M4OSA_NULL;
+ M4MP4W_memAddr *memAddrPtr = M4OSA_NULL;
+ /* M4MP4W_WriteCallBack* callBackPtr = M4OSA_NULL;*/
+
+ M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+ ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+ ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
+ || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
+
+ switch( option )
+ {
+ case (M4MP4W_maxAUperChunk):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_maxChunkSize):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+
+ switch( streamIDvaluePtr->streamID )
+ {
+ case (AudioStreamID):
+ if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+ return M4ERR_BAD_STREAM_ID;
+ else
+ streamIDvaluePtr->value =
+ mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
+ break;
+
+ case (VideoStreamID):
+ if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
+ return M4ERR_BAD_STREAM_ID;
+ else
+ streamIDvaluePtr->value =
+ mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
+ break;
+
+ case (0): /*all streams*/
+ streamIDvaluePtr->value = mMp4FileDataPtr->MaxChunkSize;
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ }
+
+ break;
+
+ case (M4MP4W_maxChunkInter):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+
+ switch( streamIDvaluePtr->streamID )
+ {
+ case (0): /*all streams*/
+ streamIDvaluePtr->value = (M4OSA_UInt32)mMp4FileDataPtr->
+ InterleaveDur; /*time conversion !*/
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ }
+ break;
+
+ case (M4MP4W_embeddedString):
+ memAddrPtr = (M4MP4W_memAddr *)(*valuePtr);
+ /*memAddrPtr must have been already allocated by the caller
+ and memAddrPtr->size initialized with the max possible length in bytes*/
+ ERR_CHECK(memAddrPtr->size >= 16, M4ERR_PARAMETER);
+ ERR_CHECK(memAddrPtr->addr != M4OSA_NULL, M4ERR_PARAMETER);
+ /*memAddrPtr->size is updated with the actual size of the string*/
+ memAddrPtr->size = 16;
+ /*if no value was set, return the default string */
+ if (mMp4FileDataPtr->embeddedString != M4OSA_NULL)
+ memcpy((void *)memAddrPtr->addr,
+ (void *)mMp4FileDataPtr->embeddedString, 16);
+ else
+ memcpy((void *)memAddrPtr->addr,
+ (void *)BlockSignatureSkipDefaultEmbeddedString,
+ 16);
+ break;
+
+ case (M4MP4W_integrationTag):
+ memAddrPtr = (M4MP4W_memAddr *)(*valuePtr);
+ /*memAddrPtr must have been already allocated by the caller
+ and memAddrPtr->size initialized with the max possible length in bytes*/
+ ERR_CHECK(memAddrPtr->size >= 60, M4ERR_PARAMETER);
+ ERR_CHECK(memAddrPtr->addr != M4OSA_NULL, M4ERR_PARAMETER);
+ /*memAddrPtr->size is updated with the actual size of the string*/
+ memAddrPtr->size = 60;
+ /*if no value was set, return the default string 0 */
+ if (mMp4FileDataPtr->integrationTag != M4OSA_NULL)
+ memcpy((void *)memAddrPtr->addr,
+ (void *)mMp4FileDataPtr->integrationTag, 60);
+ else
+ memcpy((void *)memAddrPtr->addr,
+ (void *)BlockSignatureSkipDefaultIntegrationTag,
+ 60);
+ break;
+
+ case (M4MP4W_CamcoderVersion):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+
+ switch( streamIDvaluePtr->streamID )
+ {
+ case (0): /*all streams*/
+ streamIDvaluePtr->value = mMp4FileDataPtr->camcoderVersion;
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ }
+ break;
+
+ case (M4MP4W_preWriteCallBack):
+ return M4ERR_NOT_IMPLEMENTED;
+ /*callBackPtr = (M4MP4W_WriteCallBack*)(*valuePtr);
+ *callBackPtr = mMp4FileDataPtr->PreWriteCallBack;
+ break;*/
+
+ case (M4MP4W_postWriteCallBack):
+ return M4ERR_NOT_IMPLEMENTED;
+ /*callBackPtr = (M4MP4W_WriteCallBack*)(*valuePtr);
+ *callBackPtr = mMp4FileDataPtr->PostWriteCallBack;
+ break;*/
+
+ case (M4MP4W_maxAUsize):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+
+ switch( streamIDvaluePtr->streamID )
+ {
+ case (AudioStreamID):
+ if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+ return M4ERR_BAD_STREAM_ID;
+ else
+ streamIDvaluePtr->value =
+ mMp4FileDataPtr->audioTrackPtr->MaxAUSize;
+ break;
+
+ case (VideoStreamID):
+ if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
+ return M4ERR_BAD_STREAM_ID;
+ else
+ streamIDvaluePtr->value =
+ mMp4FileDataPtr->videoTrackPtr->MaxAUSize;
+ break;
+
+ case (0): /*all streams*/
+ streamIDvaluePtr->value = mMp4FileDataPtr->MaxAUSize;
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ }
+
+ break;
+
+ case (M4MP4W_IOD):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_ESD):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_SDP):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_trackSize):
+ streamIDsizePtr = (M4MP4W_StreamIDsize *)(*valuePtr);
+ streamIDsizePtr->width = mMp4FileDataPtr->videoTrackPtr->width;
+ streamIDsizePtr->height = mMp4FileDataPtr->videoTrackPtr->height;
+ break;
+
+ case (M4MP4W_estimateAudioSize):
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+ streamIDvaluePtr->value =
+ (M4OSA_UInt32)mMp4FileDataPtr->estimateAudioSize;
+ break;
+
+ case (M4MP4W_MOOVfirst):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_V2_MOOF):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_V2_tblCompres):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ default:
+ return M4ERR_BAD_OPTION_ID;
+ }
+
+ return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_setOption( M4OSA_Context context, M4OSA_OptionID option,
+ M4OSA_DataOption value )
+/*******************************************************************************/
+{
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4SYS_StreamIDValue *streamIDvaluePtr = M4OSA_NULL;
+ M4MP4W_StreamIDsize *streamIDsizePtr = M4OSA_NULL;
+ M4MP4W_memAddr *memAddrPtr = M4OSA_NULL;
+ M4SYS_StreamIDmemAddr *streamIDmemAddrPtr;
+
+ M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+ ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+ /* Verify state */
+ switch( option )
+ {
+ case M4MP4W_maxFileDuration:
+ case M4MP4W_DSI:
+ /* this param can be set at the end of a recording */
+ ERR_CHECK((mMp4FileDataPtr->state != M4MP4W_closed), M4ERR_STATE);
+ break;
+
+ case M4MP4W_setFtypBox:
+ /* this param can only be set before starting any write */
+ ERR_CHECK(mMp4FileDataPtr->state == M4MP4W_opened, M4ERR_STATE);
+ break;
+
+ default:
+ /* in general params can be set at open or ready stage */
+ ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
+ || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
+ }
+
+ /* Set option */
+ switch( option )
+ {
+ case (M4MP4W_maxAUperChunk):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_maxChunkSize):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+ switch( streamIDvaluePtr->streamID )
+ {
+ case (AudioStreamID):
+ if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+ return
+ M4ERR_BAD_STREAM_ID; /*maybe the stream has not been added yet*/
+ else
+ {
+ mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
+ streamIDvaluePtr->value;
+ }
+
+ break;
+
+ case (VideoStreamID):
+ if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
+ return
+ M4ERR_BAD_STREAM_ID; /*maybe the stream has not been added yet*/
+ else
+ {
+ mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
+ streamIDvaluePtr->value;
+ }
+ break;
+
+ case (0): /*all streams*/
+
+ /*In M4MP4W_opened state, no stream is present yet, so only global value
+ needs to be updated.*/
+ mMp4FileDataPtr->MaxChunkSize = streamIDvaluePtr->value;
+
+ if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
+ {
+ mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
+ streamIDvaluePtr->value;
+ }
+
+ if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
+ {
+ mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
+ streamIDvaluePtr->value;
+ }
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ }
+ break;
+
+ case (M4MP4W_maxChunkInter):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+ switch( streamIDvaluePtr->streamID )
+ {
+ case (0): /*all streams*/
+ mMp4FileDataPtr->InterleaveDur =
+ (M4MP4W_Time32)streamIDvaluePtr->
+ value; /*time conversion!*/
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ /*not meaningfull to set this parameter on a streamID basis*/
+ }
+ break;
+
+ case (M4MP4W_maxFileSize):
+ mMp4FileDataPtr->MaxFileSize = *(M4OSA_UInt32 *)value;
+ break;
+
+ case (M4MP4W_embeddedString):
+ memAddrPtr = (M4MP4W_memAddr *)value;
+ /*
+ * If memAddrPtr->size > 16 bytes, then the string will be truncated.
+ * If memAddrPtr->size < 16 bytes, then return M4ERR_PARAMETER
+ */
+ ERR_CHECK(memAddrPtr->size >= 16, M4ERR_PARAMETER);
+
+ if (mMp4FileDataPtr->embeddedString == M4OSA_NULL)
+ {
+ mMp4FileDataPtr->embeddedString =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(16, M4MP4_WRITER,
+ (M4OSA_Char *)"embeddedString");
+ ERR_CHECK(mMp4FileDataPtr->embeddedString != M4OSA_NULL,
+ M4ERR_ALLOC);
+ }
+ /*else, just overwrite the previously set string*/
+ memcpy((void *)mMp4FileDataPtr->embeddedString,
+ (void *)memAddrPtr->addr, 16);
+ break;
+
+ case (M4MP4W_integrationTag):
+ memAddrPtr = (M4MP4W_memAddr *)value;
+ /*
+ * If memAddrPtr->size > 60 bytes, then the string will be truncated.
+ * If memAddrPtr->size < 60 bytes, then pad with 0
+ */
+ if (mMp4FileDataPtr->integrationTag == M4OSA_NULL)
+ {
+ mMp4FileDataPtr->integrationTag =
+ (M4OSA_UChar *)M4OSA_32bitAlignedMalloc(60, M4MP4_WRITER,
+ (M4OSA_Char *)"integrationTag");
+ ERR_CHECK(mMp4FileDataPtr->integrationTag != M4OSA_NULL,
+ M4ERR_ALLOC);
+ }
+ /*else, just overwrite the previously set string*/
+ if (memAddrPtr->size < 60)
+ {
+ memcpy((void *)mMp4FileDataPtr->integrationTag,
+ (void *)BlockSignatureSkipDefaultIntegrationTag,
+ 60);
+ memcpy((void *)mMp4FileDataPtr->integrationTag,
+ (void *)memAddrPtr->addr, memAddrPtr->size);
+ }
+ else
+ {
+ memcpy((void *)mMp4FileDataPtr->integrationTag,
+ (void *)memAddrPtr->addr, 60);
+ }
+ break;
+
+ case (M4MP4W_CamcoderVersion):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+ switch( streamIDvaluePtr->streamID )
+ {
+ case (0): /*all streams*/
+ mMp4FileDataPtr->camcoderVersion = streamIDvaluePtr->value;
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ /*not meaningfull to set this parameter on a streamID basis*/
+ }
+ break;
+
+ case (M4MP4W_preWriteCallBack):
+ return M4ERR_NOT_IMPLEMENTED;
+ /*mMp4FileDataPtr->PreWriteCallBack = *(M4MP4W_WriteCallBack*)value;
+ break;*/
+
+ case (M4MP4W_postWriteCallBack):
+ return M4ERR_NOT_IMPLEMENTED;
+ /*mMp4FileDataPtr->PostWriteCallBack = *(M4MP4W_WriteCallBack*)value;
+ break;*/
+
+ case (M4MP4W_maxAUsize):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+ switch( streamIDvaluePtr->streamID )
+ {
+ case (AudioStreamID):
+
+ /*if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)*/
+ if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+ return M4ERR_BAD_STREAM_ID;
+ else
+ mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
+ streamIDvaluePtr->value;
+ break;
+
+ case (VideoStreamID):
+
+ /*if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)*/
+ if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
+ return M4ERR_BAD_STREAM_ID;
+ else
+ mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
+ streamIDvaluePtr->value;
+ break;
+
+ case (0): /*all streams*/
+
+ mMp4FileDataPtr->MaxAUSize = streamIDvaluePtr->value;
+
+ if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
+ mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
+ streamIDvaluePtr->value;
+
+ if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
+ mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
+ streamIDvaluePtr->value;
+
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ }
+ break;
+
+ case (M4MP4W_IOD):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_ESD):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_SDP):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_trackSize):
+
+ streamIDsizePtr = (M4MP4W_StreamIDsize *)value;
+
+ if ((streamIDsizePtr->streamID != VideoStreamID)
+ || (mMp4FileDataPtr->hasVideo == M4OSA_FALSE))
+ return M4ERR_BAD_STREAM_ID;
+ else
+ {
+ mMp4FileDataPtr->videoTrackPtr->width = streamIDsizePtr->width;
+ mMp4FileDataPtr->videoTrackPtr->height =
+ streamIDsizePtr->height;
+ }
+ break;
+
+ case (M4MP4W_estimateAudioSize):
+
+ streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+ /*shall not set this option before audio and video streams were added*/
+ /*nonsense to set this option if not in case audio+video*/
+ if ((mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+ || (mMp4FileDataPtr->hasVideo == M4OSA_FALSE))
+ return M4ERR_STATE;
+
+ mMp4FileDataPtr->estimateAudioSize =
+ (M4OSA_Bool)streamIDvaluePtr->value;
+ break;
+
+ case (M4MP4W_MOOVfirst):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_V2_MOOF):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_V2_tblCompres):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (M4MP4W_maxFileDuration):
+ mMp4FileDataPtr->MaxFileDuration = *(M4OSA_UInt32 *)value;
+ break;
+
+ case (M4MP4W_setFtypBox):
+ {
+ M4OSA_UInt32 size;
+
+ ERR_CHECK(( (M4MP4C_FtypBox *)value)->major_brand != 0,
+ M4ERR_PARAMETER);
+
+ /* Copy structure */
+ mMp4FileDataPtr->ftyp = *(M4MP4C_FtypBox *)value;
+
+ /* Update global position variables with the difference between common and
+ user block */
+ size =
+ mMp4FileDataPtr->ftyp.nbCompatibleBrands * sizeof(M4OSA_UInt32);
+
+ mMp4FileDataPtr->absoluteCurrentPos = 8/*mdat*/ + 16 + size;
+ mMp4FileDataPtr->filesize = 218/*mdat+moov+skip*/ + 16 + size;
+ }
+ break;
+
+ case (M4MP4W_DSI):
+ {
+ streamIDmemAddrPtr = (M4SYS_StreamIDmemAddr *)value;
+
+ /* Nested switch! Whee! */
+ switch( streamIDmemAddrPtr->streamID )
+ {
+ case (AudioStreamID):
+ return M4ERR_NOT_IMPLEMENTED;
+
+ case (VideoStreamID):
+
+ /* Protect DSI setting : only once allowed on a given stream */
+
+ switch( mMp4FileDataPtr->videoTrackPtr->
+ CommonData.trackType )
+ {
+ case M4SYS_kH263:
+ if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
+ || (M4OSA_NULL
+ != mMp4FileDataPtr->videoTrackPtr->DSI))
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_setOption: dsi already set !");
+ return M4ERR_STATE;
+ }
+
+ if ((0 == streamIDmemAddrPtr->size)
+ || (M4OSA_NULL == streamIDmemAddrPtr->addr))
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_setOption: Bad H263 dsi!");
+ return M4ERR_PARAMETER;
+ }
+
+ /*decoder specific info size is supposed to be always 7
+ bytes long */
+ ERR_CHECK(streamIDmemAddrPtr->size == 7,
+ M4ERR_PARAMETER);
+ mMp4FileDataPtr->videoTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamIDmemAddrPtr->size;
+ mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
+ *)M4OSA_32bitAlignedMalloc(streamIDmemAddrPtr->size,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+ != M4OSA_NULL, M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->videoTrackPtr->
+ DSI,
+ (void *)streamIDmemAddrPtr->addr,
+ streamIDmemAddrPtr->size);
+
+ break;
+
+ case M4SYS_kMPEG_4:
+ if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
+ || (M4OSA_NULL
+ != mMp4FileDataPtr->videoTrackPtr->DSI))
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_setOption: dsi already set !");
+ return M4ERR_STATE;
+ }
+
+ if ((0 == streamIDmemAddrPtr->size)
+ || (M4OSA_NULL == streamIDmemAddrPtr->addr))
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_setOption: Bad MPEG4 dsi!");
+ return M4ERR_PARAMETER;
+ }
+
+ /*MP4V specific*/
+ ERR_CHECK(streamIDmemAddrPtr->size < 105,
+ M4ERR_PARAMETER);
+ mMp4FileDataPtr->videoTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamIDmemAddrPtr->size;
+ mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
+ *)M4OSA_32bitAlignedMalloc(streamIDmemAddrPtr->size,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+ != M4OSA_NULL, M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->videoTrackPtr->
+ DSI,
+ (void *)streamIDmemAddrPtr->addr,
+ streamIDmemAddrPtr->size);
+ mMp4FileDataPtr->filesize +=
+ streamIDmemAddrPtr->size;
+
+ break;
+
+ case M4SYS_kH264:
+ if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
+ || (M4OSA_NULL
+ != mMp4FileDataPtr->videoTrackPtr->DSI))
+ {
+ /* + H.264 trimming */
+ if (M4OSA_TRUE == mMp4FileDataPtr->bMULPPSSPS)
+ {
+ free(mMp4FileDataPtr->videoTrackPtr->DSI);
+
+ // Do not strip the DSI
+ /* Store the DSI size */
+ mMp4FileDataPtr->videoTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamIDmemAddrPtr->size;
+ M4OSA_TRACE1_1("M4MP4W_setOption: in set option DSI size =%d"\
+ ,mMp4FileDataPtr->videoTrackPtr->dsiSize);
+ /* Copy the DSI (SPS + PPS) */
+ mMp4FileDataPtr->videoTrackPtr->DSI =
+ (M4OSA_UChar*)M4OSA_32bitAlignedMalloc(
+ streamIDmemAddrPtr->size, M4MP4_WRITER,
+ (M4OSA_Char *)"videoTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI !=
+ M4OSA_NULL, M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->videoTrackPtr->DSI,
+ (void *)streamIDmemAddrPtr->addr,
+ streamIDmemAddrPtr->size);
+
+ break;
+ /* - H.264 trimming */
+ }
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_setOption: dsi already set !");
+ return M4ERR_STATE;
+ }
+ }
+
+ if (( 0 == streamIDmemAddrPtr->size)
+ || (M4OSA_NULL == streamIDmemAddrPtr->addr))
+ {
+ M4OSA_TRACE1_0(
+ "M4MP4W_setOption: Bad H264 dsi!");
+ return M4ERR_PARAMETER;
+ }
+
+ /* Store the DSI size */
+ mMp4FileDataPtr->videoTrackPtr->dsiSize =
+ (M4OSA_UInt8)streamIDmemAddrPtr->size;
+
+ /* Copy the DSI (SPS + PPS) */
+ mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
+ *)M4OSA_32bitAlignedMalloc(streamIDmemAddrPtr->size,
+ M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+ ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+ != M4OSA_NULL, M4ERR_ALLOC);
+ memcpy(
+ (void *)mMp4FileDataPtr->videoTrackPtr->
+ DSI,
+ (void *)streamIDmemAddrPtr->addr,
+ streamIDmemAddrPtr->size);
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ }
+ break;
+
+ default:
+ return M4ERR_BAD_STREAM_ID;
+ }
+ }
+ break;
+ /* H.264 Trimming */
+ case M4MP4W_MUL_PPS_SPS:
+ mMp4FileDataPtr->bMULPPSSPS = *(M4OSA_Int8 *)value;
+ /* H.264 Trimming */
+ break;
+
+ default:
+ return M4ERR_BAD_OPTION_ID;
+ }
+
+ return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_getState( M4OSA_Context context, M4MP4W_State *state,
+                          M4SYS_StreamID streamID )
+/*******************************************************************************/
+{
+    /* Returns the writer state: the global writer state for streamID 0, or
+     * the per-track micro-state for AudioStreamID / VideoStreamID.
+     * Returns M4ERR_BAD_STREAM_ID when the requested track is absent. */
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+    /* Validate the output pointer before it is dereferenced below, for
+       consistency with M4MP4W_getCurrentFileSize() */
+    ERR_CHECK(state != M4OSA_NULL, M4ERR_PARAMETER);
+
+    switch( streamID )
+    {
+        case (0):
+            *state = mMp4FileDataPtr->state;
+            break;
+
+        case (AudioStreamID):
+            if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
+            {
+                *state = mMp4FileDataPtr->audioTrackPtr->microState;
+            }
+            else
+            {
+                return M4ERR_BAD_STREAM_ID;
+            }
+            break;
+
+        case (VideoStreamID):
+            if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
+            {
+                *state = mMp4FileDataPtr->videoTrackPtr->microState;
+            }
+            else
+            {
+                return M4ERR_BAD_STREAM_ID;
+            }
+            break;
+
+        default:
+            return M4ERR_BAD_STREAM_ID;
+    }
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_getCurrentFileSize( M4OSA_Context context,
+                                    M4OSA_UInt32 *pCurrentFileSize )
+/*******************************************************************************/
+{
+    /* Reports the size, in bytes, of the file as currently written. */
+    M4MP4W_Mp4FileData *pFileData = (M4MP4W_Mp4FileData *)context;
+
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+    ERR_CHECK(pCurrentFileSize != M4OSA_NULL, M4ERR_PARAMETER);
+
+    *pCurrentFileSize = pFileData->filesize;
+
+    return M4NO_ERROR;
+}
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
diff --git a/libvideoeditor/vss/Android.mk b/libvideoeditor/vss/Android.mk
new file mode 100755
index 0000000..1d4ec7f
--- /dev/null
+++ b/libvideoeditor/vss/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles) \ No newline at end of file
diff --git a/libvideoeditor/vss/common/inc/From2iToMono_16.h b/libvideoeditor/vss/common/inc/From2iToMono_16.h
new file mode 100755
index 0000000..c6ffb3f
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/From2iToMono_16.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FROM2ITOMONO_16_H_
+#define _FROM2ITOMONO_16_H_
+
+
+void From2iToMono_16( const short *src,
+ short *dst,
+ short n);
+
+/**********************************************************************************/
+
+#endif /* _FROM2ITOMONO_16_H_ */
+
+/**********************************************************************************/
+
diff --git a/libvideoeditor/vss/common/inc/LVM_Types.h b/libvideoeditor/vss/common/inc/LVM_Types.h
new file mode 100755
index 0000000..a28974d
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/LVM_Types.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/****************************************************************************************
+ * @file name: LVM_Types.h
+
+*****************************************************************************************/
+
+/****************************************************************************************/
+/* */
+/* Header file defining the standard LifeVibes types for use in the application layer */
+/* interface of all LifeVibes modules */
+/* */
+/****************************************************************************************/
+
+#ifndef LVM_TYPES_H
+#define LVM_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/****************************************************************************************/
+/* */
+/* definitions */
+/* */
+/****************************************************************************************/
+
+#define LVM_NULL 0 /* NULL pointer */
+
+#define LVM_TRUE 1 /* Booleans */
+#define LVM_FALSE 0
+
+#define LVM_MAXINT_8 127 /* Maximum positive integer size */
+#define LVM_MAXINT_16 32767
+#define LVM_MAXINT_32 2147483647
+#define LVM_MAXENUM 2147483647
+
+#define LVM_MODULEID_MASK 0xFF00 /* Mask to extract the calling module ID
+ from callbackId */
+#define LVM_EVENTID_MASK 0x00FF /* Mask to extract the callback event from
+ callbackId */
+
+/* Memory table*/
+#define LVM_MEMREGION_PERSISTENT_SLOW_DATA 0 /* Offset to the instance memory region */
+#define LVM_MEMREGION_PERSISTENT_FAST_DATA 1 /* Offset to the persistent data memory
+ region */
+#define LVM_MEMREGION_PERSISTENT_FAST_COEF 2 /* Offset to the persistent coefficient
+ memory region */
+#define LVM_MEMREGION_TEMPORARY_FAST 3 /* Offset to temporary memory region */
+
+
+/****************************************************************************************/
+/* */
+/* Basic types */
+/* */
+/****************************************************************************************/
+
+typedef char LVM_CHAR; /* ASCII character */
+
+typedef char LVM_INT8; /* Signed 8-bit word */
+typedef unsigned char LVM_UINT8; /* Unsigned 8-bit word */
+
+typedef short LVM_INT16; /* Signed 16-bit word */
+typedef unsigned short LVM_UINT16; /* Unsigned 16-bit word */
+
+/* NOTE(review): 'long' is 64-bit on LP64 ABIs, so LVM_INT32/LVM_UINT32 are
+   only truly 32-bit on ILP32/LLP64 targets -- confirm target ABI (int32_t
+   from <stdint.h> would be unambiguous). */
+typedef long LVM_INT32; /* Signed 32-bit word */
+typedef unsigned long LVM_UINT32; /* Unsigned 32-bit word */
+
+
+/****************************************************************************************/
+/* */
+/* Standard Enumerated types */
+/* */
+/****************************************************************************************/
+
+/* Operating mode */
+typedef enum
+{
+ LVM_MODE_OFF = 0,
+ LVM_MODE_ON = 1,
+ LVM_MODE_DUMMY = LVM_MAXENUM
+} LVM_Mode_en;
+
+
+/* Format */
+typedef enum
+{
+ LVM_STEREO = 0,
+ LVM_MONOINSTEREO = 1,
+ LVM_MONO = 2,
+ LVM_SOURCE_DUMMY = LVM_MAXENUM
+} LVM_Format_en;
+
+
+/* Word length */
+typedef enum
+{
+ LVM_16_BIT = 0,
+ LVM_32_BIT = 1,
+ LVM_WORDLENGTH_DUMMY = LVM_MAXENUM
+} LVM_WordLength_en;
+
+
+/* LVM sampling rates */
+typedef enum
+{
+ LVM_FS_8000 = 0,
+ LVM_FS_11025 = 1,
+ LVM_FS_12000 = 2,
+ LVM_FS_16000 = 3,
+ LVM_FS_22050 = 4,
+ LVM_FS_24000 = 5,
+ LVM_FS_32000 = 6,
+ LVM_FS_44100 = 7,
+ LVM_FS_48000 = 8,
+ LVM_FS_INVALID = LVM_MAXENUM-1,
+ LVM_FS_DUMMY = LVM_MAXENUM
+} LVM_Fs_en;
+
+
+/* Memory Types */
+typedef enum
+{
+ LVM_PERSISTENT_SLOW_DATA = LVM_MEMREGION_PERSISTENT_SLOW_DATA,
+ LVM_PERSISTENT_FAST_DATA = LVM_MEMREGION_PERSISTENT_FAST_DATA,
+ LVM_PERSISTENT_FAST_COEF = LVM_MEMREGION_PERSISTENT_FAST_COEF,
+ LVM_TEMPORARY_FAST = LVM_MEMREGION_TEMPORARY_FAST,
+ LVM_MEMORYTYPE_DUMMY = LVM_MAXENUM
+} LVM_MemoryTypes_en;
+
+
+/* Memory region definition */
+typedef struct
+{
+ LVM_UINT32 Size; /* Region size in bytes */
+ LVM_MemoryTypes_en Type; /* Region type */
+ void *pBaseAddress; /* Pointer to the region base address */
+} LVM_MemoryRegion_st;
+
+
+/****************************************************************************************/
+/* */
+/* Standard Function Prototypes */
+/* */
+/****************************************************************************************/
+typedef LVM_INT32 (*LVM_Callback)(void *pCallbackData, /* Pointer to the callback
+ data structure */
+ void *pGeneralPurpose, /* General purpose pointer
+ (e.g. to a data structure
+ needed in the callback) */
+ LVM_INT16 GeneralPurpose ); /* General purpose variable
+ (e.g. to be used as callback ID) */
+
+
+/****************************************************************************************/
+/* */
+/* End of file */
+/* */
+/****************************************************************************************/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* LVM_TYPES_H */
diff --git a/libvideoeditor/vss/common/inc/M4AD_Common.h b/libvideoeditor/vss/common/inc/M4AD_Common.h
new file mode 100755
index 0000000..f6e596d
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4AD_Common.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4AD_Common.h
+ * @brief Audio Shell Decoder common interface declaration
+ * @note This file declares the common interfaces that audio decoder shells must implement
+ ************************************************************************
+*/
+#ifndef __M4AD_COMMON_H__
+#define __M4AD_COMMON_H__
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_OptionID.h"
+#include "M4OSA_CoreID.h"
+#include "M4DA_Types.h"
+#include "M4TOOL_VersionInfo.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+typedef M4OSA_Void* M4AD_Context;
+
+/**
+ ************************************************************************
+ * enum M4AD_OptionID
+ * @brief This enum defines the Audio decoder options.
+ * @note These options can be read from or written to a decoder via
+ * M4AD_getOption_fct/M4AD_setOption_fct
+ ************************************************************************
+*/
+typedef enum
+{
+ /**
+ * Set the flag of presence of protection */
+ M4AD_kOptionID_ProtectionAbsent = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x01),
+
+ /**
+ * Set the number of frames per bloc */
+ M4AD_kOptionID_NbFramePerBloc = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x02),
+
+ /**
+ * Set the AAC decoder user parameters */
+ M4AD_kOptionID_UserParam = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x03),
+
+
+ /**
+ * Get the AAC stream type */
+ M4AD_kOptionID_StreamType = M4OSA_OPTION_ID_CREATE(M4_READ , M4DECODER_AUDIO, 0x10),
+
+ /**
+ * Get the number of used bytes in the latest decode
+ (used only when decoding AAC from ADIF file) */
+ M4AD_kOptionID_UsedBytes = M4OSA_OPTION_ID_CREATE(M4_READ , M4DECODER_AUDIO, 0x11),
+
+ /* Reader Interface */
+ M4AD_kOptionID_3gpReaderInterface = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x012),
+
+ /* Audio Access Unit */
+ M4AD_kOptionID_AudioAU = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x13),
+
+ /* Reader error code */
+ M4AD_kOptionID_GetAudioAUErrCode = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x14),
+
+ /* Number of channels */
+ M4AD_kOptionID_AudioNbChannels = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x15),
+
+ /* Sampling frequency */
+ M4AD_kOptionID_AudioSampFrequency = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x16),
+
+ /* Audio AU CTS */
+ M4AD_kOptionID_AuCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AUDIO, 0x17)
+
+} M4AD_OptionID;
+
+
+
+typedef enum
+{
+ M4_kUnknown = 0, /* Unknown stream type */
+ M4_kAAC, /* M4_kAAC_MAIN or M4_kAAC_LC or M4_kAAC_SSR or M4_kAAC_LTP */
+ M4_kAACplus, /* Decoder type is AAC plus */
+ M4_keAACplus /* Decoder type is enhanced AAC plus */
+} M4_AACType;
+
+/**
+ ************************************************************************
+ * enum M4AD_Type
+ * @brief This enum defines the audio types used to create decoders
+ * @note This enum is used internally by the VPS to identify a currently supported
+ * audio decoder interface. Each decoder is registered with one of this type associated.
+ * When a decoder instance is needed, this type is used to identify
+ * and retrieve its interface.
+ ************************************************************************
+*/
+typedef enum
+{
+ M4AD_kTypeAMRNB = 0,
+ M4AD_kTypeAMRWB,
+ M4AD_kTypeAAC,
+ M4AD_kTypeMP3,
+ M4AD_kTypePCM,
+ M4AD_kTypeBBMusicEngine,
+ M4AD_kTypeWMA,
+ M4AD_kTypeRMA,
+ M4AD_kTypeADPCM,
+ M4AD_kType_NB /* number of decoders, keep it as last enum entry */
+
+} M4AD_Type ;
+
+
+
+/**
+ ************************************************************************
+ * structure M4AD_Buffer
+ * @brief Structure to describe a buffer
+ ************************************************************************
+*/
+typedef struct
+{
+    M4OSA_MemAddr8 m_dataAddress; /**< Pointer to the buffer data */
+    M4OSA_UInt32 m_bufferSize;    /**< Size of the data, in bytes */
+    int64_t m_timeStampUs;        /**< Buffer timestamp; presumably in
+                                       microseconds given the 'Us' suffix --
+                                       TODO confirm against producers */
+} M4AD_Buffer;
+
+/**
+ ************************************************************************
+ * @brief Creates an instance of the decoder
+ * @note Allocates the context
+ *
+ * @param pContext: (OUT) Context of the decoder
+ * @param pStreamHandler: (IN) Pointer to an audio stream description
+ * @param pUserData: (IN) Pointer to User data
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_ALLOC a memory allocation has failed
+ * @return M4ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+
+typedef M4OSA_ERR (M4AD_create_fct)(M4AD_Context *pContext,
+ M4_AudioStreamHandler *pStreamHandler, void* pUserData);
+
+
+/**
+ ************************************************************************
+ * @brief Destroys the instance of the decoder
+ * @note After this call the context is invalid
+ *
+ * @param context: (IN) Context of the decoder
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_destroy_fct) (M4AD_Context context);
+
+/**
+ ************************************************************************
+ * @brief Decodes the given audio data
+ * @note Parses and decodes the next audio frame, from the given buffer.
+ * This function changes pInputBufferSize value according to the amount
+ * of data actually read.
+ *
+ * @param context: (IN) Context of the decoder
+ * @param inputBuffer: (IN/OUT)Input Data buffer. It contains at least one audio frame.
+ * The size of the buffer must be updated inside the
+ * function to reflect the size of the actually decoded data.
+ * (e.g. the first frame in pInputBuffer)
+ * @param decodedPCMBuffer: (OUT) Output PCM buffer (decoded data).
+ * @param jumping: (IN) M4OSA_TRUE if a jump was just done, M4OSA_FALSE otherwise.
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_step_fct) (M4AD_Context context, M4AD_Buffer *pInputBuffer,
+ M4AD_Buffer *pDecodedPCMBuffer, M4OSA_Bool jumping);
+
+/**
+ ************************************************************************
+ * @brief Gets the decoder version
+ * @note The version is given in a M4_VersionInfo structure
+ *
+ * @param pValue: (OUT) Pointer to the version structure
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER The given pointer is null (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_getVersion_fct)(M4_VersionInfo* pVersionInfo);
+
+
+/**
+ ************************************************************************
+ * @brief This function creates the AAC core decoder according to
+ * the stream properties and to the options that may
+ * have been set using M4AD_setOption_fct
+ * @note Creates an instance of the AAC decoder
+ * @note This function is used especially by the AAC decoder
+ *
+ * @param pContext: (IN/OUT) Context of the decoder
+ * @param pStreamHandler: (IN) Pointer to an audio stream description
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_ALLOC a memory allocation has failed
+ * @return M4ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_start_fct) (M4AD_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief Reset the instance of the decoder
+ *
+ * @param context: (IN) Context of the decoder
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_reset_fct) (M4AD_Context context);
+
+
+/**
+ ************************************************************************
+ * @brief Set an option value of the audio decoder
+ *
+ * @param context: (IN) Context of the decoder
+ * @param optionId: (IN) indicates the option to set
+ * @param pValue: (IN) pointer to structure or value (allocated by user)
+ * where option is stored
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_setOption_fct) (M4AD_Context context,
+ M4OSA_OptionID optionId, M4OSA_DataOption pValue);
+
+/**
+ ************************************************************************
+ * @brief Get an option value of the audio decoder
+ *
+ * @param context: (IN) Context of the decoder
+ * @param optionId: (IN) indicates the option to set
+ * @param pValue: (OUT) pointer to structure or value (allocated by user)
+ * where option is stored
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_getOption_fct) (M4AD_Context context, M4OSA_OptionID optionId,
+ M4OSA_DataOption pValue);
+/**
+ ************************************************************************
+ * structure M4AD_Interface
+ * @brief This structure defines the generic audio decoder interface
+ * @note This structure stores the pointers to functions of one audio decoder type.
+ * The decoder type is one of the M4AD_Type
+ ************************************************************************
+*/
+typedef struct _M4AD_Interface
+{
+
+ M4AD_create_fct* m_pFctCreateAudioDec;
+ M4AD_start_fct* m_pFctStartAudioDec;
+ M4AD_step_fct* m_pFctStepAudioDec;
+ M4AD_getVersion_fct* m_pFctGetVersionAudioDec;
+ M4AD_destroy_fct* m_pFctDestroyAudioDec;
+ M4AD_reset_fct* m_pFctResetAudioDec;
+ M4AD_setOption_fct* m_pFctSetOptionAudioDec;
+ M4AD_getOption_fct* m_pFctGetOptionAudioDec;
+
+} M4AD_Interface;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*__M4AD_COMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4AD_Null.h b/libvideoeditor/vss/common/inc/M4AD_Null.h
new file mode 100755
index 0000000..78140cd
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4AD_Null.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+*************************************************************************
+ * @file M4AD_Null.h
+ * @brief Implementation of the decoder public interface that do nothing
+ * @note This file defines the getInterface function.
+*************************************************************************
+*/
+#ifndef __M4AD_NULL_H__
+#define __M4AD_NULL_H__
+
+#include "M4AD_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ ************************************************************************
+ * @brief Retrieves the interface implemented by the decoder
+ * @param pDecoderType : pointer on an M4AD_Type (allocated by the caller)
+ * that will be filled with the decoder type supported by this decoder
+ * @param pDecoderInterface : address of a pointer that will be set to the interface implemented
+ * by this decoder. The interface is a structure allocated by the
+ * function and must be un-allocated by the caller.
+ *
+ * @return : M4NO_ERROR if OK
+ * M4ERR_ALLOC if allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_getInterface( M4AD_Type *pDecoderType, M4AD_Interface **pDecoderInterface);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4AD_NULL_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4AIR_API.h b/libvideoeditor/vss/common/inc/M4AIR_API.h
new file mode 100755
index 0000000..7541362
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4AIR_API.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file M4AIR_API.h
+ * @brief Area of Interest Resizer API
+ * @note
+*************************************************************************
+*/
+#ifndef M4AIR_API_H
+#define M4AIR_API_H
+
+/******************************* INCLUDES *******************************/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Mutex.h"
+#include "M4OSA_Memory.h"
+#include "M4VIFI_FiltersAPI.h"
+#include "M4Common_types.h"
+
+/************************ M4AIR TYPES DEFINITIONS ***********************/
+
+/**
+ ******************************************************************************
+ * enum M4AIR_InputFormatType
+ * @brief The following enumeration lists the different accepted format for the AIR.
+ * To be available, the associated compilation flag must be defined, else,
+ * the AIR will return an error (compilation flag : M4AIR_XXXXXX_FORMAT_SUPPORTED).
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4AIR_kYUV420P,
+ M4AIR_kYUV420AP,
+ M4AIR_kJPG
+}M4AIR_InputFormatType ;
+
+
+/**
+ ******************************************************************************
+ * struct M4AIR_Coordinates
+ * @brief The following structure is used to retrieve X and Y coordinates in a given picture.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt32 m_x; /**< X coordinate */
+ M4OSA_UInt32 m_y; /**< Y coordinate */
+}M4AIR_Coordinates;
+
+
+/**
+ ******************************************************************************
+ * struct M4AIR_Size
+ * @brief The following structure is used to retrieve the dimension of a given picture area.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt32 m_width; /**< Width */
+ M4OSA_UInt32 m_height; /**< Height */
+}M4AIR_Size;
+
+
+/**
+ ******************************************************************************
+ * struct M4AIR_Params
+ * @brief The following structure is used to retrieve the parameters needed to get a resized ROI (Region of interest).
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4AIR_Coordinates m_inputCoord; /**< X and Y positioning in the input of the first interesting pixel (top-left) */
+    M4AIR_Size m_inputSize; /**< Size of the interesting area inside input (width and height)*/
+    M4AIR_Size m_outputSize; /**< Size of the output */
+    M4OSA_Bool m_bOutputStripe; /**< Flag to know if we will have to provide output per stripe or not */
+    M4COMMON_Orientation m_outputOrientation; /**< Desired orientation of the AIR output */
+}M4AIR_Params;
+
+
+
+
+/*********************** M4AIR ERRORS DEFINITIONS **********************/
+
+/* This error means that the requested video format is not supported. */
+#define M4ERR_AIR_FORMAT_NOT_SUPPORTED M4OSA_ERR_CREATE(M4_ERR,M4AIR,0x000001)
+
+/* This error means that the input or output size is incorrect */
+#define M4ERR_AIR_ILLEGAL_FRAME_SIZE M4OSA_ERR_CREATE(M4_ERR,M4AIR,0x000002)
+
+
+
+/********************** M4AIR PUBLIC API DEFINITIONS ********************/
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat);
+ * @brief This function initialize an instance of the AIR.
+ * @param pContext: (IN/OUT) Address of the context to create
+ * @param inputFormat: (IN) input format type.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
+ * @return M4ERR_ALLOC: No more memory is available
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+ * @brief This function destroys an instance of the AIR component
+ * @param pContext: (IN) Context identifying the instance to destroy
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return M4ERR_STATE: Internal state is incompatible with this function call.
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+ * @brief This function will configure the AIR.
+ * @note It will set the input and output coordinates and sizes,
+ * and indicates if we will proceed in stripe or not.
+ * In case a M4AIR_get in stripe mode was on going, it will cancel this previous
+ * processing and reset the get process.
+ * @param pContext: (IN) Context identifying the instance
+ * @param pParams->m_bOutputStripe:(IN) Stripe mode.
+ * @param pParams->m_inputCoord: (IN) X,Y coordinates of the first valid pixel in input.
+ * @param pParams->m_inputSize: (IN) input ROI size.
+ * @param pParams->m_outputSize: (IN) output size.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
+ * @brief This function will provide the requested resized area of interest according to
+ * settings provided in M4AIR_configure.
+ * @note In case the input format type is JPEG, input plane(s)
+ * in pIn is not used. In normal mode, dimension specified in output plane(s) structure
+ * must be the same than the one specified in M4AIR_configure. In stripe mode, only
+ * the width will be the same, height will be taken as the stripe height (typically 16).
+ * In normal mode, this function is call once to get the full output picture. In stripe
+ * mode, it is called for each stripe till the whole picture has been retrieved,and
+ * the position of the output stripe in the output picture is internally incremented
+ * at each step.
+ * Any call to M4AIR_configure during stripe process will reset this one to the
+ * beginning of the output picture.
+ * @param pContext: (IN) Context identifying the instance
+ * @param pIn: (IN) Plane structure containing input Plane(s).
+ * @param pOut: (IN/OUT) Plane structure containing output Plane(s).
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut);
+
+
+
+#endif /* M4AIR_API_H */
diff --git a/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h b/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h
new file mode 100755
index 0000000..9d710f3
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4AMRR_CoreReader.h
+ * @brief Implementation of AMR parser
+ * @note This file contains the API def. for AMR Parser.
+ ******************************************************************************
+*/
+#ifndef __M4AMR_COREREADER_H__
+#define __M4AMR_COREREADER_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "M4OSA_Types.h"
+#include "M4OSA_FileReader.h"
+#include "M4SYS_Stream.h"
+#include "M4SYS_AccessUnit.h"
+#include "M4OSA_Time.h"
+#include "M4TOOL_VersionInfo.h"
+
+/**
+ ******************************************************************************
+ * AMR reader Errors & Warnings definition
+ ******************************************************************************
+*/
+#define M4ERR_AMR_INVALID_FRAME_TYPE M4OSA_ERR_CREATE(M4_ERR,M4AMR_READER, 0x000001)
+#define M4ERR_AMR_NOT_COMPLIANT M4OSA_ERR_CREATE(M4_ERR,M4AMR_READER, 0x000002)
+
+/**
+ ******************************************************************************
+ * enumeration M4AMRR_State
+ * @brief This enum defines the AMR reader states
+ * @note These states are used internally, but can be retrieved from outside the reader.
+ ******************************************************************************
+*/
+typedef enum{
+ M4AMRR_kOpening = 0x0100,
+ M4AMRR_kOpened = 0x0101,
+ M4AMRR_kReading = 0x0200,
+ M4AMRR_kReading_nextAU = 0x0201,
+ M4AMRR_kClosed = 0x300
+}M4AMRR_State;
+
+/**
+*******************************************************************************
+* M4OSA_ERR M4AMRR_openRead (M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
+* M4OSA_FileReadPointer* pFileFunction);
+* @brief M4AMRR_OpenRead parses the meta data of the AMR and allocates data structure
+* @note This function opens the file and creates a context for AMR Parser.
+* - sets context to null if error occurred.
+* @param pContext(OUT) : AMR Reader context allocated in the function
+* @param pFileDescriptor(IN): File descriptor of the input file
+* @param pFileFunction(IN) : pointer to file function for file access
+*
+* @returns M4NO_ERROR : There is no error
+* @returns M4ERR_PARAMETER : pContext and/or pFileDescriptor is NULL
+* @returns M4ERR_ALLOC : Memory allocation failed
+* @returns M4ERR_FILE_NOT_FOUND : file cannot be found
+* @returns M4AMRR_ERR_AMR_NOT_COMPLIANT : The input is not an AMR file
+* @returns M4OSA_FILE : See OSAL file Spec. for details.
+*******************************************************************************
+*/
+M4OSA_ERR M4AMRR_openRead (M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
+ M4OSA_FileReadPointer* pFileFunction);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc );
+* @brief Reads the next available stream in the file
+* @note Get the stream description of the stream.
+* - This function assumes that there is only one stream in AMR file.
+* @param Context(IN/OUT) : AMR Reader context
+* @param pStreamDesc(OUT): Description of the next read stream
+*
+* @returns M4NO_ERROR : There is no error
+* @returns M4ERR_PARAMETER : at least one parameter is NULL
+* @returns M4ERR_BAD_CONTEXT : The provided context is not valid
+* @returns M4ERR_ALLOC : Memory allocation failed
+* @returns M4ERR_STATE : this function cannot be called in this state.
+* @returns M4AMRR_WAR_NO_MORE_STREAM : There are no more streams in the file.
+******************************************************************************
+*/
+
+M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc );
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs );
+* @brief Prepares the AMR reading of the specified stream Ids
+* @note This function changes the state of the reader reading.
+* @param Context(IN/OUT) : AMR Reader context
+* @param pStreamIDs(IN) : Array of stream Ids to be prepared.
+*
+* @returns M4NO_ERROR : There is no error
+* @returns M4ERR_PARAMETER : at least one parameter is NULL
+* @returns M4ERR_BAD_CONTEXT : The provided context is not valid
+* @returns M4ERR_ALLOC : Memory allocation failed
+* @returns M4ERR_STATE : this function cannot be called in this state.
+* @returns M4ERR_BAD_STREAM_ID : At least one of the stream Ids does not exist.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs );
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
+* @brief Reads the access unit into the providing stream
+* @note This function allocates the memory for the dataAddress field and copies the data.
+* -The application should not free the dataAddress pointer.
+* @param Context(IN/OUT) : AMR Reader context
+* @param StreamID(IN) : Selects the stream
+* @param pAu(IN/OUT) : Access Unit
+*
+* @returns M4NO_ERROR : There is no error
+* @returns M4ERR_PARAMETER : at least one parameter is NULL
+* @returns M4ERR_BAD_CONTEXT : The provided context is not valid
+* @returns M4ERR_ALLOC : Memory allocation failed
+* @returns M4ERR_STATE : this function cannot be called in this state.
+* @returns M4ERR_BAD_STREAM_ID : At least one of the stream Ids does not exist.
+* @returns M4WAR_NO_DATA_YET : there is not enough data on the stream for a new access unit
+* @returns M4WAR_END_OF_STREAM : There are no more access units in the stream
+* @returns M4AMRR_ERR_INVALID_FRAME_TYPE : current frame has no valid frame type.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
+* @brief Notify the AMR Reader that application will no longer use "AU"
+* @note This function frees the memory pointed by pAu->dataAddress pointer
+* -Changes the state of the reader back to reading.
+* @param Context(IN/OUT) : AMR Reader context
+* @param StreamID(IN) : Selects the stream
+* @param pAu(IN) : Access Unit
+*
+* @returns M4NO_ERROR : There is no error
+* @returns M4ERR_PARAMETER : at least one parameter is NULL
+* @returns M4ERR_BAD_CONTEXT : The provided context is not valid
+* @returns M4ERR_ALLOC : Memory allocation failed
+* @returns M4ERR_STATE : this function cannot be called in this state.
+* @returns M4ERR_BAD_STREAM_ID : At least one of the stream Ids does not exist.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+* M4SYS_SeekAccessMode seekMode, M4OSA_Time* pObtainCTS);
+* @brief The function seeks the targeted time in the given stream by streamId.
+* @note Each frame is of 20 ms duration; builds the seek table and points
+* the file pointer to the start of the required AU.
+* @param Context(IN/OUT) : AMR Reader context
+* @param StreamID(IN) : Array of stream IDs.
+* @param time(IN) : targeted time
+* @param seekMode(IN) : Selects the seek mode
+* @param pObtainCTS(OUT) : Returned time nearest to target.
+*
+* @returns M4NO_ERROR : There is no error
+* @returns M4ERR_PARAMETER : at least one parameter is NULL
+* @returns M4ERR_BAD_CONTEXT : The provided context is not valid
+* @returns M4ERR_ALLOC : Memory allocation failed
+* @returns M4ERR_STATE : this function cannot be called in this state.
+* @returns M4ERR_BAD_STREAM_ID : At least one of the stream Ids does not exist.
+* @returns M4WAR_INVALID_TIME : time cannot be reached.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+ M4SYS_SeekAccessMode seekMode, M4OSA_Time* pObtainCTS);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context);
+* @brief AMR reader closes the file
+* @param Context(IN/OUT) : AMR Reader context
+* @returns M4NO_ERROR : There is no error
+* @returns M4ERR_PARAMETER : at least one parameter is NULL
+* @returns M4ERR_BAD_CONTEXT : The provided context is not valid
+* @returns M4ERR_ALLOC : Memory allocation failed
+* @returns M4ERR_STATE : this function cannot be called in this state.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId);
+* @brief Gets the current state of the AMR reader
+* @param Context(IN/OUT) : AMR Reader context
+* @param pState(OUT) : Core AMR reader state
+* @param streamId(IN) : Selects the stream 0 for all
+*
+* @returns M4NO_ERROR : There is no error
+* @returns M4ERR_PARAMETER : at least one parameter is NULL
+* @returns M4ERR_BAD_CONTEXT : The provided context is not valid
+* @returns M4ERR_BAD_STREAM_ID : At least one of the stream Ids does not exist.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AMRR_getVersion (M4_VersionInfo *pVersion)
+ * @brief Gets the current version of the AMR reader
+ * @param version(OUT) : the structure that stores the version numbers
+ *
+ * @returns M4NO_ERROR : There is no error
+ * @returns M4ERR_PARAMETER : version is NULL
+ ******************************************************************************
+*/
+M4OSA_ERR M4AMRR_getVersion (M4_VersionInfo *pVersion);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AMRR_getmaxAUsize (M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize)
+ * @brief Computes the maximum access unit size of a stream
+ *
+ * @param Context (IN) Context of the reader
+ * @param pMaxAuSize (OUT) Maximum Access Unit size in the stream
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: One of the input pointer is M4OSA_NULL (Debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4AMRR_getmaxAUsize(M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus*/
+#endif /*__M4AMR_COREREADER_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4Common_types.h b/libvideoeditor/vss/common/inc/M4Common_types.h
new file mode 100755
index 0000000..9e6a0fb
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4Common_types.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4Common_Types.h
+ * @brief defines common structures
+ * @note
+ *
+ ************************************************************************
+*/
+#ifndef M4COMMON_TYPES_H
+#define M4COMMON_TYPES_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+
+/**
+ ************************************************************************
+ * structure M4COMMON_MetadataType
+ ************************************************************************
+*/
+typedef enum
+{
+ M4COMMON_kUnknownMetaDataType,
+ /* Local files */
+ M4COMMON_kTagID3v1, /**< Metadata from TAG ID3 V1 */
+ M4COMMON_kTagID3v2, /**< Metadata from TAG ID3 V2 */
+ M4COMMON_kASFContentDesc, /**< Metadata from ASF content description */
+
+ M4COMMON_k3GppAssetMovieBox, /**< Metadata from a 3gpp file (movie box) */
+ M4COMMON_k3GppAssetTrackBox, /**< Metadata from a 3gpp file (track box) */
+
+ /* Streaming */
+ M4COMMON_kMetaDataSdpSession, /**< Metadata from an SDP file (Session level) */
+ M4COMMON_kMetaDataSdpAudio, /**< Metadata from an SDP file (media audio level) */
+ M4COMMON_kMetaDataSdpVideo, /**< Metadata from an SDP file (media video level) */
+
+ M4COMMON_kJpegExif /**< EXIF in JPEG */
+} M4COMMON_MetadataType;
+
+/**
+ ************************************************************************
+ * enumeration M4VPS_EncodingFormat
+ * @brief Text encoding format
+ ************************************************************************
+*/
+typedef enum
+{
+ M4COMMON_kEncFormatUnknown = 0, /**< Unknown format */
+ M4COMMON_kEncFormatASCII = 1, /**< ISO-8859-1. Terminated with $00 */
+ M4COMMON_kEncFormatUTF8 = 2, /**< UTF-8 encoded Unicode . Terminated with $00 */
+ M4COMMON_kEncFormatUTF16 = 3 /**< UTF-16 encoded Unicode. Terminated with $00 00 */
+} M4COMMON_EncodingFormat;
+
+/**
+ ************************************************************************
+ * structure M4VPS_String
+ * @brief This structure defines string attribute
+ ************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Void* m_pString; /**< Pointer to text */
+ M4OSA_UInt32 m_uiSize; /**< Text size in bytes */
+ M4COMMON_EncodingFormat m_EncodingFormat; /**< Text encoding format */
+
+} M4COMMON_String;
+
+/**
+ ************************************************************************
+ * structure M4COMMON_Buffer
+ * @brief This structure defines generic buffer attribute
+ ************************************************************************
+*/
+typedef struct
+{
+ M4OSA_MemAddr8 m_pBuffer; /**< Pointer to buffer */
+ M4OSA_UInt32 m_size; /**< size of buffer in bytes */
+} M4COMMON_Buffer;
+
+typedef enum
+{
+ M4COMMON_kMimeType_NONE,
+ M4COMMON_kMimeType_JPG,
+ M4COMMON_kMimeType_PNG,
+ M4COMMON_kMimeType_BMP, /* bitmap, with header */
+ M4COMMON_kMimeType_RGB24, /* raw RGB 24 bits */
+ M4COMMON_kMimeType_RGB565, /* raw, RGB 16 bits */
+ M4COMMON_kMimeType_YUV420,
+ M4COMMON_kMimeType_MPEG4_IFrame /* RC: to support PV art */
+
+} M4COMMON_MimeType;
+
+/* picture type definition from id3v2 tag*/
+typedef enum
+{
+ M4COMMON_kPicType_Other = 0x00,
+ M4COMMON_kPicType_32_32_Icon = 0x01,
+ M4COMMON_kPicType_Other_Icon = 0x02,
+ M4COMMON_kPicType_FrontCover = 0x03,
+ M4COMMON_kPicType_BackCover = 0x04,
+ M4COMMON_kPicType_LeafletPage = 0x05,
+ M4COMMON_kPicType_Media = 0x06,
+ M4COMMON_kPicType_LeadArtist = 0x07,
+ M4COMMON_kPicType_Artist = 0x08,
+ M4COMMON_kPicType_Conductor = 0x09,
+ M4COMMON_kPicType_Orchestra = 0x0A,
+ M4COMMON_kPicType_Composer = 0x0B,
+ M4COMMON_kPicType_Lyricist = 0x0C,
+ M4COMMON_kPicType_RecordingLocation = 0x0D,
+ M4COMMON_kPicType_DuringRecording = 0x0E,
+ M4COMMON_kPicType_DuringPerformance = 0x0F,
+ M4COMMON_kPicType_MovieScreenCapture = 0x10,
+ M4COMMON_kPicType_BrightColouredFish = 0x11,
+ M4COMMON_kPicType_Illustration = 0x12,
+ M4COMMON_kPicType_ArtistLogo = 0x13,
+ M4COMMON_kPicType_StudioLogo = 0x14
+} M4COMMON_PictureType;
+
+/**
+ ******************************************************************************
+ * enum M4COMMON_Orientation
+ * @brief This enum defines the possible orientation of a frame as described
+ * in the EXIF standard.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4COMMON_kOrientationUnknown = 0,
+ M4COMMON_kOrientationTopLeft,
+ M4COMMON_kOrientationTopRight,
+ M4COMMON_kOrientationBottomRight,
+ M4COMMON_kOrientationBottomLeft,
+ M4COMMON_kOrientationLeftTop,
+ M4COMMON_kOrientationRightTop,
+ M4COMMON_kOrientationRightBottom,
+ M4COMMON_kOrientationLeftBottom
+}M4COMMON_Orientation ;
+
+/**
+ ******************************************************************************
+ * structure M4EXIFC_Location
+ * @brief The Image GPS location (example : 48°52.21' )
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Float degrees;
+ M4OSA_Float minsec;
+} M4COMMON_Location;
+
+/**
+ ************************************************************************
+ * structure M4COMMON_MetaDataAlbumArt
+ * @brief This structure defines fields of a album art
+ ************************************************************************
+*/
+typedef struct
+{
+ M4COMMON_MimeType m_mimeType;
+ M4OSA_UInt32 m_uiSize;
+ M4OSA_Void* m_pData;
+
+ M4COMMON_String m_pDescription;
+
+} M4COMMON_MetaDataAlbumArt;
+
+/**
+ ************************************************************************
+ * structure M4COMMON_MetaDataFields
+ * @brief This structure defines fields of metadata information
+ ************************************************************************
+*/
+typedef struct
+{
+ M4COMMON_MetadataType m_MetadataType;
+
+ /* Meta data fields */
+ M4COMMON_String m_pTitle; /**< Title for the media */
+ M4COMMON_String m_pArtist; /**< Performer or artist */
+ M4COMMON_String m_pAlbum; /**< Album title for the media */
+ M4COMMON_String m_pAuthor; /**< Author of the media */
+ M4COMMON_String m_pGenre; /**< Genre (category and style) of the media */
+ M4COMMON_String m_pDescription; /**< Caption or description for the media */
+ M4COMMON_String m_pCopyRights; /**< Notice about organization holding copyright
+ for the media file */
+ M4COMMON_String m_pRecordingYear; /**< Recording year for the media */
+ M4COMMON_String m_pRating; /**< Media rating */
+
+ M4COMMON_String m_pClassification; /**< Classification of the media */
+ M4COMMON_String m_pKeyWords; /**< Media keywords */
+ M4COMMON_String m_pLocation; /**< Location information */
+ M4COMMON_String m_pUrl; /**< Reference of the resource */
+
+ M4OSA_UInt8 m_uiTrackNumber; /**< Track number for the media*/
+ M4OSA_UInt32 m_uiDuration; /**< The track duration in milliseconds */
+
+ M4COMMON_MetaDataAlbumArt m_albumArt; /**< AlbumArt description */
+ M4COMMON_String m_pMood; /**< Mood of the media */
+
+ /**< Modifs ACO 4/12/07 : add Exif specific infos */
+ M4COMMON_String m_pCreationDateTime; /**< date and time original image was generated */
+ M4COMMON_String m_pLastChangeDateTime; /**< file change date and time */
+ M4COMMON_String m_pManufacturer; /**< manufacturer of image input equipment */
+ M4COMMON_String m_pModel; /**< model of image input equipment */
+ M4COMMON_String m_pSoftware; /**< software used */
+ M4COMMON_Orientation m_Orientation; /**< Orientation of the picture */
+
+ /**< Modifs FS 29/08/08 : additional Exif infos */
+ M4OSA_UInt32 m_width; /**< image width in pixels */
+ M4OSA_UInt32 m_height; /**< image height in pixels */
+ M4OSA_UInt32 m_thumbnailSize; /**< size of the thumbnail */
+ M4COMMON_String m_pLatitudeRef; /**< Latitude reference */
+ M4COMMON_Location m_latitude; /**< Latitude */
+ M4COMMON_String m_pLongitudeRef; /**< Longitude reference */
+ M4COMMON_Location m_longitude; /**< Longitude */
+
+} M4COMMON_MetaDataFields;
+
+
+#endif /*M4COMMON_TYPES_H*/
+
diff --git a/libvideoeditor/vss/common/inc/M4DA_Types.h b/libvideoeditor/vss/common/inc/M4DA_Types.h
new file mode 100755
index 0000000..58cab7e
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4DA_Types.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4DA_Types.h
+ * @brief Data access type definition
+ * @note This file implements media specific types
+ ************************************************************************
+*/
+
+#ifndef __M4DA_TYPES_H__
+#define __M4DA_TYPES_H__
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /*__cplusplus*/
+
+/**
+ ************************************************************************
+ * enumeration M4_StreamType
+ * @brief Type used to describe a stream (audio or video data flow).
+ ************************************************************************
+*/
+typedef enum
+{
+ M4DA_StreamTypeUnknown = -1, /**< Unknown type */
+ M4DA_StreamTypeVideoMpeg4 = 0, /**< MPEG-4 video */
+ M4DA_StreamTypeVideoH263 = 1, /**< H263 video */
+ M4DA_StreamTypeAudioAmrNarrowBand = 2, /**< Amr narrow band audio */
+ M4DA_StreamTypeAudioAmrWideBand = 3, /**< Amr wide band audio */
+ M4DA_StreamTypeAudioAac = 4, /**< AAC audio */
+ M4DA_StreamTypeAudioMp3 = 5, /**< MP3 audio */
+ M4DA_StreamTypeVideoMJpeg = 6, /**< MJPEG video */
+ M4DA_StreamTypeAudioPcm = 7, /**< Wav audio */
+ M4DA_StreamTypeAudioMidi = 8, /**< Midi audio */
+ M4DA_StreamTypeVideoMpeg4Avc = 9, /**< MPEG-4 AVC video (h264) */
+ M4DA_StreamTypeAudioAacADTS = 10, /**< AAC ADTS audio */
+ M4DA_StreamTypeAudioAacADIF = 11, /**< AAC ADIF audio */
+ M4DA_StreamTypeAudioWma = 12, /**< WMA audio */
+ M4DA_StreamTypeVideoWmv = 13, /**< WMV video */
+ M4DA_StreamTypeAudioReal = 14, /**< REAL audio */
+ M4DA_StreamTypeVideoReal = 15, /**< REAL video */
+ M4DA_StreamTypeAudioEvrc = 16, /**< Evrc audio */
+ M4DA_StreamTypeTimedText = 20, /**< Timed Text */
+ M4DA_StreamTypeAudioBba = 21, /**< Beat Brew audio format */
+ M4DA_StreamTypeAudioSmaf = 22, /**< SMAF audio */
+ M4DA_StreamTypeAudioImelody = 23, /**< IMELODY audio*/
+ M4DA_StreamTypeAudioXmf = 24, /**< XMF audio */
+ M4DA_StreamTypeAudioBpc = 25, /**< BPC audio */
+
+ /* ADPCM */
+ M4DA_StreamTypeAudioADPcm = 26, /**< ADPCM */
+
+ M4DA_StreamTypeVideoARGB8888 = 27
+} M4_StreamType;
+
+/**
+ ************************************************************************
+ * structure M4_StreamHandler
+ * @brief Base structure to describe a stream.
+ ************************************************************************
+*/
+typedef struct
+{
+ M4_StreamType m_streamType; /**< Stream type */
+ M4OSA_UInt32 m_streamId; /**< Stream Id (unique number defining
+ the stream) */
+ M4OSA_Int32 m_duration; /**< Duration of the stream in milli
+ seconds */
+ M4OSA_UInt32 m_averageBitRate; /**< Average bitrate in kb/s */
+ M4OSA_UInt32 m_maxAUSize; /**< Maximum size of an Access Unit */
+ M4OSA_UInt8* m_pDecoderSpecificInfo; /**< Pointer on specific information required
+ to create a decoder */
+ M4OSA_UInt32 m_decoderSpecificInfoSize; /**< Size of the specific information
+ pointer above */
+ void* m_pUserData; /**< Pointer on User Data
+ (initialized by the user) */
+ M4OSA_UInt32 m_structSize; /**< Size of the structure in bytes */
+ M4OSA_Bool m_bStreamIsOK; /**< Flag to know if stream has no errors after
+ parsing is finished */
+ M4OSA_UInt8* m_pH264DecoderSpecificInfo; /**< Pointer on specific information
+ required to create a decoder */
+ M4OSA_UInt32 m_H264decoderSpecificInfoSize; /**< Size of the specific
+ information pointer above */
+ // MPEG4 & AAC decoders require ESDS info
+ M4OSA_UInt8* m_pESDSInfo; /**< Pointer on MPEG4 or AAC ESDS box */
+ M4OSA_UInt32 m_ESDSInfoSize; /**< Size of the MPEG4 or AAC ESDS box */
+} M4_StreamHandler;
+
+/**
+ ************************************************************************
+ * structure M4_VideoStreamHandler
+ * @brief Extended structure to describe a video stream.
+ ************************************************************************
+*/
+typedef struct
+{
+ M4_StreamHandler m_basicProperties; /**< Audio-Video stream common parameters */
+ M4OSA_UInt32 m_videoWidth; /**< Width of the video in the stream */
+ M4OSA_UInt32 m_videoHeight; /**< Height of the video in the stream */
+ M4OSA_Float m_averageFrameRate; /**< Average frame rate of the video
+ in the stream */
+ M4OSA_Int32 videoRotationDegrees; /**< Video rotation degree */
+ M4OSA_UInt32 m_structSize; /**< Size of the structure in bytes */
+} M4_VideoStreamHandler;
+
+/**
+ ************************************************************************
+ * structure M4_AudioStreamHandler
+ * @brief Extended structure to describe an audio stream.
+ ************************************************************************
+*/
+typedef struct
+{
+ M4_StreamHandler m_basicProperties; /**< Audio-Video stream common parameters */
+ M4OSA_UInt32 m_nbChannels; /**< Number of channels in the audio stream
+ (1-mono, 2-stereo) */
+ M4OSA_UInt32 m_byteFrameLength; /**< Size of frame samples in bytes */
+ M4OSA_UInt32 m_byteSampleSize; /**< Number of bytes per sample */
+ M4OSA_UInt32 m_samplingFrequency; /**< Sample frequency in kHz */
+ M4OSA_UInt32 m_structSize; /**< Size of the structure in bytes */
+} M4_AudioStreamHandler;
+
+#ifdef M4VPS_SUPPORT_TTEXT
+
+/**
+ ************************************************************************
+ * structure M4_TextStreamHandler
+ * @brief Extended structure to describe a text stream.
+ ************************************************************************
+*/
+typedef struct
+{
+ M4_StreamHandler m_basicProperties; /**< Audio-Video stream common parameters */
+ M4OSA_UInt32 m_trackWidth; /**< Width of the video in the stream */
+ M4OSA_UInt32 m_trackHeight; /**< Height of the video in the stream */
+ M4OSA_UInt32 m_trackXpos; /**< X position of the text track in video area */
+ M4OSA_UInt32 m_trackYpos; /**< Y position of the text track in video area */
+ M4OSA_UInt8 back_col_rgba[4]; /**< the background color in RGBA */
+ M4OSA_UInt16 uiLenght; /**< the string length in bytes */
+ M4OSA_UInt32 disp_flag; /**< the way text will be displayed */
+ M4OSA_UInt8 horiz_justif; /**< the horizontal justification of the text */
+ M4OSA_UInt8 verti_justif; /**< the vertical justification of the text */
+ /* style */
+ M4OSA_UInt16 styl_start_char; /**< the first character impacted by style */
+ M4OSA_UInt16 styl_end_char; /**< the last character impacted by style */
+ M4OSA_UInt16 fontID; /**< ID of the font */
+ M4OSA_UInt8 face_style; /**< the text face-style: bold, italic,
+ underlined, plain(default) */
+ M4OSA_UInt8 font_size; /**< size in pixel of font */
+ M4OSA_UInt8 text_col_rgba[4]; /**< the text color in RGBA */
+ /* box */
+ M4OSA_UInt16 box_top; /**< the top position of text box in the track area */
+ M4OSA_UInt16 box_left; /**< the left position of text box in the track area */
+ M4OSA_UInt16 box_bottom; /**< the bottom position of text box in the track area */
+ M4OSA_UInt16 box_right; /**< the right position of text box in the track area */
+ M4OSA_UInt32 m_structSize; /**< Size of the structure in bytes */
+} M4_TextStreamHandler;
+
+#endif /*M4VPS_SUPPORT_TTEXT*/
+
+/**
+ ************************************************************************
+ * structure M4_AccessUnit
+ * @brief Structure to describe an access unit.
+ ************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt32 m_streamID; /**< Id of the stream to get an AU from */
+ M4OSA_MemAddr8 m_dataAddress; /**< Pointer to a memory area with the encoded data */
+ M4OSA_UInt32 m_size; /**< Size of the dataAddress area */
+ M4OSA_Double m_CTS; /**< Composition Time Stamp for the Access Unit */
+ M4OSA_Double m_DTS ; /**< Decoded Time Stamp for the Access Unit */
+ M4OSA_UInt8 m_attribute; /**< RAP information & AU corrupted */
+ M4OSA_UInt32 m_maxsize; /**< Maximum size of the AU */
+ M4OSA_UInt32 m_structSize; /**< Structure size */
+} M4_AccessUnit;
+
+#ifdef __cplusplus
+}
+#endif /*__cplusplus*/
+
+#endif /* __M4DA_TYPES_H__ */
+
diff --git a/libvideoeditor/vss/common/inc/M4DECODER_Common.h b/libvideoeditor/vss/common/inc/M4DECODER_Common.h
new file mode 100755
index 0000000..93e3062
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4DECODER_Common.h
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4DECODER_Common.h
+ * @brief Shell Decoder common interface declaration
+ * @note This file declares the common interfaces that decoder shells must implement
+ *
+ ************************************************************************
+*/
+#ifndef __M4DECODER_COMMON_H__
+#define __M4DECODER_COMMON_H__
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_OptionID.h"
+#include "M4OSA_CoreID.h"
+
+#include "M4READER_Common.h"
+#include "M4VIFI_FiltersAPI.h"
+
+#include "M4_Utils.h"
+
+/* ----- Errors and Warnings ----- */
+
+/**
+ * Warning: there is no new decoded frame to render since the last rendering
+ */
+#define M4WAR_VIDEORENDERER_NO_NEW_FRAME M4OSA_ERR_CREATE(M4_WAR, M4DECODER_COMMON, 0x0001)
+/**
+ * Warning: the deblocking filter is not implemented
+ */
+#define M4WAR_DEBLOCKING_FILTER_NOT_IMPLEMENTED M4OSA_ERR_CREATE(M4_WAR, M4DECODER_COMMON,\
+ 0x000002)
+
+
+/* Error: Stream H263 profiles (other than 0) are not supported */
+#define M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED M4OSA_ERR_CREATE(M4_ERR,\
+ M4DECODER_MPEG4, 0x0001)
+/* Error: Stream H263 not baseline not supported (Supported sizes are CIF, QCIF or SQCIF) */
+#define M4ERR_DECODER_H263_NOT_BASELINE M4OSA_ERR_CREATE(M4_ERR,\
+ M4DECODER_MPEG4, 0x0002)
+
+/**
+ ************************************************************************
+ * enum M4DECODER_AVCProfileLevel
+ * @brief This enum defines the AVC decoder profile and level for the current instance
+ * @note This option can be read from the decoder via M4DECODER_getOption_fct
+ ************************************************************************
+*/
+typedef enum
+{
+ M4DECODER_AVC_kProfile_0_Level_1 = 0,
+ M4DECODER_AVC_kProfile_0_Level_1b,
+ M4DECODER_AVC_kProfile_0_Level_1_1,
+ M4DECODER_AVC_kProfile_0_Level_1_2,
+ M4DECODER_AVC_kProfile_0_Level_1_3,
+ M4DECODER_AVC_kProfile_0_Level_2,
+ M4DECODER_AVC_kProfile_0_Level_2_1,
+ M4DECODER_AVC_kProfile_0_Level_2_2,
+ M4DECODER_AVC_kProfile_0_Level_3,
+ M4DECODER_AVC_kProfile_0_Level_3_1,
+ M4DECODER_AVC_kProfile_0_Level_3_2,
+ M4DECODER_AVC_kProfile_0_Level_4,
+ M4DECODER_AVC_kProfile_0_Level_4_1,
+ M4DECODER_AVC_kProfile_0_Level_4_2,
+ M4DECODER_AVC_kProfile_0_Level_5,
+ M4DECODER_AVC_kProfile_0_Level_5_1,
+ M4DECODER_AVC_kProfile_and_Level_Out_Of_Range = 255
+} M4DECODER_AVCProfileLevel;
+
+/**
+ ************************************************************************
+ * enum M4DECODER_OptionID
+ * @brief This enum defines the decoder options
+ * @note These options can be read from or written to a decoder via M4DECODER_getOption_fct
+ ************************************************************************
+*/
+typedef enum
+{
+ /**
+ Get the version of the core decoder
+ */
+ M4DECODER_kOptionID_Version = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x01),
+ /**
+ Get the size of the currently decoded video
+ */
+ M4DECODER_kOptionID_VideoSize = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x02),
+ /**
+ Set the conversion filter to use at rendering
+ */
+ M4DECODER_kOptionID_OutputFilter = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x03),
+ /**
+ Activate the Deblocking filter
+ */
+ M4DECODER_kOptionID_DeblockingFilter = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x04),
+ /**
+ Get next rendered frame CTS
+ */
+ M4DECODER_kOptionID_NextRenderedFrameCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON,\
+ 0x05),
+
+ /**
+ Set the YUV data to the dummy video decoder
+ */
+ M4DECODER_kOptionID_DecYuvData =
+ M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x06),
+ /**
+ Set the YUV data with color effect applied to the dummy video decoder
+ */
+ M4DECODER_kOptionID_YuvWithEffectNonContiguous =
+ M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x07),
+
+ M4DECODER_kOptionID_YuvWithEffectContiguous =
+ M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x08),
+
+ M4DECODER_kOptionID_EnableYuvWithEffect =
+ M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x09),
+
+ /**
+ * Get the supported video decoders and capabilities */
+ M4DECODER_kOptionID_VideoDecodersAndCapabilities =
+ M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x10),
+
+ /* common to MPEG4 decoders */
+ /**
+ * Get the DecoderConfigInfo */
+ M4DECODER_MPEG4_kOptionID_DecoderConfigInfo = M4OSA_OPTION_ID_CREATE(M4_READ,\
+ M4DECODER_MPEG4, 0x01),
+
+ /* last decoded cts */
+ M4DECODER_kOptionID_AVCLastDecodedFrameCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AVC,\
+ 0x01)
+/* Last decoded cts */
+
+} M4DECODER_OptionID;
+
+
+/**
+ ************************************************************************
+ * struct M4DECODER_MPEG4_DecoderConfigInfo
+ * @brief Contains info read from the MPEG-4 VideoObjectLayer.
+ ************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt8 uiProfile; /**< profile and level as defined in the Visual
+ Object Sequence header, if present */
+ M4OSA_UInt32 uiTimeScale; /**< time scale as parsed in VOL header */
+ M4OSA_UInt8 uiUseOfResynchMarker; /**< Usage of resynchronization marker */
+ M4OSA_Bool bDataPartition; /**< If 1 data partitioning is used. */
+ M4OSA_Bool bUseOfRVLC; /**< Usage of RVLC for the stream */
+
+} M4DECODER_MPEG4_DecoderConfigInfo;
+
+
+/**
+ ***********************************************************************
+ * structure M4DECODER_VideoSize
+ * @brief This structure defines the video size (width and height)
+ * @note This structure is used to retrieve via the M4DECODER_getOption_fct
+ * function the size of the current decoded video
+ ************************************************************************
+*/
+typedef struct _M4DECODER_VideoSize
+{
+ M4OSA_UInt32 m_uiWidth; /**< video width in pixels */
+ M4OSA_UInt32 m_uiHeight; /**< video height in pixels */
+
+} M4DECODER_VideoSize;
+
+/**
+ ************************************************************************
+ * structure M4DECODER_OutputFilter
+ * @brief This structure defines the conversion filter
+ * @note This structure is used to retrieve the filter function
+ * pointer and its user data via the function
+ * M4DECODER_getOption_fct with the option
+ * M4DECODER_kOptionID_OutputFilter
+ ************************************************************************
+*/
+typedef struct _M4DECODER_OutputFilter
+{
+ M4OSA_Void *m_pFilterFunction; /**< pointer to the filter function */
+ M4OSA_Void *m_pFilterUserData; /**< user data of the filter */
+
+} M4DECODER_OutputFilter;
+
+/**
+ ************************************************************************
+ * enum M4DECODER_VideoType
+ * @brief This enum defines the video types used to create decoders
+ * @note This enum is used internally by the VPS to identify a currently supported
+ * video decoder interface. Each decoder is registered with one of this type associated.
+ * When a decoder instance is needed, this type is used to identify
+ * and retrieve its interface.
+ ************************************************************************
+*/
+typedef enum
+{
+ M4DECODER_kVideoTypeMPEG4 = 0,
+ M4DECODER_kVideoTypeMJPEG,
+ M4DECODER_kVideoTypeAVC,
+ M4DECODER_kVideoTypeWMV,
+ M4DECODER_kVideoTypeREAL,
+ M4DECODER_kVideoTypeYUV420P,
+
+ M4DECODER_kVideoType_NB /* number of decoders, keep it as last enum entry */
+
+} M4DECODER_VideoType ;
+
+typedef struct {
+ M4OSA_UInt32 mProfile;
+ M4OSA_UInt32 mLevel;
+} VideoProfileLevel;
+
+typedef struct {
+ VideoProfileLevel *profileLevel;
+ M4OSA_UInt32 profileNumber;
+} VideoComponentCapabilities;
+
+typedef struct {
+ M4_StreamType codec;
+ VideoComponentCapabilities *component;
+ M4OSA_UInt32 componentNumber;
+} VideoDecoder;
+
+typedef struct {
+ VideoDecoder *decoder;
+ M4OSA_UInt32 decoderNumber;
+} M4DECODER_VideoDecoders;
+/**
+ ************************************************************************
+ * @brief creates an instance of the decoder
+ * @note allocates the context
+ *
+ * @param pContext: (OUT) Context of the decoder
+ * @param pStreamHandler: (IN) Pointer to a video stream description
+ * @param pGlobalInterface: (IN) Pointer to the M4READER_GlobalInterface structure that must
+ * be used by the decoder to read data from the stream
+ * @param pDataInterface: (IN) Pointer to the M4READER_DataInterface structure that must
+ * be used by the decoder to read data from the stream
+ * @param pAccessUnit (IN) Pointer to an access unit (allocated by the caller)
+ * where the decoded data are stored
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_ALLOC a memory allocation has failed
+ * @return M4ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4DECODER_create_fct) (M4OSA_Context *pContext,
+ M4_StreamHandler *pStreamHandler,
+ M4READER_GlobalInterface *pGlobalInterface,
+ M4READER_DataInterface *pDataInterface,
+ M4_AccessUnit *pAccessUnit,
+ M4OSA_Void* pUserData);
+
+/**
+ ************************************************************************
+ * @brief destroy the instance of the decoder
+ * @note after this call the context is invalid
+ *
+ * @param context: (IN) Context of the decoder
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4DECODER_destroy_fct) (M4OSA_Context context);
+
+/**
+ ************************************************************************
+ * @brief get an option value from the decoder
+ * @note this function follows the set/get option mechanism described in OSAL 3.0
+ * it allows the caller to retrieve a property value:
+ * -the version number of the decoder
+ * -the size (widthxheight) of the image
+ *
+ * @param context: (IN) Context of the decoder
+ * @param optionId: (IN) indicates the option to set
+ * @param pValue: (IN/OUT) pointer to structure or value (allocated by user) where
+ * option is stored
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER The context is invalid (in DEBUG only)
+ * @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+ * @return M4ERR_STATE State automaton is not applied
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4DECODER_getOption_fct)(M4OSA_Context context, M4OSA_OptionID optionId,
+ M4OSA_DataOption pValue);
+
+/**
+ ************************************************************************
+ * @brief set an option value of the decoder
+ * @note this function follows the set/get option mechanism described in OSAL 3.0
+ * it allows the caller to set a property value:
+ * -the conversion filter to use at rendering
+ *
+ * @param context: (IN) Context of the decoder
+ * @param optionId: (IN) Identifier indicating the option to set
+ * @param pValue: (IN) Pointer to structure or value (allocated by user)
+ * where option is stored
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_BAD_OPTION_ID The option ID is not a valid one
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_PARAMETER The option parameter is invalid
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4DECODER_setOption_fct)(M4OSA_Context context, M4OSA_OptionID optionId,
+ M4OSA_DataOption pValue);
+
+/**
+ ************************************************************************
+ * @brief Decode Access Units up to a target time
+ * @note Parse and decode the stream until it is possible to output a decoded image for which
+ * the composition time is equal to or greater than the targeted time passed in
+ * The data are read from the reader data interface
+ *
+ * @param context: (IN) Context of the decoder
+ * @param pTime: (IN/OUT) IN: Time to decode up to (in milliseconds)
+ * OUT:Time of the last decoded frame (in ms)
+ * @param bJump: (IN) 0 if no jump occurred just before this call
+ * 1 if a jump has just been made
+ * @param tolerance: (IN) We may decode an earlier frame within the tolerance.
+ * The time difference is specified in milliseconds.
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4WAR_NO_MORE_AU there is no more access unit to decode (end of stream)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4DECODER_decode_fct) (M4OSA_Context context, M4_MediaTime* pTime,
+ M4OSA_Bool bJump, M4OSA_UInt32 tolerance);
+
+/**
+ ************************************************************************
+ * @brief Renders the video at the specified time.
+ * @note
+ * @param context: (IN) Context of the decoder
+ * @param pTime: (IN/OUT) IN: Time to render to (in milliseconds)
+ * OUT:Time of the actually rendered frame (in ms)
+ * @param pOutputPlane:(OUT) Output plane filled with decoded data (converted)
+ * @param bForceRender:(IN) 1 if the image must be rendered even if it has already been
+ * 0 if not (in which case the function can return
+ * M4WAR_VIDEORENDERER_NO_NEW_FRAME)
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER At least one parameter is not properly set
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_ALLOC There is no more available memory
+ * @return M4WAR_VIDEORENDERER_NO_NEW_FRAME If the frame to render has already been rendered
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4DECODER_render_fct) (M4OSA_Context context, M4_MediaTime* pTime,
+ M4VIFI_ImagePlane* pOutputPlane,
+ M4OSA_Bool bForceRender);
+
+/**
+ ************************************************************************
+ * structure M4DECODER_VideoInterface
+ * @brief This structure defines the generic video decoder interface
+ * @note This structure stores the pointers to functions of one video decoder type.
+ * The decoder type is one of the M4DECODER_VideoType
+ ************************************************************************
+*/
+typedef struct _M4DECODER_VideoInterface
+{
+ M4DECODER_create_fct* m_pFctCreate;
+ M4DECODER_destroy_fct* m_pFctDestroy;
+ M4DECODER_getOption_fct* m_pFctGetOption;
+ M4DECODER_setOption_fct* m_pFctSetOption;
+ M4DECODER_decode_fct* m_pFctDecode;
+ M4DECODER_render_fct* m_pFctRender;
+} M4DECODER_VideoInterface;
+
+#endif /*__M4DECODER_COMMON_H__*/
diff --git a/libvideoeditor/vss/common/inc/M4DECODER_Null.h b/libvideoeditor/vss/common/inc/M4DECODER_Null.h
new file mode 100644
index 0000000..047d857
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4DECODER_Null.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file M4DECODER_Null.h
+ * @brief Implementation of a "null" video decoder, i.e. a decoder
+ * that does not do actual decoding.
+ * @note This file defines the getInterface function.
+*************************************************************************
+*/
+#ifndef __M4DECODER_NULL_H__
+#define __M4DECODER_NULL_H__
+
+#include "M4DECODER_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ ************************************************************************
+ * @brief Retrieves the interface implemented by the decoder
+ * @param pDecoderType : Pointer to a M4DECODER_VideoType
+ * (allocated by the caller)
+ * that will be filled with the decoder type
+ * @param pDecoderInterface : Address of a pointer that will be set to
+ * the interface implemented by this decoder.
+ * The interface is a structure allocated by
+ * this function and must be freed by the caller.
+ *
+ * @returns : M4NO_ERROR if OK
+ * M4ERR_ALLOC if allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4DECODER_NULL_getInterface( M4DECODER_VideoType *pDecoderType,
+ M4DECODER_VideoInterface **pDecoderInterface);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4DECODER_NULL_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h b/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h
new file mode 100755
index 0000000..cba02a0
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4ENCODER_AudioCommon.h
+ * @brief VES audio encoders shell interface.
+ * @note This file defines the types internally used by the VES to abstract audio encoders
+ ******************************************************************************
+*/
+#ifndef __M4ENCODER_AUDIOCOMMON_H__
+#define __M4ENCODER_AUDIOCOMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "M4OSA_OptionID.h" /* for M4OSA_OPTION_ID_CREATE() */
+#include "M4OSA_CoreID.h"
+
+#define M4ENCODER_AUDIO_NB_CHANNELS_MAX 2
+/* WARNING: this value must be equal to the number of samples grabbed */
+//#define M4ENCODER_AUDIO_PCM_SAMPLE_NUMBER 960 /* imposed by the AAC encoder. */
+#define M4ENCODER_AUDIO_PCM_SAMPLE_NUMBER 1024 /* imposed by the AAC encoder. */
+
+
+/**
+ ******************************************************************************
+ * enumeration M4ENCODER_Audio_OptionID
+ * @brief This enum defines the core AAC shell encoder options
+ ******************************************************************************
+*/
+typedef enum
+{
+ /* Maximum generated AU size */
+ M4ENCODER_Audio_maxAUsize = M4OSA_OPTION_ID_CREATE(M4_READ,M4ENCODER_AUDIO, 0x01)
+
+} M4ENCODER_Audio_OptionID;
+
+
+ /**
+ ******************************************************************************
+ * enum M4ENCODER_SamplingFrequency
+ * @brief This enum defines the audio sampling frequency.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_k8000Hz = 8000,
+ M4ENCODER_k11025Hz = 11025,
+ M4ENCODER_k12000Hz = 12000,
+ M4ENCODER_k16000Hz = 16000,
+ M4ENCODER_k22050Hz = 22050,
+ M4ENCODER_k24000Hz = 24000,
+ M4ENCODER_k32000Hz = 32000,
+ M4ENCODER_k44100Hz = 44100,
+ M4ENCODER_k48000Hz = 48000
+} M4ENCODER_SamplingFrequency;
+
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_AudioFormat
+ * @brief This enum defines the audio compression formats.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kAMRNB = 0,
+ M4ENCODER_kAAC,
+ M4ENCODER_kAudioNULL, /**< No compression */
+ M4ENCODER_kMP3,
+ M4ENCODER_kAudio_NB /* number of encoders, keep it as last enum entry */
+
+} M4ENCODER_AudioFormat;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_ChannelNumber
+ * @brief This enum defines the number of audio channels.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kMono = 0,
+ M4ENCODER_kStereo,
+ M4ENCODER_kStereoNoInterleave
+} M4ENCODER_ChannelNumber;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_AudioBitrate
+ * @brief This enum defines the available bitrates.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kAudio_4_75_KBPS = 4750,
+ M4ENCODER_kAudio_5_15_KBPS = 5150,
+ M4ENCODER_kAudio_5_9_KBPS = 5900,
+ M4ENCODER_kAudio_6_7_KBPS = 6700,
+ M4ENCODER_kAudio_7_4_KBPS = 7400,
+ M4ENCODER_kAudio_7_95_KBPS = 7950,
+ M4ENCODER_kAudio_8_KBPS = 8000,
+ M4ENCODER_kAudio_10_2_KBPS = 10200,
+ M4ENCODER_kAudio_12_2_KBPS = 12200,
+ M4ENCODER_kAudio_16_KBPS = 16000,
+ M4ENCODER_kAudio_24_KBPS = 24000,
+ M4ENCODER_kAudio_32_KBPS = 32000,
+ M4ENCODER_kAudio_40_KBPS = 40000,
+ M4ENCODER_kAudio_48_KBPS = 48000,
+ M4ENCODER_kAudio_56_KBPS = 56000,
+ M4ENCODER_kAudio_64_KBPS = 64000,
+ M4ENCODER_kAudio_80_KBPS = 80000,
+ M4ENCODER_kAudio_96_KBPS = 96000,
+ M4ENCODER_kAudio_112_KBPS = 112000,
+ M4ENCODER_kAudio_128_KBPS = 128000,
+ M4ENCODER_kAudio_144_KBPS = 144000,
+ M4ENCODER_kAudio_160_KBPS = 160000,
+ M4ENCODER_kAudio_192_KBPS = 192000,
+ M4ENCODER_kAudio_224_KBPS = 224000,
+ M4ENCODER_kAudio_256_KBPS = 256000,
+ M4ENCODER_kAudio_320_KBPS = 320000
+} M4ENCODER_AudioBitrate;
+
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_AacRegulation
+ * @brief The current mode of the bitrate regulation.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kAacRegulNone = 0, /**< no bitrate regulation */
+ M4ENCODER_kAacBitReservoir /**< better quality, but more CPU consumed */
+} M4ENCODER_AacRegulation;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_AmrSID
+ * @brief This enum defines the SID of the AMR encoder.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kAmrNoSID = 0 /**< no SID */
+} M4ENCODER_AmrSID;
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_AacParams
+ * @brief This structure defines all the settings specific to the AAC encoder.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4ENCODER_AacRegulation Regulation;
+ M4OSA_Bool bHighSpeed;
+ M4OSA_Bool bTNS;
+ M4OSA_Bool bPNS;
+ M4OSA_Bool bIS;
+ M4OSA_Bool bMS;
+} M4ENCODER_AacParams;
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_AudioParams
+ * @brief This structure defines all the settings available when encoding audio.
+ ******************************************************************************
+*/
+typedef struct s_M4ENCODER_AudioParams
+{
+ M4ENCODER_SamplingFrequency Frequency; /**< the sampling frequency */
+ M4ENCODER_ChannelNumber ChannelNum; /**< the number of channels (mono, stereo, ..) */
+ M4ENCODER_AudioBitrate Bitrate; /**< bitrate, see enum */
+ M4ENCODER_AudioFormat Format; /**< audio compression format, AMR, AAC ... */
+ union {
+ M4ENCODER_AacParams AacParam;
+ M4ENCODER_AmrSID AmrSID;
+ } SpecifParam; /**< the audio encoder specific parameters */
+} M4ENCODER_AudioParams;
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_AudioDecSpecificInfo
+ * @brief This structure describes the decoder specific info buffer.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_MemAddr8 pInfo; /**< the buffer address */
+ M4OSA_UInt32 infoSize; /**< the buffer size in bytes */
+} M4ENCODER_AudioDecSpecificInfo;
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_AudioBuffer
+ * @brief This structure defines the data buffer.
+ ******************************************************************************
+*/
+typedef struct
+{
+ /**< the table of buffers (unused buffers are set to NULL) */
+ M4OSA_MemAddr8 pTableBuffer[M4ENCODER_AUDIO_NB_CHANNELS_MAX];
+ /**< the table of the size of corresponding buffer at same index */
+ M4OSA_UInt32 pTableBufferSize[M4ENCODER_AUDIO_NB_CHANNELS_MAX];
+} M4ENCODER_AudioBuffer;
+
+typedef M4OSA_ERR (M4AE_init) (M4OSA_Context* hContext, M4OSA_Void* pUserData);
+typedef M4OSA_ERR (M4AE_cleanUp) (M4OSA_Context pContext);
+typedef M4OSA_ERR (M4AE_open) (M4OSA_Context pContext, M4ENCODER_AudioParams *params,
+ M4ENCODER_AudioDecSpecificInfo *decSpecInfo,
+ M4OSA_Context grabberContext);
+typedef M4OSA_ERR (M4AE_close) (M4OSA_Context pContext);
+typedef M4OSA_ERR (M4AE_step) (M4OSA_Context pContext, M4ENCODER_AudioBuffer *inBuffer,
+ M4ENCODER_AudioBuffer *outBuffer);
+typedef M4OSA_ERR (M4AE_getOption) (M4OSA_Context pContext, M4OSA_OptionID option,
+ M4OSA_DataOption *valuePtr);
+/**
+ ******************************************************************************
+ * struct M4ENCODER_AudioGlobalInterface
+ * @brief Defines all the functions required for an audio encoder shell.
+ ******************************************************************************
+*/
+typedef struct _M4ENCODER_AudioGlobalInterface
+{
+ M4AE_init* pFctInit;
+ M4AE_cleanUp* pFctCleanUp;
+ M4AE_open* pFctOpen;
+ M4AE_close* pFctClose;
+ M4AE_step* pFctStep;
+ M4AE_getOption* pFctGetOption;
+} M4ENCODER_AudioGlobalInterface;
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4ENCODER_AUDIOCOMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4ENCODER_common.h b/libvideoeditor/vss/common/inc/M4ENCODER_common.h
new file mode 100755
index 0000000..9064602
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4ENCODER_common.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4ENCODER_common.h
+ * @note This file defines the types internally used by the VES to abstract encoders
+
+ ******************************************************************************
+*/
+#ifndef __M4ENCODER_COMMON_H__
+#define __M4ENCODER_COMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/**
+ * Video preprocessing common interface */
+#include "M4VPP_API.h"
+
+/**
+ * Writer common interface */
+#include "M4WRITER_common.h"
+
+/* IMAGE STAB */
+/* percentage of image suppressed (computed from the standard dimension).*/
+#define M4ENCODER_STAB_FILTER_CROP_PERCENTAGE 10
+ /* WARNING: take the inferior even dimension, ex: 10% for QCIF output => 192x158 */
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_OpenMode
+ * @brief Definition of open mode for the encoder.
+ * @note DEFAULT : pointer to M4ENCODER_open() which uses default parameters
+ * ADVANCED : pointer to M4ENCODER_open_advanced() which allows customizing
+ * various encoding parameters
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_OPEN_DEFAULT,
+ M4ENCODER_OPEN_ADVANCED
+} M4ENCODER_OpenMode;
+
+ /**
+ ******************************************************************************
+ * enum M4ENCODER_FrameRate
+ * @brief This enum defines the encoded video framerates.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_k5_FPS,
+ M4ENCODER_k7_5_FPS,
+ M4ENCODER_k10_FPS,
+ M4ENCODER_k12_5_FPS,
+ M4ENCODER_k15_FPS,
+ M4ENCODER_k20_FPS,
+ M4ENCODER_k25_FPS,
+ M4ENCODER_k30_FPS,
+ M4ENCODER_kVARIABLE_FPS, /**< Variable video bitrate */
+ M4ENCODER_kUSE_TIMESCALE /**< Advanced encoding, use timescale indication rather
+ than framerate */
+} M4ENCODER_FrameRate;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_InputFormat
+ * @brief This enum defines the video format of the grabbing.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kIYUV420=0, /**< YUV 4:2:0 planar (standard input for mpeg-4 video) */
+ M4ENCODER_kIYUV422, /**< YUV422 planar */
+ M4ENCODER_kIYUYV, /**< YUV422 interlaced, luma first */
+ M4ENCODER_kIUYVY, /**< YUV422 interlaced, chroma first */
+ M4ENCODER_kIJPEG, /**< JPEG compressed frames */
+ M4ENCODER_kIRGB444, /**< RGB 12 bits 4:4:4 */
+ M4ENCODER_kIRGB555, /**< RGB 15 bits 5:5:5 */
+ M4ENCODER_kIRGB565, /**< RGB 16 bits 5:6:5 */
+ M4ENCODER_kIRGB24, /**< RGB 24 bits 8:8:8 */
+ M4ENCODER_kIRGB32, /**< RGB 32 bits */
+ M4ENCODER_kIBGR444, /**< BGR 12 bits 4:4:4 */
+ M4ENCODER_kIBGR555, /**< BGR 15 bits 5:5:5 */
+ M4ENCODER_kIBGR565, /**< BGR 16 bits 5:6:5 */
+ M4ENCODER_kIBGR24, /**< BGR 24 bits 8:8:8 */
+ M4ENCODER_kIBGR32 /**< BGR 32 bits */
+} M4ENCODER_InputFormat;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_Format
+ * @brief This enum defines the video compression formats.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kMPEG4 = 0,
+ M4ENCODER_kH263,
+ M4ENCODER_kH264,
+ M4ENCODER_kJPEG,
+ M4ENCODER_kMJPEG,
+ M4ENCODER_kNULL,
+ M4ENCODER_kYUV420, /**< No compression */
+ M4ENCODER_kYUV422, /**< No compression */
+
+ M4ENCODER_kVideo_NB /* number of decoders, keep it as last enum entry */
+} M4ENCODER_Format;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_FrameWidth
+ * @brief This enum defines the available frame widths.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_SQCIF_Width = 128, /**< SQCIF 128x96 */
+ M4ENCODER_QQVGA_Width = 160, /**< QQVGA 160x120 */
+ M4ENCODER_QCIF_Width = 176, /**< QCIF 176x144 */
+ M4ENCODER_QVGA_Width = 320, /**< QVGA 320x240 */
+ M4ENCODER_CIF_Width = 352, /**< CIF 352x288 */
+ M4ENCODER_VGA_Width = 640, /**< VGA 640x480 */
+ M4ENCODER_SVGA_Width = 800, /**< SVGA 800x600 */
+ M4ENCODER_XGA_Width = 1024, /**< XGA 1024x768 */
+ M4ENCODER_XVGA_Width = 1280, /**< XVGA 1280x1024 */
+/* +PR LV5807 */
+ M4ENCODER_WVGA_Width = 800, /**< WVGA 800 x 480 */
+ M4ENCODER_NTSC_Width = 720, /**< NTSC 720 x 480 */
+/* -PR LV5807 */
+
+/* +CR Google */
+ M4ENCODER_640_360_Width = 640, /**< 640x360 */
+ // StageFright encoders require %16 resolution
+ M4ENCODER_854_480_Width = 848, /**< 848x480 */
+ M4ENCODER_1280_720_Width = 1280, /**< 720p 1280x720 */
+ // StageFright encoders require %16 resolution
+ M4ENCODER_1080_720_Width = 1088, /**< 720p 1088x720 */
+ M4ENCODER_960_720_Width = 960, /**< 720p 960x720 */
+ M4ENCODER_1920_1080_Width = 1920 /**< 1080p 1920x1080 */
+/* -CR Google */
+
+} M4ENCODER_FrameWidth;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_FrameHeight
+ * @brief This enum defines the available frame heights.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_SQCIF_Height = 96, /**< SQCIF 128x96 */
+ M4ENCODER_QQVGA_Height = 120, /**< QQVGA 160x120 */
+ M4ENCODER_QCIF_Height = 144, /**< QCIF 176x144 */
+ M4ENCODER_QVGA_Height = 240, /**< QVGA 320x240 */
+ M4ENCODER_CIF_Height = 288, /**< CIF 352x288 */
+ M4ENCODER_VGA_Height = 480, /**< VGA 640x480 */
+ M4ENCODER_SVGA_Height = 600, /**< SVGA 800x600 */
+ M4ENCODER_XGA_Height = 768, /**< XGA 1024x768 */
+ M4ENCODER_XVGA_Height = 1024, /**< XVGA 1280x1024 */
+/* +PR LV5807 */
+ M4ENCODER_WVGA_Height = 480, /**< WVGA 800 x 480 */
+ M4ENCODER_NTSC_Height = 480, /**< NTSC 720 x 480 */
+/* -PR LV5807 */
+
+/* +CR Google */
+ M4ENCODER_640_360_Height = 360, /**< 640x360 */
+ M4ENCODER_854_480_Height = 480, /**< 854x480 */
+ M4ENCODER_1280_720_Height = 720, /**< 720p 1280x720 */
+ M4ENCODER_1080_720_Height = 720, /**< 720p 1080x720 */
+ M4ENCODER_960_720_Height = 720, /**< 720p 960x720 */
+ // StageFright encoders require %16 resolution
+ M4ENCODER_1920_1080_Height = 1088 /**< 1080p 1920x1080 */
+/* -CR Google */
+} M4ENCODER_FrameHeight;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_Bitrate
+ * @brief This enum defines the available bitrates.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_k28_KBPS = 28000,
+ M4ENCODER_k40_KBPS = 40000,
+ M4ENCODER_k64_KBPS = 64000,
+ M4ENCODER_k96_KBPS = 96000,
+ M4ENCODER_k128_KBPS = 128000,
+ M4ENCODER_k192_KBPS = 192000,
+ M4ENCODER_k256_KBPS = 256000,
+ M4ENCODER_k384_KBPS = 384000,
+ M4ENCODER_k512_KBPS = 512000,
+ M4ENCODER_k800_KBPS = 800000
+
+} M4ENCODER_Bitrate;
+
+/* IMAGE STAB */
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_StabMode
+ * @brief The current mode of the stabilization filter.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kStabOff = 0, /**< stabilization filter is disabled */
+ M4ENCODER_kStabCentered, /**< stabilization filter is enabled. */
+ /**< Video input and output must have the same dimensions. Output
+ image will have black borders */
+ M4ENCODER_kStabGrabMore /**< stabilization filter is enabled. */
+ /**< Video input dimensions must be bigger than output. The ratio
+ is indicated by M4ENCODER_STAB_FILTER_CROP_PERCENTAGE */
+
+} M4ENCODER_StabMode;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_FrameMode
+ * @brief Values to drive the encoder behaviour (type of frames produced)
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4ENCODER_kNormalFrame = 0, /**< let the encoder decide which type of frame to encode */
+ M4ENCODER_kLastFrame = 1, /**< force encoder the flush all its buffers because it is
+ last frame */
+ M4ENCODER_kIFrame = 2 /**< force encoder to generate an I frame */
+
+} M4ENCODER_FrameMode;
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_Params
+ * @brief This structure defines all the settings available when encoding.
+ ******************************************************************************
+*/
+typedef struct
+{
+ /* Input */
+ M4ENCODER_InputFormat InputFormat; /**< Input video format (grabbing) */
+ M4ENCODER_FrameWidth InputFrameWidth; /**< Input Frame width (grabbing) */
+ M4ENCODER_FrameHeight InputFrameHeight; /**< Input Frame height (grabbing) */
+
+ /* Output */
+ M4ENCODER_FrameWidth FrameWidth; /**< Frame width */
+ M4ENCODER_FrameHeight FrameHeight; /**< Frame height */
+ M4ENCODER_Bitrate Bitrate; /**< Bitrate, see enum */
+ M4ENCODER_FrameRate FrameRate; /**< Framerate, see enum */
+ M4ENCODER_Format Format; /**< Video compression format, H263, MPEG4,
+ MJPEG ... */
+ M4OSA_Int32 videoProfile; /** video profile */
+ M4OSA_Int32 videoLevel; /** video level */
+} M4ENCODER_Params;
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_AdvancedParams
+ * @brief This structure defines the advanced settings available for MPEG-4 encoding.
+ ******************************************************************************
+*/
+typedef struct
+{
+ /**
+ * Input parameters (grabber coupled with encoder): */
+ M4ENCODER_InputFormat InputFormat; /**< Input video format */
+ M4ENCODER_FrameWidth InputFrameWidth; /**< Input Frame width */
+ M4ENCODER_FrameHeight InputFrameHeight; /**< Input Frame height */
+
+ /**
+ * Common settings for H263 and MPEG-4: */
+ M4ENCODER_FrameWidth FrameWidth; /**< Frame width */
+ M4ENCODER_FrameHeight FrameHeight; /**< Frame height */
+ M4OSA_UInt32 Bitrate; /**< Free value for the bitrate */
+ /**< Framerate (if set to M4ENCODER_kUSE_TIMESCALE use uiRateFactor & uiTimeScale instead) */
+ M4ENCODER_FrameRate FrameRate;
+ /**< Video compression format: H263 or MPEG4 */
+ M4ENCODER_Format Format;
+ M4OSA_Int32 videoProfile; /** output video profile */
+ M4OSA_Int32 videoLevel; /** output video level */
+ M4OSA_UInt32 uiHorizontalSearchRange; /**< Set to 0 will use default value (15) */
+ M4OSA_UInt32 uiVerticalSearchRange; /**< Set to 0 will use default value (15) */
+ /**< Set to 0 will use default value (0x7FFF i.e. let engine decide when to put an I) */
+ M4OSA_UInt32 uiStartingQuantizerValue;
+ /**< Enable if priority is quality, Disable if priority is framerate */
+ M4OSA_Bool bInternalRegulation;
+ /**< Ratio between the encoder frame rate and the actual frame rate */
+ M4OSA_UInt8 uiRateFactor;
+ /**< I frames periodicity, set to 0 will use default value */
+ M4OSA_UInt32 uiIVopPeriod;
+ /**< Motion estimation [default=0 (all tools), disable=8 (no tool)] */
+ M4OSA_UInt8 uiMotionEstimationTools;
+
+ /**
+ * Settings for MPEG-4 only: */
+ M4OSA_UInt32 uiTimeScale; /**< Free value for the timescale */
+ M4OSA_Bool bErrorResilience; /**< Disabled by default */
+ /**< Disabled by default (if enabled, bErrorResilience should be enabled too!) */
+ M4OSA_Bool bDataPartitioning;
+ M4OSA_Bool bAcPrediction; /**< AC prediction [default=1, disable=0] */
+
+} M4ENCODER_AdvancedParams;
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_StillPictureParams
+ * @brief This structure defines all the settings available when encoding still
+ * picture.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4ENCODER_FrameWidth FrameWidth; /**< Frame width */
+ M4ENCODER_FrameHeight FrameHeight; /**< Frame height */
+ M4OSA_UInt32 Quality; /**< Bitrate, see enum */
+ M4ENCODER_Format InputFormat; /**< YUV 420 or 422 */
+ M4ENCODER_Format Format; /**< Video compression format, H263, MPEG4,
+ MJPEG ... */
+ M4OSA_Bool PreProcessNeeded; /**< Is the call to the VPP is necessary */
+ M4OSA_Bool EncodingPerStripes; /**< Is encoding per stripes */
+
+} M4ENCODER_StillPictureParams;
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_Header
+ * @brief This structure defines the buffer where the sequence header is put.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_MemAddr8 pBuf; /**< Buffer for the header */
+ M4OSA_UInt32 Size; /**< Size of the data */
+
+} M4ENCODER_Header;
+
+/**
+ ******************************************************************************
+ * enum M4ENCODER_OptionID
+ * @brief This enum defines all available options.
+ ******************************************************************************
+*/
+typedef enum
+{
+ /**< set the fragment size, option value is M4OSA_UInt32 type */
+ M4ENCODER_kOptionID_VideoFragmentSize = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
+ M4ENCODER_COMMON, 0x01),
+
+ /**< set the stabilization filtering, option value is M4ENCODER_StabMode type */
+ M4ENCODER_kOptionID_ImageStabilization = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
+ M4ENCODER_COMMON, 0x02),
+
+ /**< prevent writing of any AU, option value is M4OSA_Bool type */
+ M4ENCODER_kOptionID_InstantStop = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
+ M4ENCODER_COMMON, 0x03),
+
+ /**< get the DSI (encoder header) generated by the encoder */
+ M4ENCODER_kOptionID_EncoderHeader = M4OSA_OPTION_ID_CREATE (M4_READ ,\
+ M4ENCODER_COMMON, 0x04),
+/*+ CR LV6775 -H.264 Trimming */
+
+ M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr= M4OSA_OPTION_ID_CREATE (M4_READ ,\
+ M4ENCODER_COMMON, 0x05),
+ M4ENCODER_kOptionID_H264ProcessNALUContext = M4OSA_OPTION_ID_CREATE (M4_READ ,\
+ M4ENCODER_COMMON, 0x06)
+/*-CR LV6775 -H.264 Trimming */
+} M4ENCODER_OptionID;
+
+/*+ CR LV6775 -H.264 Trimming */
+typedef M4OSA_ERR (H264MCS_ProcessEncodedNALU_fct)(M4OSA_Void*ainstance,M4OSA_UInt8* inbuff,
+ M4OSA_Int32 inbuf_size,
+ M4OSA_UInt8 *outbuff, M4OSA_Int32 *outbuf_size);
+//*- CR LV6775 -H.264 Trimming */
+
+typedef M4OSA_Void* M4ENCODER_Context;
+
+typedef M4OSA_ERR (M4ENCODER_init) (
+ M4ENCODER_Context* pContext,
+ M4WRITER_DataInterface* pWriterDataInterface,
+ M4VPP_apply_fct* pVPPfct,
+ M4VPP_Context pVPPctxt,
+ M4OSA_Void* pExternalAPI,
+ M4OSA_Void* pUserData
+);
+
+typedef M4OSA_ERR (M4ENCODER_open) (
+ M4ENCODER_Context pContext,
+ M4SYS_AccessUnit* pAU,
+ M4OSA_Void* pParams /* Can be M4ENCODER_Params, M4ENCODER_AdvancedParams or
+ M4ENCODER_StillPictureParams */
+);
+
+typedef M4OSA_ERR (M4ENCODER_start) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_stop) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_pause) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_resume) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_close) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_cleanup) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_regulBitRate) (M4ENCODER_Context pContext);
+
+typedef M4OSA_ERR (M4ENCODER_encode) (
+ M4ENCODER_Context pContext,
+ M4VIFI_ImagePlane* pInPlane,
+ M4OSA_Double Cts,
+ M4ENCODER_FrameMode FrameMode
+);
+
+typedef M4OSA_ERR (M4ENCODER_setOption) (
+ M4ENCODER_Context pContext,
+ M4OSA_UInt32 optionID,
+ M4OSA_DataOption optionValue
+);
+
+typedef M4OSA_ERR (M4ENCODER_getOption) (
+ M4ENCODER_Context pContext,
+ M4OSA_UInt32 optionID,
+ M4OSA_DataOption optionValue
+);
+
+/**
+ ******************************************************************************
+ * struct M4ENCODER_GlobalInterface
+ * @brief Defines all the functions required for an encoder shell.
+ ******************************************************************************
+*/
+
+typedef struct _M4ENCODER_GlobalInterface
+{
+ M4ENCODER_init* pFctInit;
+ M4ENCODER_open* pFctOpen;
+
+ M4ENCODER_start* pFctStart; /* Grabber mode */
+ M4ENCODER_stop* pFctStop; /* Grabber mode */
+
+ M4ENCODER_pause* pFctPause; /* Grabber mode */
+ M4ENCODER_resume* pFctResume; /* Grabber mode */
+
+ M4ENCODER_close* pFctClose;
+ M4ENCODER_cleanup* pFctCleanup;
+
+ M4ENCODER_regulBitRate* pFctRegulBitRate;
+ M4ENCODER_encode* pFctEncode; /* Standalone mode */
+
+ M4ENCODER_setOption* pFctSetOption;
+ M4ENCODER_getOption* pFctGetOption;
+} M4ENCODER_GlobalInterface;
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4ENCODER_COMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4OSA_CoreID.h b/libvideoeditor/vss/common/inc/M4OSA_CoreID.h
new file mode 100755
index 0000000..7408fc8
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4OSA_CoreID.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4OSA_CoreID.h
+ * @brief defines the uniques component identifiers used for memory management
+ * and optionID mechanism
+ * @note
+ *
+ ************************************************************************
+*/
+#ifndef __M4OSA_COREID_H__
+#define __M4OSA_COREID_H__
+
+/* CoreId are defined on 14 bits */
+/* we start from 0x0100, lower values are reserved for osal core components */
+
+/* reader shells*/
+#define M4READER_COMMON 0x0100
+#define M4READER_AVI 0x0101
+#define M4READER_AMR 0x0102
+#define M4READER_3GP 0x0103
+#define M4READER_NET 0x0104
+#define M4READER_3GP_HTTP 0x0105
+#define M4READER_MP3 0x0106
+#define M4READER_WAV 0x0107
+#define M4READER_MIDI 0x0108
+#define M4READER_ASF 0x0109
+#define M4READER_REAL 0x010A
+#define M4READER_AAC 0x010B
+#define M4READER_FLEX 0x010C
+#define M4READER_BBA 0x010D
+#define M4READER_SYNTHESIS_AUDIO 0x010E
+#define M4READER_JPEG 0x010F
+
+
+/* writer shells*/
+#define M4WRITER_COMMON 0x0110
+#define M4WRITER_AVI 0x0111
+#define M4WRITER_AMR 0x0112
+#define M4WRITER_3GP 0x0113
+#define M4WRITER_JPEG 0x0116
+#define M4WRITER_MP3 0x0117
+
+/* decoder shells */
+#define M4DECODER_COMMON 0x0120
+#define M4DECODER_JPEG 0x0121
+#define M4DECODER_MPEG4 0x0122
+#define M4DECODER_AUDIO 0x0123
+#define M4DECODER_AVC 0x0124
+#define M4DECODER_MIDI 0x0125
+#define M4DECODER_WMA 0x0126
+#define M4DECODER_WMV 0x0127
+#define M4DECODER_RMV 0x0128
+#define M4DECODER_RMA 0x0129
+#define M4DECODER_AAC 0x012A
+#define M4DECODER_BEATBREW 0x012B
+#define M4DECODER_EXTERNAL 0x012C
+
+/* encoder shells */
+#define M4ENCODER_COMMON 0x0130
+#define M4ENCODER_JPEG 0x0131
+#define M4ENCODER_MPEG4 0x0132
+#define M4ENCODER_AUDIO 0x0133
+#define M4ENCODER_VID_NULL 0x0134
+#define M4ENCODER_MJPEG 0x0135
+#define M4ENCODER_MP3 0x0136
+#define M4ENCODER_H264 0x0137
+#define M4ENCODER_AAC 0x0138
+#define M4ENCODER_AMRNB 0x0139
+#define M4ENCODER_AUD_NULL 0x013A
+#define M4ENCODER_EXTERNAL 0x013B
+
+/* cores */
+#define M4JPG_DECODER 0x0140
+#define M4JPG_ENCODER 0x0141
+
+#define M4MP4_DECODER 0x0142
+#define M4MP4_ENCODER 0x0143
+
+#define M4AVI_COMMON 0x0144
+#define M4AVI_READER 0x0145
+#define M4AVI_WRITER 0x0146
+
+#define M4HTTP_ENGINE 0x0147
+
+#define M4OSA_TMPFILE 0x0148
+#define M4TOOL_TIMER 0x0149
+
+#define M4AMR_READER 0x014A
+
+#define M4MP3_READER 0x014B
+
+#define M4WAV_READER 0x014C
+#define M4WAV_WRITER 0x014D
+#define M4WAV_COMMON 0x014E
+
+#define M4ADTS_READER 0x014F
+#define M4ADIF_READER 0x016A
+
+#define M4SPS 0x0150
+#define M4EXIF_DECODER 0x0151
+#define M4EXIF_ENCODER 0x0152
+#define M4GIF_DECODER 0x0153
+#define M4GIF_ENCODER 0x0154
+#define M4PNG_DECODER 0x0155
+#define M4PNG_ENCODER 0x0156
+#define M4WBMP_DECODER 0x0157
+#define M4WBMP_ENCODER 0x0158
+
+#define M4AMR_WRITER 0x0159 /**< no room to put it along M4AMR_READER */
+
+
+#define M4AVC_DECODER 0x015A
+#define M4AVC_ENCODER 0x015B
+
+#define M4ASF_READER 0x015C
+#define M4WMDRM_AGENT 0x015D
+#define M4MIDI_READER 0x0162 /**< no room before the presenters */
+#define M4RM_READER 0x163
+#define M4RMV_DECODER 0x164
+#define M4RMA_DECODER 0x165
+
+#define M4TOOL_XML 0x0166
+#define M4TOOL_EFR 0x0167 /**< Decryption module for Video Artist */
+#define M4IAL_FTN 0x0168 /* FTN implementation of the IAL */
+#define M4FTN 0x0169 /* FTN library */
+
+/* presenter */
+#define M4PRESENTER_AUDIO 0x0160
+#define M4PRESENTER_VIDEO 0x0161
+
+/* high level interfaces (vps, etc..)*/
+#define M4VPS 0x0170
+#define M4VTS 0x0171
+#define M4VXS 0x0172
+#define M4CALLBACK 0x0173
+#define M4VES 0x0174
+#define M4PREPROCESS_VIDEO 0x0175
+#define M4GRAB_AUDIO 0x0176
+#define M4GRAB_VIDEO 0x0177
+#define M4VSSAVI 0x0178
+#define M4VSS3GPP 0x0179
+#define M4PTO3GPP 0x017A
+#define M4PVX_PARSER 0x017B
+#define M4VCS 0x017C
+#define M4MCS 0x017D
+#define M4MNMC 0x0180 /**< mnm controller */
+#define M4TTEXT_PARSER 0x0181 /**< timed text */
+#define M4MM 0x0182 /**< Music manager */
+#define M4MDP 0x0183 /**< Metadata parser */
+#define M4MMSQLCORE 0x0184
+#define M4VPSIL 0x0185
+#define M4FILEIL 0x0186 /* IL file Interface */
+#define M4MU 0x0187
+#define M4VEE 0x0188 /**< Video effect engine */
+#define M4VA 0x0189 /* VideoArtist */
+#define M4JTS 0x018A
+#define M4JTSIL 0x018B
+#define M4AIR 0x018C /**< AIR */
+#define M4SPE 0x018D /**< Still picture editor */
+#define M4VS 0x018E /**< Video Studio (xVSS) */
+#define M4VESIL 0x018F /**< VES il */
+#define M4ID3 0x0190 /**< ID3 Tag Module */
+#define M4SC 0x0191 /**< Media Scanner */
+#define M4TG 0x0192 /**< Thumbnail Generator*/
+#define M4TS 0x0193 /**< Thumbnail storage */
+#define M4MB 0x0194 /**< Media browser */
+
+/* high level application (test or client app) */
+#define M4APPLI 0x0200
+#define M4VA_APPLI 0x0201 /**< Video Artist test application */
+
+/* external components (HW video codecs, etc.) */
+#define M4VD_EXTERNAL 0x0300
+#define M4VE_EXTERNAL 0x0301
+
+
+/* priority to combine with module ids */
+#define M4HIGH_PRIORITY 0xC000
+#define M4MEDIUM_PRIORITY 0x8000
+#define M4LOW_PRIORITY 0x4000
+#define M4DEFAULT_PRIORITY 0x0000
+
+
+#endif /*__M4OSA_COREID_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h b/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h
new file mode 100755
index 0000000..6afc50c
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4PCMR_CoreReader.h
+ * @brief PCM/WAV core reader declarations
+ * @note This file declares the functions of the PCM/WAV core reader
+ ************************************************************************
+*/
+
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_FileReader.h"
+#include "M4SYS_AccessUnit.h"
+#include "M4TOOL_VersionInfo.h"
+
+
+#define M4PCMC_ERR_PCM_NOT_COMPLIANT M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000001)
+#define M4PCMC_ERR_PCM_NO_SPACE_AVAIL M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000002)
+#define M4PCMC_ERR_PCM_NOT_SUPPORTED M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000003)
+
+#define M4PCMC_WAR_END_OF_STREAM M4OSA_ERR_CREATE(M4_WAR, M4WAV_COMMON ,0x000001)
+
+/**
+ ************************************************************************
+ * structure M4PCMC_DecoderSpecificInfo
+ * @brief This structure defines the decoder specific information
+ * @note This structure is used by the WAV reader to store all
+ * decoder specific information:
+ * - Sample Frequency
+ * - Average Bytes per second
+ * - Number of channels (1 or 2)
+ * - Number of bits per sample (8 or 16)
+ ************************************************************************
+*/
+typedef struct {
+ M4OSA_UInt32 SampleFrequency;
+ M4OSA_UInt32 AvgBytesPerSec;
+ M4OSA_UInt32 DataLength;
+ M4OSA_UInt16 nbChannels;
+ M4OSA_UInt16 BitsPerSample;
+} M4PCMC_DecoderSpecificInfo;
+
+/**
+ ************************************************************************
+ * enum M4PCMR_State
+ * @brief This enum defines the PCM Reader states
+ * @note The state automaton is documented separately
+ * consult the design specification for details
+ ************************************************************************
+*/
+typedef enum {
+ M4PCMR_kInit = 0x0000,
+ M4PCMR_kOpening = 0x0100,
+ M4PCMR_kOpening_streamRetrieved = 0x0101,
+ M4PCMR_kReading = 0x0200,
+ M4PCMR_kReading_nextAU = 0x0201,
+ M4PCMR_kClosed = 0x0300
+} M4PCMR_State;
+
+/**
+ ************************************************************************
+ * enum M4PCMR_OptionID
+ * @brief This enum defines the PCM Reader options
+ * @note Only one option is available:
+ * - M4PCMR_kPCMblockSize: sets the size of the PCM block to read
+ * from WAV file
+ ************************************************************************
+*/
+typedef enum {
+ M4PCMR_kPCMblockSize = M4OSA_OPTION_ID_CREATE(M4_READ, M4WAV_READER, 0x01)
+} M4PCMR_OptionID;
+
+/**
+ ************************************************************************
+ * structure M4PCMR_Context
+ * @brief This structure defines the PCM Reader context
+ * @note This structure is used for all WAV Reader calls to store
+ * the context
+ ************************************************************************
+*/
+typedef struct {
+ M4OSA_MemAddr32 m_pDecoderSpecInfo;/**< Pointer to the decoder specific info
+ structure contained in pStreamDesc
+ (only used to free...) */
+ M4OSA_FileReadPointer* m_pFileReadFunc;/**< The OSAL set of pointer to function for
+ file management */
+ M4OSA_Context m_fileContext; /**< The context needed by OSAL to manage File */
+ M4PCMC_DecoderSpecificInfo m_decoderConfig;/**< Specific configuration for decoder */
+ M4PCMR_State m_state; /**< state of the wav reader */
+ M4PCMR_State m_microState; /**< state of the read wav stream */
+ M4OSA_UInt32 m_blockSize; /**< Size of the read block */
+ M4OSA_UInt32 m_offset; /**< Offset of the PCM read (i.e m_offset of the
+ file without wav header) */
+ M4OSA_MemAddr32 m_pAuBuffer; /**< Re-used buffer for AU content storage */
+ M4OSA_FilePosition m_dataStartOffset;/**< offset of the pcm data beginning into
+ the file */
+} M4PCMR_Context;
+
+/*************************************************************************
+ *
+ * Prototypes of all WAV reader functions
+ *
+ ************************************************************************/
+M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+ M4OSA_FileReadPointer* pFileFunction);
+M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc);
+M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs);
+M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU);
+M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU);
+M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+ M4SYS_SeekAccessMode seekAccessMode, M4OSA_Time* pObtainCTS);
+M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context);
+M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+ M4OSA_DataOption* pValue);
+M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+ M4OSA_DataOption Value);
+M4OSA_ERR M4PCMR_getVersion(M4_VersionInfo *pVersion);
diff --git a/libvideoeditor/vss/common/inc/M4READER_3gpCom.h b/libvideoeditor/vss/common/inc/M4READER_3gpCom.h
new file mode 100755
index 0000000..22a5a03
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4READER_3gpCom.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4READER_3gpCom.h
+ * @brief Generic encapsulation of the core 3gp reader
+ * @note This file declares the generic shell interface retrieving function
+ * of the 3GP reader
+ ************************************************************************
+*/
+
+#ifndef __M4READER_3GPCOM_H__
+#define __M4READER_3GPCOM_H__
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4READER_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Error: Function M4READER_Com3GP_getNextStreamHandler must be called before.
+ */
+#define M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000001)
+
+/**
+ * Error: No video stream H263 in file.
+ */
+#define M4ERR_VIDEO_NOT_H263 M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000002)
+
+/**
+ * There has been a problem with the decoder configuration information, seems to be invalid */
+#define M4ERR_READER3GP_DECODER_CONFIG_ERROR M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000003)
+
+#define M4READER_COM3GP_MAXVIDEOSTREAM 5
+#define M4READER_COM3GP_MAXAUDIOSTREAM 5
+#define M4READER_COM3GP_MAXTEXTSTREAM 5
+
+typedef struct
+{
+ M4OSA_Context m_pFFContext; /**< core file format context */
+
+ M4_StreamHandler* m_AudioStreams[M4READER_COM3GP_MAXAUDIOSTREAM];
+ M4_StreamHandler* m_pAudioStream; /**< pointer to the current allocated audio
+ stream handler */
+
+ M4_StreamHandler* m_VideoStreams[M4READER_COM3GP_MAXVIDEOSTREAM];
+ M4_StreamHandler* m_pVideoStream; /**< pointer to the current allocated video
+ stream handler */
+
+#ifdef M4VPS_SUPPORT_TTEXT
+ M4_StreamHandler* m_TextStreams[M4READER_COM3GP_MAXTEXTSTREAM];
+ M4_StreamHandler* m_pTextStream; /**< pointer to the current allocated text
+ stream handler */
+#endif /*M4VPS_SUPPORT_TTEXT*/
+
+} M4READER_Com3GP_Context;
+
+/**
+ ************************************************************************
+ * structure M4READER_3GP_Buffer (but nothing specific to 3GP, nor to a reader !)
+ * @brief This structure defines a buffer that can be used to exchange data (should be in OSAL)
+ ************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt32 size; /**< the size in bytes of the buffer */
+ M4OSA_MemAddr8 dataAddress; /**< the pointer to the buffer */
+} M4READER_3GP_Buffer;
+
+/**
+ ************************************************************************
+ * enum M4READER_3GP_OptionID
+ * @brief This enum defines the reader options specific to the 3GP format.
+ * @note These options can be read from or written to a 3GP reader via M4READER_3GP_getOption.
+ ************************************************************************
+*/
+typedef enum
+{
+ /**
+ * Get the DecoderConfigInfo for H263,
+ * option value must be a pointer to M4READER_3GP_H263Properties allocated by caller */
+ M4READER_3GP_kOptionID_H263Properties = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x01),
+
+ /**
+ * Get the Purple Labs drm information */
+ M4READER_3GP_kOptionID_PurpleLabsDrm = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x02),
+
+ /**
+ * Set the Fast open mode (Only the first AU of each stream will be parsed -> less CPU,
+ less RAM). */
+ M4READER_3GP_kOptionID_FastOpenMode = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x03),
+
+ /**
+ * Set the Audio only mode (the video stream won't be opened) */
+ M4READER_3GP_kOptionID_AudioOnly = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x04),
+
+ /**
+ * Set the Video only mode (the audio stream won't be opened) */
+ M4READER_3GP_kOptionID_VideoOnly = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x05),
+
+ /**
+ * Get the next video CTS */
+ M4READER_3GP_kOptionID_getNextVideoCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x06)
+
+} M4READER_3GP_OptionID;
+
+
+/**
+ ************************************************************************
+ * struct M4READER_3GP_H263Properties
+ * @brief Contains info about H263 stream read from the 3GP file.
+ ************************************************************************
+*/
+typedef struct
+{
+ /**< the profile as defined in the Visual Object Sequence header, if present */
+ M4OSA_UInt8 uiProfile;
+ /**< the level as defined in the Visual Object Sequence header, if present */
+ M4OSA_UInt8 uiLevel;
+
+} M4READER_3GP_H263Properties;
+
+/**
+ ************************************************************************
+ * @brief Get the next stream found in the 3gp file
+ * @note
+ * @param pContext: (IN) Context of the reader
+ * @param pMediaFamily: (OUT) Pointer to a user allocated M4READER_MediaFamily that will
+ * be filled with the media family of the found stream
+ * @param pStreamHandler: (OUT) Pointer to a stream handler that will be allocated and
+ * filled with the found stream description
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER At least one parameter is not properly set
+ * @return M4WAR_NO_MORE_STREAM No more available stream in the media (all streams found)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_Com3GP_getNextStreamHandler(M4OSA_Context context,
+ M4READER_MediaFamily *pMediaFamily,
+ M4_StreamHandler **pStreamHandler);
+
+/**
+ ************************************************************************
+ * @brief Prepare the access unit (AU)
+ * @note An AU is the smallest possible amount of data to be decoded by a decoder.
+ * @param pContext: (IN) Context of the reader
+ * @param    pStreamHandler (IN)     The stream handler of the stream the access unit is associated with
+ * @param pAccessUnit (IN/OUT) Pointer to an access unit to fill with read data
+ * (the au structure is allocated by the user, and must
+ * be initialized by calling M4READER_fillAuStruct_fct
+ * after creation)
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER At least one parameter is not properly set
+ * @return   M4ERR_ALLOC             Memory allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_Com3GP_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+ M4_AccessUnit *pAccessUnit);
+
+/**
+ ************************************************************************
+ * @brief Cleans up the stream handler
+ * @param pContext: (IN/OUT) Context of the reader shell
+ * @param pStreamHandler: (IN/OUT) Stream handler
+ * @return M4ERR_PARAMETER: The context is null
+ * @return M4NO_ERROR: No error
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_Com3GP_cleanUpHandler(M4_StreamHandler* pStreamHandler);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4READER_3GPCOM_H__ */
+
diff --git a/libvideoeditor/vss/common/inc/M4READER_Amr.h b/libvideoeditor/vss/common/inc/M4READER_Amr.h
new file mode 100755
index 0000000..b6e7f97
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4READER_Amr.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ************************************************************************
+ * @file M4READER_Amr.h
+ * @brief Generic encapsulation of the core amr reader
+ * @note This file declares the generic shell interface retrieving function
+ * of the AMR reader
+ ************************************************************************
+*/
+#ifndef __M4READER_AMR_H__
+#define __M4READER_AMR_H__
+
+#include "M4READER_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+*************************************************************************
+* @brief Retrieves the generic interfaces implemented by the reader
+*
+* @param pMediaType : Pointer on a M4READER_MediaType (allocated by the caller)
+* that will be filled with the media type supported by this reader
+* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
+* implemented by this reader. The interface is a structure allocated
+* by the function and must be un-allocated by the caller.
+* @param pRdrDataInterface : Address of a pointer that will be set to the data interface
+* implemented by this reader. The interface is a structure allocated
+* by the function and must be un-allocated by the caller.
+*
+* @returns : M4NO_ERROR if OK
+* ERR_ALLOC if an allocation failed
+* ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+*************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
+ M4READER_GlobalInterface **pRdrGlobalInterface,
+ M4READER_DataInterface **pRdrDataInterface);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4READER_AMR_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4READER_Common.h b/libvideoeditor/vss/common/inc/M4READER_Common.h
new file mode 100755
index 0000000..8863a7e
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4READER_Common.h
@@ -0,0 +1,717 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4READER_Common.h
+ * @brief Shell Reader common interface declaration
+ * @note This file declares the common interfaces that reader shells must implement
+ *
+ ************************************************************************
+*/
+#ifndef __M4READER_COMMON_H__
+#define __M4READER_COMMON_H__
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_FileReader.h"
+#include "M4OSA_CoreID.h"
+#include "M4DA_Types.h"
+#include "M4Common_types.h"
+
+/* ERRORS */
+#define M4ERR_READER_UNKNOWN_STREAM_TYPE M4OSA_ERR_CREATE(M4_ERR, M4READER_COMMON, 0x0001)
+
+/* WARNINGS */
+#define M4WAR_READER_NO_METADATA M4OSA_ERR_CREATE(M4_WAR, M4READER_COMMON, 0x0001)
+#define M4WAR_READER_INFORMATION_NOT_PRESENT M4OSA_ERR_CREATE(M4_WAR, M4READER_COMMON, 0x0002)
+
+
+/**
+ ************************************************************************
+ * enum M4READER_MediaType
+ * @brief This enum defines the Media types used to create media readers
+ * @note This enum is used internally by the VPS to identify a currently supported
+ * media reader interface. Each reader is registered with one of this type associated.
+ *          When a reader instance is needed, this type is used to identify
+ *          and retrieve its interface.
+ ************************************************************************
+*/
+typedef enum
+{
+ M4READER_kMediaTypeUnknown = -1, /**< Unknown media type */
+ M4READER_kMediaType3GPP = 0, /**< 3GPP file media type */
+ M4READER_kMediaTypeAVI = 1, /**< AVI file media type */
+ M4READER_kMediaTypeAMR = 2, /**< AMR file media type */
+ M4READER_kMediaTypeMP3 = 3, /**< MP3 file media type */
+ M4READER_kMediaTypeRTSP = 4, /**< RTSP network accessed media type */
+ M4READER_kMediaType3GPPHTTP = 5, /**< Progressively downloaded 3GPP file media type */
+ M4READER_kMediaTypePVHTTP = 6, /**< Packet Video HTTP proprietary type */
+ M4READER_kMediaTypeWAV = 7, /**< WAV file media type */
+ M4READER_kMediaType3GPEXTHTTP = 8, /**< An external progressively downloaded 3GPP file
+ media type */
+ M4READER_kMediaTypeAAC = 9, /**< ADTS and ADIF AAC support */
+ M4READER_kMediaTypeREAL = 10, /**< REAL Media type */
+ M4READER_kMediaTypeASF = 11, /**< ASF Media type */
+ M4READER_kMediaTypeFLEXTIME = 12, /**< FlexTime Media type */
+ M4READER_kMediaTypeBBA = 13, /**< Beatbrew audio Media type */
+ M4READER_kMediaTypeSYNTHAUDIO = 14, /**< Synthesis audio Media type */
+ M4READER_kMediaTypePCM = 15, /**< PCM Media type */
+ M4READER_kMediaTypeJPEG = 16, /**< JPEG Media type */
+ M4READER_kMediaTypeGIF = 17, /**< GIF Media type */
+    M4READER_kMediaTypeADIF       = 18,    /**< AAC-ADIF Media type */
+ M4READER_kMediaTypeADTS = 19, /**< AAC-ADTS Media type */
+
+ M4READER_kMediaType_NB /* number of readers, keep it as last enum entry */
+
+} M4READER_MediaType;
+
+/**
+ ************************************************************************
+ * enum M4READER_MediaFamily
+ * @brief This enum defines the Media family of a stream
+ * @note This enum is used internally by the VPS to identify what kind of stream
+ * has been retrieved via getNextStream() function.
+ ************************************************************************
+*/
+typedef enum
+{
+ M4READER_kMediaFamilyUnknown = -1,
+ M4READER_kMediaFamilyVideo = 0,
+ M4READER_kMediaFamilyAudio = 1,
+ M4READER_kMediaFamilyText = 2
+} M4READER_MediaFamily;
+
+
+
+/**
+ ************************************************************************
+ * enum M4READER_OptionID
+ * @brief This enum defines the reader options
+ * @note These options can be read from a reader via M4READER_getOption_fct
+ ************************************************************************
+*/
+typedef enum
+{
+ /**
+ Get the duration of the movie (in ms)
+ */
+ M4READER_kOptionID_Duration = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0),
+
+ /**
+ Get the version of the core reader
+ */
+ M4READER_kOptionID_Version = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 1),
+
+ /**
+ Get the copyright from the media (if present)
+    (currently implemented for 3GPP only: copyright is read from the cprt atom in the udta, if present)
+ */
+ M4READER_kOptionID_Copyright= M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 2),
+
+
+ /**
+ Set the OSAL file reader functions to the reader (type of value: M4OSA_FileReadPointer*)
+ */
+ M4READER_kOptionID_SetOsaFileReaderFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ,\
+ M4READER_COMMON, 3),
+
+ /**
+ Set the OSAL file writer functions to the reader (type of value: M4OSA_FileWriterPointer*)
+ */
+ M4READER_kOptionID_SetOsaFileWriterFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ,\
+ M4READER_COMMON, 4),
+
+ /**
+ Set the OSAL file writer functions to the reader (type of value: M4OSA_NetFunction*)
+ */
+ M4READER_kOptionID_SetOsaNetFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 5),
+
+ /**
+ Creation time in sec. since midnight, Jan. 1, 1970 (type of value: M4OSA_UInt32*)
+ (available only for 3GPP content, including PGD)
+ */
+ M4READER_kOptionID_CreationTime = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 6),
+
+ /**
+ Bitrate in bps (type of value: M4OSA_Double*)
+ */
+ M4READER_kOptionID_Bitrate = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 7),
+
+ /**
+ Tag ID3v1 of MP3 source (type of value: M4MP3R_ID3Tag*)
+ */
+ M4READER_kOptionID_Mp3Id3v1Tag = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 8),
+
+ /**
+ Tag ID3v2 of MP3 source (type of value: M4MP3R_ID3Tag*)
+ */
+ M4READER_kOptionID_Mp3Id3v2Tag = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 9),
+
+ /**
+ Number of Access Unit in the Audio stream (type of value: M4OSA_UInt32*)
+ */
+ M4READER_kOptionID_GetNumberOfAudioAu = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xA),
+
+ /**
+ Number of frames per bloc
+ */
+ M4READER_kOptionID_GetNbframePerBloc = M4OSA_OPTION_ID_CREATE(M4_READ,\
+ M4READER_COMMON, 0xB),
+
+ /**
+ Flag for protection presence
+ */
+ M4READER_kOptionID_GetProtectPresence = M4OSA_OPTION_ID_CREATE(M4_READ,\
+ M4READER_COMMON, 0xC),
+
+ /**
+ Set DRM Context
+ */
+ M4READER_kOptionID_SetDRMContext = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xD),
+
+ /**
+ Get ASF Content Description Object
+ */
+ M4READER_kOptionID_ContentDescription = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xE),
+
+ /**
+ Get ASF Content Description Object
+ */
+ M4READER_kOptionID_ExtendedContentDescription = M4OSA_OPTION_ID_CREATE(M4_READ,\
+ M4READER_COMMON, 0xF),
+
+ /**
+ Get Asset 3gpp Fields
+ */
+ M4READER_kOptionID_3gpAssetFields = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x10),
+
+ /**
+ Set the max metadata size supported in the reader
+ Only relevant in 3gp parser till now, but can be used for other readers
+ */
+ M4READER_kOptionID_MaxMetadataSize = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_COMMON, 0x11),
+
+ M4READER_kOptionID_GetMetadata = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x12),
+ /**
+ Get 3gpp 'ftyp' atom
+ */
+ M4READER_kOptionID_3gpFtypBox = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x13),
+
+
+ /* value is M4OSA_Bool* */
+ /* return the drm protection status of the file*/
+ M4READER_kOptionID_isProtected = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x14),
+
+ /* value is a void* */
+ /* return the aggregate rights of the file*/
+ /* The buffer must be allocated by the application and must be big enough*/
+ /* By default, the size for WMDRM is 76 bytes */
+ M4READER_kOptionID_getAggregateRights = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x15),
+ /**
+ Get ASF Content Description Object
+ */
+ M4READER_kOptionID_ExtendedContentEncryption = M4OSA_OPTION_ID_CREATE(M4_READ,\
+ M4READER_COMMON, 0x16),
+
+ /**
+ Number of Access Unit in the Video stream (type of value: M4OSA_UInt32*)
+ */
+ M4READER_kOptionID_GetNumberOfVideoAu = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x17),
+
+ /**
+ Chunk mode activation size in case of JPG reader */
+ M4READER_kOptionID_JpegChunckSize = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x18),
+
+ /**
+ Check if ASF file contains video */
+ M4READER_kOptionID_hasVideo = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x19),
+
+ /**
+ Set specific read mode for Random Access JPEG */
+ M4READER_kOptionID_JpegRAMode = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_COMMON, 0x20),
+
+ /**
+ Get Thumbnail buffer in case of JPG reader */
+ M4READER_kOptionID_JpegThumbnail = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x21),
+
+ /**
+ Get FPDATA buffer in case of JPG reader */
+ M4READER_kOptionID_JpegFPData = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x22),
+
+ /**
+ Get JPEG info (progressive, subsampling) */
+ M4READER_kOptionID_JpegInfo= M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x23)
+
+
+/*****************************************/
+} M4READER_OptionID;
+/*****************************************/
+
+/**
+ ************************************************************************
+ * structure M4READER_CopyRight
+ * @brief This structure defines a copyRight description
+ * @note This structure is used to retrieve the copyRight of the media
+ * (if present) via the getOption() function
+ ************************************************************************
+*/
+typedef struct _M4READER_CopyRight
+{
+ /**
+ Pointer to copyright data (allocated by user)
+ */
+ M4OSA_UInt8* m_pCopyRight;
+
+ /**
+        Copyright size. The m_uiCopyRightSize field must
+        be initialized with the size available in the m_pCopyRight buffer
+ */
+ M4OSA_UInt32 m_uiCopyRightSize;
+
+} M4READER_CopyRight;
+
+
+
+/**
+ ************************************************************************
+ * structure M4READER_StreamDataOption
+ * @brief This structure defines a generic stream data option
+ * @note      It is used to set or get a stream specific data defined
+ * by a relevant reader option ID.
+ ************************************************************************
+*/
+typedef struct _M4READER_StreamDataOption
+{
+ M4_StreamHandler* m_pStreamHandler; /**< identifier of the stream */
+ M4OSA_Void* m_pOptionValue; /**< value of the data option to get or to set */
+
+} M4READER_StreamDataOption;
+
+/**
+ ************************************************************************
+ * enumeration M4_EncodingFormat
+ * @brief Text encoding format
+ ************************************************************************
+*/
+// typedef enum
+// {
+// M4_kEncFormatUnknown = 0, /**< Unknown format */
+// M4_kEncFormatASCII = 1, /**< ISO-8859-1. Terminated with $00 */
+// M4_kEncFormatUTF8 = 2, /**< UTF-8 encoded Unicode . Terminated with $00 */
+// M4_kEncFormatUTF16 = 3 /**< UTF-16 encoded Unicode. Terminated with $00 00 */
+/*} M4_EncodingFormat;*/
+
+/**
+ ************************************************************************
+ * structure M4_StringAttributes
+ * @brief This structure defines string attribute
+ ************************************************************************
+*/
+// typedef struct
+// {
+// M4OSA_Void* m_pString; /**< Pointer to text */
+// M4OSA_UInt32 m_uiSize; /**< Size of text */
+// M4_EncodingFormat m_EncodingFormat; /**< Text encoding format */
+// } M4_StringAttributes;
+
+
+/**
+ ************************************************************************
+ * structure M4READER_Buffer
+ * @brief This structure defines a buffer in all readers
+ ************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt8* m_pData;
+ M4OSA_UInt32 m_uiBufferSize;
+} M4READER_Buffer;
+
+typedef struct
+{
+ M4OSA_UInt32 m_uiSessionId;
+ M4OSA_UInt32 m_uiMediaId;
+ M4OSA_UInt32 m_uiNbInstance;
+ M4OSA_Char** m_pInstance;
+} M4_SdpAssetInstance;
+/*
+typedef enum
+{
+ M4READER_kUnknownFormat = 0,
+ M4READER_kTagID3V1,
+ M4READER_kTagID3V2,
+ M4READER_kASFContentDesc,
+ M4READER_k3GppAssetBoxFromUDTA,
+ M4READER_k3GppAssetBoxFromSDP,
+ M4READER_kJpegExif
+} M4READER_MetaDataType;*/
+
+
+/**
+ ************************************************************************
+ * structure M4_3gpAssetFields
+ * @brief This structure defines fields of a 3gpp asset information
+ ************************************************************************
+*/
+typedef struct
+{
+ M4COMMON_MetaDataFields m_metadata;
+
+ M4OSA_UInt32 m_uiSessionID; /* For SDP */
+ M4OSA_UInt32 m_uiMediaID; /* For SDP */
+
+
+ /* Note: The two following fields were added for internal use
+ (For Music manager project..) !! */
+ M4_StreamType m_VideoStreamType; /**< Video stream type */
+ M4_StreamType m_AudioStreamType; /**< Audio stream type */
+
+} M4_MetaDataFields;
+
+
+#define M4_METADATA_STR_NB 22 /* one string in album art structure*/
+
+typedef struct
+{
+ M4OSA_UInt32 m_uiNbBuffer;
+ M4_SdpAssetInstance* m_pAssetInfoInst; /* Set of 3gpp asset boxes */
+ M4COMMON_MetaDataAlbumArt m_albumArt; /* RC: PV specific album art:added
+ here because this type is used by
+ union below in streaming */
+
+} M4READER_netInfos;
+
+
+typedef union
+{
+ M4READER_Buffer m_pTagID3Buffer[2]; /* Tag ID3 V1, V2 */
+ struct
+ {
+ M4READER_Buffer m_pAsfDescContent; /* ASF description content buffer */
+ M4READER_Buffer m_pAsfExtDescContent; /* ASF extended description content buffer */
+ } m_asf;
+ M4_MetaDataFields m_pMetadataFields; /* Already parsed and filled 3gpp asset fields */
+ M4READER_netInfos m_pAssetInfoInstance; /* Set of 3gpp asset boxes in the sdp file */
+
+} M4_MetadataBuffer;
+
+
+
+
+/*********** READER GLOBAL Interface ************************************/
+
+/**
+ ************************************************************************
+ * @brief create an instance of the reader
+ * @note create the context
+ * @param pContext: (OUT) pointer on a reader context
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_ALLOC a memory allocation has failed
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_create_fct) (M4OSA_Context* pContext);
+
+/**
+ ************************************************************************
+ * @brief destroy the instance of the reader
+ * @note after this call the context is invalid
+ * @param context: (IN) Context of the reader
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_destroy_fct) (M4OSA_Context context);
+
+
+/**
+ ************************************************************************
+ * @brief open the reader and initializes its created instance
+ * @note this function, for the network reader, sends the DESCRIBE
+ * @param context: (IN) Context of the reader
+ * @param pFileDescriptor: (IN) Pointer to proprietary data identifying the media to open
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_open_fct) (M4OSA_Context context, M4OSA_Void* pFileDescriptor);
+
+
+/**
+ ************************************************************************
+ * @brief close the reader
+ * @note
+ * @param context: (IN) Context of the reader
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_close_fct) (M4OSA_Context context);
+
+
+
+/**
+ ************************************************************************
+ * @brief Get the next stream found in the media
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pMediaFamily: (OUT) pointer to a user allocated M4READER_MediaFamily that will
+ * be filled with the media family of the found stream
+ * @param pStreamHandler: (OUT) pointer to a stream handler that will be allocated and
+ * filled with the found stream description
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4WAR_NO_MORE_STREAM no more available stream in the media (all streams found)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_getNextStream_fct) (M4OSA_Context context,
+ M4READER_MediaFamily *pMediaFamily,
+ M4_StreamHandler **pStreamHandler);
+
+
+/**
+ ************************************************************************
+ * @brief fill the access unit structure with initialization values
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler: (IN) pointer to the stream handler to which the access unit
+ * will be associated
+ * @param pAccessUnit: (IN/OUT) pointer to the access unit (allocated by the caller)
+ * to initialize
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_ALLOC there is no more memory available
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_fillAuStruct_fct) (M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler,
+ M4_AccessUnit *pAccessUnit);
+
+/**
+ ************************************************************************
+ * @brief starts the instance of the reader
+ * @note only needed for network until now...
+ * @param context: (IN) Context of the reader
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_start_fct) (M4OSA_Context context);
+
+/**
+ ************************************************************************
+ * @brief stop reading
+ * @note only needed for network until now... (makes a pause)
+ * @param context: (IN) Context of the reader
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_stop_fct) (M4OSA_Context context);
+
+
+/**
+ ************************************************************************
+ * @brief get an option value from the reader
+ * @note this function follows the set/get option mechanism described in OSAL 3.0
+ * it allows the caller to retrieve a property value:
+ * -the duration of the longest stream of the media
+ * -the version number of the reader
+ *
+ * @param context: (IN) Context of the reader
+ * @param optionId: (IN) indicates the option to get
+ * @param pValue: (OUT) pointer to structure or value (allocated by user)
+ * where option is stored
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_getOption_fct) (M4OSA_Context context, M4OSA_OptionID optionId,
+ M4OSA_DataOption pValue);
+
+
+/**
+ ************************************************************************
+ * @brief    set an option value of the reader
+ * @note this function follows the set/get option mechanism described in OSAL 3.0
+ * it allows the caller to set a property value:
+ * - nothing for the moment
+ *
+ * @param context: (IN) Context of the reader
+ * @param optionId: (IN) indicates the option to set
+ * @param pValue: (IN) pointer to structure or value (allocated by user) where
+ * option is stored
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_setOption_fct) (M4OSA_Context context, M4OSA_OptionID optionId,
+ M4OSA_DataOption pValue);
+
+
+/**
+ ************************************************************************
+ * @brief jump into the stream at the specified time
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) the stream handler of the stream to make jump
+ * @param pTime (IN/OUT) IN: the time to jump to (in ms)
+ * OUT: the time to which the stream really jumped
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4ERR_BAD_STREAM_ID the streamID does not exist
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_jump_fct) (M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler,
+ M4OSA_Int32* pTime);
+
+
+/**
+ ************************************************************************
+ * @brief reset the stream, that is seek it to beginning and make it ready to be read
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) The stream handler of the stream to reset
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4ERR_BAD_STREAM_ID the streamID does not exist
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_reset_fct) (M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler);
+
+
+/**
+ ************************************************************************
+ * @brief get the time of the closest RAP access unit before the given time
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) the stream handler of the stream to search
+ * @param pTime (IN/OUT) IN: the time to search from (in ms)
+ * OUT: the time (cts) of the preceding RAP AU.
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_BAD_STREAM_ID the streamID does not exist
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_getPrevRapTime_fct) (M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler,
+ M4OSA_Int32* pTime);
+
+
+/**
+ ************************************************************************
+ * structure M4READER_GlobalInterface
+ * @brief This structure defines the generic media reader GLOBAL interface
+ * @note This structure stores the pointers to functions concerning
+ * creation and control of one reader type.
+ * The reader type is one of the M4READER_MediaType
+ ************************************************************************
+*/
+typedef struct _M4READER_GlobalInterface
+/*****************************************/
+{
+ M4READER_create_fct* m_pFctCreate;
+ M4READER_destroy_fct* m_pFctDestroy;
+ M4READER_open_fct* m_pFctOpen;
+ M4READER_close_fct* m_pFctClose;
+ M4READER_getOption_fct* m_pFctGetOption;
+ M4READER_setOption_fct* m_pFctSetOption;
+ M4READER_getNextStream_fct* m_pFctGetNextStream;
+ M4READER_fillAuStruct_fct* m_pFctFillAuStruct;
+ M4READER_start_fct* m_pFctStart;
+ M4READER_stop_fct* m_pFctStop;
+ M4READER_jump_fct* m_pFctJump;
+ M4READER_reset_fct* m_pFctReset;
+ M4READER_getPrevRapTime_fct* m_pFctGetPrevRapTime;
+
+} M4READER_GlobalInterface;
+
+
+/************* READER DATA Interface ************************************/
+
+
+
+/**
+ ************************************************************************
+ * @brief Gets an access unit (AU) from the stream handler source.
+ * @note An AU is the smallest possible amount of data to be decoded by a decoder (audio/video).
+ *
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) The stream handler of the stream to make jump
+ * @param pAccessUnit (IN/OUT) Pointer to an access unit to fill with read data
+ * (the au structure is allocated by the user, and must be
+ * initialized by calling M4READER_fillAuStruct_fct after
+ * creation)
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @returns M4ERR_ALLOC memory allocation failed
+ * @returns M4ERR_BAD_STREAM_ID at least one of the stream Id. does not exist.
+ * @returns M4WAR_NO_DATA_YET there is not enough data on the stream for a new
+ * access unit
+ * @returns M4WAR_NO_MORE_AU there are no more access unit in the stream
+ * (end of stream)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_getNextAu_fct)(M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler,
+ M4_AccessUnit *pAccessUnit);
+
+
+/**
+ ************************************************************************
+ * structure M4READER_DataInterface
+ * @brief This structure defines the generic media reader DATA interface
+ * @note This structure stores the pointers to functions concerning
+ * data access for one reader type.(those functions are typically called from
+ * a decoder) The reader type is one of the M4READER_MediaType
+ ************************************************************************
+*/
+typedef struct _M4READER_DataInterface
+{
+ M4READER_getNextAu_fct* m_pFctGetNextAu;
+
+ /**
+ stores the context created by the M4READER_create_fct() function
+ so it is accessible without decoder
+ */
+ M4OSA_Context m_readerContext;
+/*****************************************/
+} M4READER_DataInterface;
+/*****************************************/
+
+
+#endif /*__M4READER_COMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4READER_Pcm.h b/libvideoeditor/vss/common/inc/M4READER_Pcm.h
new file mode 100755
index 0000000..f0fc857
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4READER_Pcm.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file M4READER_Pcm.h
+ * @brief Generic encapsulation of the core wav reader
+ * @note This file declares the generic shell interface retrieving function
+ * of the wav reader
+*************************************************************************
+*/
+#ifndef __M4READER_PCM_H__
+#define __M4READER_PCM_H__
+
+#include "M4READER_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+*************************************************************************
+* @brief Retrieves the generic interfaces implemented by the reader
+*
+* @param pMediaType : Pointer on a M4READER_MediaType (allocated by the caller)
+* that will be filled with the media type supported by this reader
+* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
+* implemented by this reader. The interface is a structure allocated
+* by the function and must be un-allocated by the caller.
+* @param pRdrDataInterface : Address of a pointer that will be set to the data interface
+* implemented by this reader. The interface is a structure allocated
+* by the function and must be un-allocated by the caller.
+*
+* @returns : M4NO_ERROR if OK
+* ERR_ALLOC if an allocation failed
+* ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+*************************************************************************
+*/
+M4OSA_ERR M4READER_PCM_getInterfaces(M4READER_MediaType *pMediaType,
+ M4READER_GlobalInterface **pRdrGlobalInterface,
+ M4READER_DataInterface **pRdrDataInterface);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4READER_PCM_H__*/
diff --git a/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h b/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h
new file mode 100755
index 0000000..f50367c
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4SYS_AccessUnit.h
+ * @brief Access unit manipulation
+ * @note This file defines the access unit structure,
+ * and declares functions to manipulate it.
+ ************************************************************************
+*/
+
+#ifndef M4SYS_ACCESSUNIT_H
+#define M4SYS_ACCESSUNIT_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Time.h"
+#include "M4SYS_Stream.h"
+
+/** The attribute of a fragment*/
+typedef enum {
+ M4SYS_kFragAttrOk = 01, /**< The fragment is correct, there is no error
+ (size cannot be 0)*/
+ M4SYS_kFragAttrCorrupted = 02, /**< The fragment is corrupted (there is at least a bit or byte
+ error somewhere in the fragment (size cannot be 0)*/
+ M4SYS_kFragAttrLost = 03 /**< The fragment is lost, so the size must be 0.*/
+} M4SYS_FragAttr;
+
+
+/** A Fragment is a piece of access unit. It can be decoded without decoding the others*/
+typedef struct {
+ M4OSA_MemAddr8 fragAddress; /**< The data pointer. All fragments of the same access unit
+ must be contiguous in memory*/
+ M4OSA_UInt32 size; /**< The size of the fragment. It must be 0 if fragment is
+ flagged 'lost'*/
+ M4SYS_FragAttr isCorrupted; /**< The attribute of this fragment*/
+} M4SYS_Frag;
+
+/**< The attribute of an access unit*/
+typedef M4OSA_UInt8 M4SYS_AU_Attr;
+
+#define AU_Corrupted 0x01 /**< At least one fragment of the access unit is flagged corrupted.*/
+#define AU_P_Frame 0x02 /**< The access unit is a P_frame*/
+#define AU_RAP 0x04 /**< The access unit is a random access point.*/
+
+
+/** An access unit is the smallest piece of data with timing information.*/
+typedef struct {
+ M4SYS_StreamDescription* stream ;
+ M4OSA_MemAddr32 dataAddress; /**< The data pointer. The size of this block
+ (allocated size) must be a 32-bits integer multiple*/
+ M4OSA_UInt32 size; /**< The size in bytes of the dataAddress. The size may
+ not match a 32-bits word boundary.*/
+ M4OSA_Time CTS; /**< The Composition Time Stamp*/
+ M4OSA_Time DTS; /**< The Decoding Time Stamp*/
+ M4SYS_AU_Attr attribute; /**< The attribute of the access unit*/
+ M4OSA_UInt8 nbFrag; /**< The number of fragments. It can be 0 if there is
+ no fragment.*/
+ M4SYS_Frag** frag; /**< An array of 'nbFrag' fragments. It stores the
+ fragments structure. The original definition
+ of frag has been changed from M4SYS_Frag* frag[]
+ to M4SYS_Frag** frag since the support
+ of such syntax is only a Microsoft extension of
+ the C compiler. */
+} M4SYS_AccessUnit;
+
+/* Error codes */
+#define M4ERR_AU_NO_MORE_FRAG M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000001)
+#define M4ERR_AU_BUFFER_OVERFLOW M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000002)
+#define M4ERR_AU_BAD_INDEX M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000003)
+#define M4ERR_NOT_ENOUGH_FRAG M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000004)
+
+
+
+#endif /*M4SYS_ACCESSUNIT_H*/
+
diff --git a/libvideoeditor/vss/common/inc/M4SYS_Stream.h b/libvideoeditor/vss/common/inc/M4SYS_Stream.h
new file mode 100755
index 0000000..bab0ce7
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4SYS_Stream.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+
+ ************************************************************************
+ * @file M4SYS_Stream.h
+ * @brief Stream manipulation
+ * @note This file defines the stream structure.
+ ************************************************************************
+*/
+
+#ifndef M4SYS_STREAM_H
+#define M4SYS_STREAM_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Time.h"
+
+typedef M4OSA_UInt32 M4SYS_StreamID;
+
+/** The streamType type provides a way to distinguish all streams (AAC, AMR, YUV420, MPEG-4 Video,
+ H263). Stream types can be sorted in 2 ways:
+@arg Some of them are raw data, others are encoded
+@arg Some of them are related to an audio media, a video media...
+@n So a specific naming convention has been designed to allow a quick parsing of the streamType
+ value to return the above categories. StreamType is an un-signed integer on 16 bits.
+@arg The first byte (MSB) defines the codec type. It can be either Audio,Video, Picture,
+ Text or Scene.
+@arg The second byte (LSB) defines the sub-codecs type (ie YUV420, PCM_16 bits, AMR...).
+ Moreover if this value is greater than 0x80 the stream is a raw stream, else the stream
+ is an encoded one
+@n 0x0000 is a forbidden value, it describes an unknown stream */
+
+typedef enum {
+ M4SYS_kUnknown = 0x0000,
+ /* Stream type definition
+ 0xYYZZ : YY is the codec type (Audio, Video, Picture, Scene ...)
+ ZZ is the sub-codec type (AAC, AMR , ...)
+ if ZZ is greater than 0x80 it is a raw format*/
+
+ /* Audio ones : Range from [0x0100-0x01FF]*/
+ M4SYS_kAudioUnknown = 0x0100,
+ M4SYS_kAAC = 0x0101,
+ M4SYS_kCELP = 0x0102,
+ M4SYS_kAMR = 0x0103,
+ M4SYS_kAMR_WB = 0x0104,
+ M4SYS_kMP3 = 0x0105,
+ M4SYS_kMIDI = 0x0106,
+ M4SYS_kWMA = 0x0107,
+ M4SYS_kREALAUDIO = 0x0108,
+ M4SYS_kEVRC = 0x0109,
+ M4SYS_kPCM_16bitsS = 0x0181, /* PCM 16 bits Signed */
+ M4SYS_kPCM_16bitsU = 0x0182, /* PCM 16 bits Un-signed */
+ M4SYS_kPCM_8bitsU = 0x0183, /* PCM 8 bits Un-signed */
+/* FixAA 2008/03/03 types: M4SYS_kPCM_16bitsS, M4SYS_kPCM_16bitsU and M4SYS_kPCM_8bitsU
+ are now only used by AudioMixer and ReaderAVI => An update is necessary in the future for use
+ type M4SYS_kPCM */
+ M4SYS_kXMF = 0x0184,
+ M4SYS_kSMAF = 0x0185,
+ M4SYS_kIMEL = 0x0186,
+ M4SYS_kBBA = 0x0187,
+ M4SYS_kBPC = 0x0188,
+ M4SYS_kADPCM = 0x0189, /* ADPCM added */
+ M4SYS_kPCM = 0x0190, /* stream type added: PCM; PR2569 fixAA */
+ M4SYS_kAudioAll = 0x01FF, /* all audio streams */
+
+ /* Video ones : Range [0x0200-0x02FF]*/
+ M4SYS_kVideoUnknown = 0x0200,
+ M4SYS_kMPEG_4 = 0x0201,
+ M4SYS_kH263 = 0x0202,
+ M4SYS_kH263pp = 0x0203,
+ M4SYS_kH264 = 0x0204,
+ M4SYS_kREALVIDEO = 0x0205,
+ M4SYS_kYUV420 = 0x0281,
+ M4SYS_kRGB32 = 0x0282,
+ M4SYS_kBGR32 = 0x0283,
+ M4SYS_kRGB24 = 0x0284,
+ M4SYS_kBGR24 = 0x0285,
+ M4SYS_kVideoAll = 0x02FF, /* all video streams */
+
+ /* Picture ones : Range [0x0300-0x03FF]*/
+ M4SYS_kPictureUnknown = 0x0300,
+ M4SYS_kJPEG = 0x0301,
+ M4SYS_kGIF = 0x0302,
+ M4SYS_kBMP = 0x0383,
+ M4SYS_kStillAll = 0x03FF, /* all still picture streams */
+
+ /* Text ones : Range [0x0400-0x04FF]*/
+ M4SYS_kTextUnknown = 0x0400,
+ M4SYS_kTimedText = 0x0401,
+ M4SYS_kUTF8 = 0x0481,
+ M4SYS_kUTF16 = 0x0482,
+ M4SYS_kUCS2 = 0x0483,
+ M4SYS_kTextAll = 0x04FF, /* all text streams */
+
+ /* Scene & Graphics ones : Range [0x0500-0x05FF]*/
+ M4SYS_kSceneUnknown = 0x0500,
+ M4SYS_kSMIL = 0x0501,
+ M4SYS_kBIFS = 0x0502,
+ M4SYS_kSceneAll = 0x05FF, /* all scene streams */
+
+ /* hinted ones : Range [0x0600-0x06FF]*/
+ M4SYS_kHintedUnknown = 0x0600,
+ M4SYS_kRTP = 0x0601,
+ M4SYS_kMPEG2_TS = 0x0602,
+ M4SYS_kHintedAll = 0x06FF, /* all packetized streams */
+
+ /* MPEG-4 system ones : Range [0x0700-0x07FF]*/
+ M4SYS_kSysUnknown = 0x0700,
+ M4SYS_kODS = 0x0701,
+ M4SYS_kIPMP = 0x0702,
+ M4SYS_kOCI = 0x0703,
+ M4SYS_kSysAll = 0x07FF /* all system streams*/
+} M4SYS_StreamType ;
+
+typedef struct {
+ M4SYS_StreamID streamID ;
+ M4OSA_UInt32 value ;
+} M4SYS_StreamIDValue ;
+
+typedef struct {
+ M4SYS_StreamID streamID ;
+ M4OSA_UInt32 size ;
+ M4OSA_MemAddr32 addr ;
+} M4SYS_StreamIDmemAddr ;
+
+/** This structure defines a set of properties associated with a stream*/
+typedef struct {
+ M4SYS_StreamID streamID; /**< The ID of the stream. It must be unique for a media
+ (ie in a MP4 file, two tracks can not have two times the same ID).
+ 0 is forbidden.*/
+ M4SYS_StreamType streamType; /**< The stream type of the stream*/
+ M4OSA_UInt8 profileLevel; /**< The profile & level of a stream. It is related to the
+ stream type & the definition comes from the standard bodies
+ (i.e. MPEG-4 Video & MPEG-4 Audio). Some values are
+ pre-defined: 0xFE=userPrivate 0xFF=no Profile &
+ Level specified*/
+ M4OSA_UInt32 decoderSpecificInfoSize; /**< The size (in bytes) of the decoder
+ specific info.*/
+ M4OSA_MemAddr32 decoderSpecificInfo; /**< The decoder configuration: these bytes are needed to initialise a decoder.*/
+ M4OSA_UInt32 timeScale; /**< The time scale of the stream. It means that all timing
+ duration of this stream are computed in this timescale
+ (ie timeScale = 8000, means there are 8000 ticks in
+ one second)*/
+ M4OSA_Time duration; /**< The stream duration of this stream. The time unit is the
+ time scale. The value can be set to M4SYS_UnknownTime if
+ the duration is not known.*/
+ M4OSA_Int32 averageBitrate; /**< The average bitrate (in bit per second) of this stream.
+ The average bitrate is computed on the stream duration.
+ -1 value means either there is no average bitrate or no
+ average bitrate is provided.*/
+ M4OSA_Int32 maxBitrate; /**< The maximum bitrate (in bit per second) of this stream.
+ The maximum bitrate is computed on a sliding window of 1
+ second. -1 value means either there is no max. bitrate or
+ no max. bitrate is provided.*/
+} M4SYS_StreamDescription;
+
+typedef enum {
+ M4SYS_kPreviousRAP = 0x01 ,
+ M4SYS_kNextRAP = 0x02 ,
+ M4SYS_kClosestRAP = 0x03 ,
+ M4SYS_kNoRAPprevious = 0x11 ,
+ M4SYS_kNoRAPnext = 0x12 ,
+ M4SYS_kNoRAPclosest = 0x13 ,
+ M4SYS_kBeginning = 0x20
+} M4SYS_SeekAccessMode ;
+
+#endif /*M4SYS_STREAM_H*/
+
+
+
diff --git a/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h b/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h
new file mode 100755
index 0000000..7016b8d
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4TOOL_VersionInfo.h
+ * @brief defines a common version information structure
+ * @note
+ *
+ ************************************************************************
+*/
+#ifndef __M4TOOL_VERSIONINFO_H__
+#define __M4TOOL_VERSIONINFO_H__
+
+#include "M4OSA_Types.h"
+
+/**
+ * structure M4_VersionInfo
+ * @brief This structure describes version of core component
+ * @note This structure is typically used to retrieve version information
+ * of a component via getOption function
+ */
+typedef struct _M4_VersionInfo
+{
+ M4OSA_UInt32 m_major; /*major version of the component*/
+ M4OSA_UInt32 m_minor; /*minor version of the component*/
+ M4OSA_UInt32 m_revision; /*revision version of the component*/
+
+ /* Structure size */
+ M4OSA_UInt32 m_structSize;
+
+} M4_VersionInfo;
+
+
+#endif /*__M4TOOL_VERSIONINFO_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h b/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h
new file mode 100755
index 0000000..90bfcb6
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VD_EXTERNAL_INTERFACE_H__
+#define __M4VD_EXTERNAL_INTERFACE_H__
+
+#include "M4DECODER_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ----- DSI bitstream parser ----- */
+
+/* This function is available to clients of the shell to allow them to analyse clips
+(useful for video editing) without having to instantiate a decoder, which can be useful precisely
+if HW decoders are a possibility. */
+
+M4OSA_ERR M4DECODER_EXTERNAL_ParseVideoDSI(
+ M4OSA_UInt8* pVol, M4OSA_Int32 aVolSize,
+ M4DECODER_MPEG4_DecoderConfigInfo* pDci,
+ M4DECODER_VideoSize* pVideoSize);
+
+M4OSA_ERR getAVCProfileAndLevel(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
+ M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel);
+
+M4OSA_ERR getH263ProfileAndLevel(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
+ M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel);
+
+M4OSA_ERR getMPEG4ProfileAndLevel(M4OSA_UInt8 profileAndLevel,
+ M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4VD_EXTERNAL_INTERFACE_H__ */
diff --git a/libvideoeditor/vss/common/inc/M4VD_Tools.h b/libvideoeditor/vss/common/inc/M4VD_Tools.h
new file mode 100755
index 0000000..3ca36ac
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VD_Tools.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VD_TOOLS_H__
+#define __M4VD_TOOLS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "NXPSW_CompilerSwitches.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Types.h"
+/* ----- bitstream parser ----- */
+
+typedef struct
+{
+ M4OSA_UInt32 stream_byte;
+ M4OSA_UInt32 stream_index;
+ M4OSA_MemAddr8 in;
+
+} M4VS_Bitstream_ctxt;
+
+M4OSA_UInt32 M4VD_Tools_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
+ M4OSA_UInt32 nb_bits);
+M4OSA_ERR M4VD_Tools_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
+ M4OSA_MemAddr32 dest_bits,
+ M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4VD_TOOLS_H__ */
diff --git a/libvideoeditor/vss/common/inc/M4VFL_transition.h b/libvideoeditor/vss/common/inc/M4VFL_transition.h
new file mode 100755
index 0000000..77f76cb
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VFL_transition.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ /**
+ ******************************************************************************
+ * @file M4VFL_transition.h
+ * @brief
+ * @note
+ ******************************************************************************
+*/
+
+#ifndef __M4VFL_TRANSITION_H__
+#define __M4VFL_TRANSITION_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+typedef unsigned char UInt8;
+typedef unsigned long UInt32;
+
+typedef struct S_M4ViComImagePlane
+{
+ UInt32 u_width; /* active width, in pixels */
+ UInt32 u_height; /* active height, in lines */
+ UInt32 u_topleft; /* index of 1st active pixel */
+ UInt32 u_stride; /* line stride, in bytes */
+ UInt8 *pac_data; /* buffer address */
+} M4ViComImagePlane;
+
+typedef struct S_M4VFL_modifLumParam
+{
+ unsigned short lum_factor;
+ unsigned short copy_chroma;
+} M4VFL_ModifLumParam;
+
+#define M4VIFI_OK 0
+#define M4VIFI_ILLEGAL_FRAME_HEIGHT 8
+#define M4VIFI_ILLEGAL_FRAME_WIDTH 9
+
+unsigned char M4VFL_modifyLumaByStep(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+ M4VFL_ModifLumParam *lum_param, void *user_data);
+
+unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+ unsigned long lum_factor, void *user_data);
+
+/**
+ *************************************************************************************************
+ * M4OSA_ERR M4VIFI_ImageBlendingonYUV420 (void *pUserData,
+ * M4VIFI_ImagePlane *pPlaneIn1,
+ * M4VIFI_ImagePlane *pPlaneIn2,
+ * M4VIFI_ImagePlane *pPlaneOut,
+ * M4VIFI_UInt32 Progress)
+ * @brief Blends two YUV 4:2:0 Planar images.
+ * @note Blends YUV420 planar images,
+ * Map the value of progress from (0 - 1000) to (0 - 1024)
+ * Set the range of blendingfactor,
+ * 1. from 0 to (Progress << 1) ;for Progress <= 512
+ * 2. from (( Progress - 512)<< 1) to 1024 ;otherwise
+ * Set the increment of blendingfactor for each element in the image row by the factor,
+ * = (Range-1) / (image width-1) ;for width >= range
+ * = (Range) / (image width) ;otherwise
+ * Loop on each(= i) row of output Y plane (steps of 2)
+ * Loop on each(= j) column of output Y plane (steps of 2)
+ * Get four Y samples and one U & V sample from two input YUV4:2:0 images and
+ * Compute four Y sample and one U & V sample for output YUV4:2:0 image
+ * using the following,
+ * Out(i,j) = blendingfactor(i,j) * In1(i,j)+ (l - blendingfactor(i,j)) * In2(i,j)
+ * end loop column
+ * end loop row.
+ * @param pUserData: (IN) User Specific Parameter
+ * @param pPlaneIn1: (IN) Pointer to an array of image plane structures maintained for Y, U
+ * and V planes.
+ * @param pPlaneIn2: (IN) Pointer to an array of image plane structures maintained for Y, U
+ * and V planes.
+ * @param pPlaneOut: (OUT) Pointer to an array of image plane structures maintained for Y, U
+ * and V planes.
+ * @param Progress: (IN) Progress value (varies between 0 and 1000)
+ * @return M4VIFI_OK: No error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: Error in width
+ ***********************************************************************************************/
+unsigned char M4VIFI_ImageBlendingonYUV420 (void *pUserData, M4ViComImagePlane *pPlaneIn1,
+ M4ViComImagePlane *pPlaneIn2,
+ M4ViComImagePlane *pPlaneOut, UInt32 Progress);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // __M4VFL_TRANSITION_H__
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_Clip.h b/libvideoeditor/vss/common/inc/M4VIFI_Clip.h
new file mode 100755
index 0000000..1f07616
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VIFI_Clip.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VIFI_Clip.h
+ * @brief Global Table definition
+ * @note This file defines the Clipping and Division table address
+ ******************************************************************************
+*/
+
+#ifndef _M4VIFI_CLIP_H_
+#define _M4VIFI_CLIP_H_
+
+/* Clipping matrix for RGB values */
+EXTERN CNST M4VIFI_UInt8 *M4VIFI_ClipTable_zero;
+/* Division table for (65535/x); x = 0 to 512 */
+EXTERN CNST M4VIFI_UInt16 *M4VIFI_DivTable_zero;
+
+#endif /* _M4VIFI_CLIP_H_ */
+
+/* End of file M4VIFI_Clip.h */
+
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_Defines.h b/libvideoeditor/vss/common/inc/M4VIFI_Defines.h
new file mode 100755
index 0000000..e4591e5
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VIFI_Defines.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VIFI_Defines.h
+ * @brief Macro Definition
+ * @note This file defines all the macro used in the filter library
+ ******************************************************************************
+*/
+
+#ifndef _M4VIFI_DEFINES_H_
+#define _M4VIFI_DEFINES_H_
+
+/**
+ *****************************************************************************
+ * Macros used for color transform RGB565 to YUV
+ *****************************************************************************
+*/
+#define CST_RGB_16_SIZE 2
+#define Y16(r, g, b) CLIP( ( ( (80593 * r)+(77855 * g)+(30728 * b)) >> 15))
+#define U16(r, g, b) CLIP(128+ ( ( -(45483 * r)-(43936 * g)+(134771 * b)) >> 15 ))
+#define V16(r, g, b) CLIP(128+ ( ( (134771 * r)-(55532 * g)-(21917 * b)) >> 15 ))
+
+
+/**
+ *****************************************************************************
+ * Macros used for color transform YUV to RGB
+ * B = 1.164(Y - 16) + 2.018(U - 128)
+ * G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128)
+ * R = 1.164(Y - 16) + 1.596(V - 128)
+ * Above Conversion Formula is implemented for fixed point operation
+ *****************************************************************************
+*/
+
+#define CST_RGB_24_SIZE 3
+
+#ifdef __RGB_V1__
+#define DEMATRIX(Rx,Gx,Bx,Yx37,Ux,Vx) \
+ Rx = CLIP(((Yx37 + (Vx * 51) + 16) >> 5) - 223); \
+ Gx = CLIP(((Yx37 - ((Ux+(Vx<<1)) * 13) +16) >> 5) + 135); \
+ Bx = CLIP(((Yx37 + (Ux * 65) + 16) >> 5) - 277)
+#else
+#define DEMATRIX(Rx,Gx,Bx,Yx2568,Ux,Vx) \
+ Rx = CLIP(((Yx2568 + (Vx * 0x3343) + (M4VIFI_Int32)0xffe40800) >> 13)); \
+ Gx = CLIP(((Yx2568 - (Ux * 0x0c92) - (Vx * 0x1a1e) + (M4VIFI_Int32)0x00110180) >> 13)); \
+ Bx = CLIP(((Yx2568 + (Ux * 0x40cf) + (M4VIFI_Int32)0xffdd4200) >> 13));
+#endif /* __RGB_V1__ */
+
+/**
+ *****************************************************************************
+ * Packing and Unpacking is different for little and big endian
+ * r, g, b, Rx, Gx, Bx are 8 bit color value
+ * a, data are 16 bit pixel value
+ *****************************************************************************
+ */
+
+/* Pack computations common for little endian and big endian modes */
+#define PACK_BGR24(rgb_ptr,Rx,Gx,Bx) {rgb_ptr[0] = (M4VIFI_UInt8)Bx; rgb_ptr[1] =\
+ (M4VIFI_UInt8)Gx; rgb_ptr[2] = (M4VIFI_UInt8)Rx;}
+#define PACK_RGB24(rgb_ptr,Rx,Gx,Bx) {rgb_ptr[0] = (M4VIFI_UInt8)Rx; rgb_ptr[1] =\
+ (M4VIFI_UInt8)Gx; rgb_ptr[2] = (M4VIFI_UInt8)Bx;}
+
+#ifdef BIG_ENDIAN
+#define PACK_RGB565(a, Rx, Gx, Bx) (((Rx >> 3) << (11 + (a)))\
+ | ((Gx >> 2) << (5 + (a))) | ((Bx >> 3) << (a)))
+#define PACK_BGR565(a, Rx, Gx, Bx) (((Bx >> 3) << (11 + (a)))\
+ | ((Gx >> 2) << (5 + (a))) | ((Rx >> 3) << (a)))
+#define GET_RGB565(r, g, b, data) {b = ((data) & 31); g =\
+ ((data >> 5) & 63); r = ((data >> 11) & 31);}
+#define GET_BGR565(b, g, r, data) \
+ r = ((data) & 31); \
+ g = ((data >> 5) & 63); \
+ b = ((data >> 11) & 31 );
+#else /* LITTLE endian: 0x12345678 -> 78 56 34 12 */
+#define PACK_RGB565(a, Rx, Gx, Bx) (((Bx >> 3) << (8 + (a))) \
+ | (((Gx >> 2)&0x7) << (13 + (a))) | ((Gx >> 5) << (a)) | ((Rx >> 3) << (3 + a)))
+#define PACK_BGR565(a, Rx, Gx, Bx) (((Rx >> 3) << (11 + (a))) \
+ | ((Gx >> 2) << (5 + (a))) | ((Bx >> 3) << (a)))
+#define GET_RGB565(r, g, b, data) { b = (M4VIFI_UInt8)(((data) & 0x1F00) >> 8); g =\
+ (M4VIFI_UInt8)((((data) & 0x7) << 3) | (((data) & 0xE000) >> 13)); r =\
+ (M4VIFI_UInt8)(((data) & 0xF8) >> 3);}
+#define GET_BGR565(b, g, r, data) \
+ b = ((data) & 31); \
+ g = ((data >> 5) & 63); \
+ r = ((data >> 11) & 31 );
+#endif /* BIG_ENDIAN */
+
+
+#define CST_RGB_24_SIZE 3
+#define Y24(r,g,b) CLIP(( ( (19595 * r) + (38470 * g) + (9437 * b) ) >>16))
+#define U24(r,g,b) CLIP(128 + ( ( -(11059 * r) - (21709 * g) + (32768 * b)) >>16))
+#define V24(r,g,b) CLIP(128 + ( ( (32768 * r) - (27426 * g) - (5329 * b)) >>16))
+#define GET_RGB24(r,g,b,s,o) r = s[o]; g = s[o + 1]; b = s[o + 2];
+
+/**
+ ***********************************************************************************
+ * Macro for clipping using the clipping matrix for RGB values
+ ***********************************************************************************
+*/
+/** Clip function ensures values with range of 0 and 255 */
+#define CLIP(x) *(M4VIFI_ClipTable_zero + (x))
+#define CLIP_OVF 500
+#define CLIP_LUT_SIZE (256 + 2 * CLIP_OVF)
+/** Division table for RGB565 to HLS conversion */
+#define DIVCLIP(x) *(M4VIFI_DivTable_zero + (x))
+
+/**
+ *****************************************************************************
+ * Endianness (default configuration is Little Endian)
+ *****************************************************************************
+*/
+#if (!defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN))
+/** Default endian setting */
+#define LITTLE_ENDIAN
+#endif
+
+/**
+ *****************************************************************************
+ * Other macros and define
+ *****************************************************************************
+*/
+/** YUV plane index */
+#define PLANES 3
+#define YPlane 0
+#define UPlane 1
+#define VPlane 2
+
+/** Check for value is EVEN */
+#ifndef IS_EVEN
+#define IS_EVEN(a) (!(a & 0x01))
+#endif
+
+/* Used for fixed point implementation */
+#ifndef MAX_SHORT
+#define MAX_SHORT 0x10000
+#endif
+
+#endif /* _M4VIFI_DEFINES_H_ */
+
+/* End of file M4VIFI_Defines.h */
+
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h b/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h
new file mode 100755
index 0000000..3d2fc9d
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h
@@ -0,0 +1,785 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VIFI_FiltersAPI.h
+ * @brief External API and Data definitions for the video filter library
+ * @note This file defines and declares data common to the video filter library:
+ * -# data types
+ * -# error codes
+ * -# external API's
+ * -# API level structure definition
+ ******************************************************************************
+*/
+
+#ifndef _M4VIFI_FILTERSAPI_H_
+
+#define _M4VIFI_FILTERSAPI_H_
+
+#ifdef __cplusplus
+
+extern "C" {
+
+#endif /* __cplusplus */
+
+ /**
+ ***********************************************************
+ * Data types definition
+ ***********************************************************
+ */
+
+ typedef unsigned char M4VIFI_UInt8;
+ typedef char M4VIFI_Int8;
+ typedef unsigned short M4VIFI_UInt16;
+ typedef unsigned long M4VIFI_UInt32;
+ typedef short M4VIFI_Int16;
+ typedef long M4VIFI_Int32;
+ typedef float M4VIFI_Float;
+ typedef double M4VIFI_Double;
+ typedef unsigned char M4VIFI_ErrorCode;
+
+/**
+ ***********************************************************
+ * Error codes definition
+ ***********************************************************
+*/
+#define M4VIFI_OK 0
+#define M4VIFI_INVALID_PARAM 7
+#define M4VIFI_ILLEGAL_FRAME_HEIGHT 8
+#define M4VIFI_ILLEGAL_FRAME_WIDTH 9
+
+/**
+ ***********************************************************
+ * Other basic definitions
+ ***********************************************************
+*/
+#define CNST const
+#define EXTERN extern
+
+#ifndef NULL
+#define NULL 0
+
+#endif
+#ifndef FALSE
+#define FALSE 0
+#define TRUE !FALSE
+
+#endif
+
+/**
+ ***********************************************************
+ * Structures definition
+ ***********************************************************
+*/
+
+/**
+ ******************************************************************************
+ * structure M4VIFI_ImagePlane
+ * @brief Texture (YUV) planes structure
+ * @note This structure details the image planes for the output textures:
+ * sizes (in pixels) are luma plane sizes, the 3 pointers point
+ * to the Y, U and V buffers which store data in planar format.
+ ******************************************************************************
+*/
+
+ typedef struct
+ {
+ M4VIFI_UInt32 u_width; /**< Width of luma in pixel unit */
+ M4VIFI_UInt32 u_height; /**< Height of luma in pixel unit */
+ M4VIFI_UInt32 u_topleft; /**< Pointer to first texture active pixel */
+ M4VIFI_UInt32 u_stride; /**< Stride value */
+ M4VIFI_UInt8 *pac_data; /**< Pointer to the data */
+ } M4VIFI_ImagePlane;
+
+/**
+ ******************************************************************************
+ * structure M4VIFI_FramingData
+ * @brief Data necessary to add an overlay on an image
+ * @note This structure details the position and the data of the overlay
+ ******************************************************************************
+*/
+ typedef struct
+ {
+ M4VIFI_UInt32
+ m_xPosStep; /**< X positioning of the overlay vs main picture.
+ X positioning is expressed in percentage vs the main
+ picture width.
+ m_xPosStep must be expressed by step of 1% and between
+ -50/+50%.
+ 0% means overlay is centered vs main picture on
+ X abscissa. */
+ M4VIFI_UInt32
+ m_yPosStep; /**< Y positioning of the overlay vs main picture.
+ Y positioning is expressed in percentage vs the main
+ picture width.
+ m_yPosStep must be expressed by step of 1% and between
+ -50/+50%.
+ 0% means overlay is centered vs main picture on
+ Y abscissa. */
+
+ M4VIFI_ImagePlane
+ *
+ m_imagePlane; /**< Pointer to the framing image with alpha channel */
+ } M4VIFI_FramingData;
+
+/**
+ ******************************************************************************
+ * structure M4VIFI_HLSoffset
+ * @brief HLS offset structure
+ * @note This structure holds the hue, saturation and lightness values
+ * for quality enhancement. The values must necessarily be in the ranges
+ * hue = -360 to 360, sat = 0 to 100 and light = 0 to 100
+ ******************************************************************************
+*/
+ typedef struct
+ {
+ M4VIFI_Int16 hue; /**< Hue offset */
+ M4VIFI_Int16 sat; /**< Saturation offset */
+ M4VIFI_Int16 light; /**< Light offset */
+ } M4VIFI_HLSoffset;
+
+/**
+ ******************************************************************************
+ * structure M4VIFI_Tranformation
+ * @brief Image Tranformation Structure
+ * @note Image Tranformation Request
+ * rotation : 1 -> +90deg Rotation
+ * -1 -> -90deg Rotation
+ * 0 -> No Rotation
+ ******************************************************************************
+*/
+ typedef struct
+ {
+ M4VIFI_Int32 i32_rotation; /**< Rotation Flag */
+ } M4VIFI_Tranformation;
+
+/**
+ ******************************************************************************
+ * structure M4VIFI_pContext
+ * @brief New Structures
+ * @note -# Structure of M4VIFI_HLSoffset
+ ******************************************************************************
+*/
+ typedef struct
+ {
+ M4VIFI_HLSoffset hlsOffset; /**< HLS offset structure */
+ } M4VIFI_pContext;
+
+ /*
+ *****************************************************
+ * External API functions
+ *****************************************************
+ */
+
+ /**< Effect filters */
+ M4VIFI_UInt8 M4VIFI_SepiaYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_GrayscaleYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_ContrastYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_NegativeYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_FlipYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_MirrorYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_Rotate180YUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_Rotate90RightYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_Rotate90LeftYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_ColorRYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_ColorGYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_ColorBYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_FramingRGB565toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_FramingYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_SetHueInYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_ColdYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ M4VIFI_UInt8 M4VIFI_WarmYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+/* ADS Compiler */
+
+/* Generic ARM assembly functions */
+#if defined ADS_ARM
+
+ /** Apply grayscale effect RGB565toRGB565 */
+
+ M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB888toRGB888 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB565toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear YUV420toYUV420 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** RGB565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_RGB565toYUV420AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** BGR565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_BGR565toYUV420AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV422 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_UYVYtoYUV420AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV420 to RGB565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toRGB565AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** YUV420 to BGR565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toBGR565AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toBGR565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Modify HLS in RGB565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinRGB565AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Modify HLS in BGR565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinBGR565AdsArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+#define M4VIFI_RGB565toYUV420 M4VIFI_RGB565toYUV420AdsArm
+#define M4VIFI_BGR565toYUV420 M4VIFI_BGR565toYUV420AdsArm
+#define M4VIFI_UYVYtoYUV420 M4VIFI_UYVYtoYUV420AdsArm
+#define M4VIFI_YUV420toRGB565 M4VIFI_YUV420toRGB565AdsArm
+#define M4VIFI_YUV420toBGR565 M4VIFI_YUV420toBGR565AdsArm
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+ M4VIFI_ResizeBilinearYUV420toRGB565AdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toBGR565 \
+ M4VIFI_ResizeBilinearYUV420toBGR565AdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm
+
+#define M4VIFI_SetHLSinRGB565 M4VIFI_SetHLSinRGB565AdsArm
+#define M4VIFI_SetHLSinBGR565 M4VIFI_SetHLSinBGR565AdsArm
+
+/* ARM9E assembly functions */
+#elif defined ADS_ARM9E
+
+ /** Apply grayscale effect RGB565toRGB565 */
+
+ M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB888toRGB888 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB565toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /** Resize Bilinear YUV420toYUV420 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** RGB565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_RGB565toYUV420AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** BGR565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_BGR565toYUV420AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV422 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_UYVYtoYUV420AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV420 to RGB565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toRGB565AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** YUV420 to BGR565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toBGR565AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toBGR565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm9E(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm9E(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm9E(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm9E(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Modify HLS in RGB565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinRGB565AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Modify HLS in BGR565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinBGR565AdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize YUV420toYUV420 from QCIF to QVGA*/
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGAAdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /**Resize YUV420toRGB565 from QCIF to QVGA*/
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGAAdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /**Resize YUV420toRGB565 from QCIF to QVGA with rotation +90*/
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RRAdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /**Resize YUV420toRGB565 from QCIF to QVGA with rotation -90*/
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RLAdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+/** Resizes YUV420 Planar Image and stores in YUV420 Linear format with/without +or-90 rotation*/
+ M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV420LinearAdsArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+#define M4VIFI_RGB565toYUV420 M4VIFI_RGB565toYUV420AdsArm9E
+#define M4VIFI_BGR565toYUV420 M4VIFI_BGR565toYUV420AdsArm9E
+#define M4VIFI_UYVYtoYUV420 M4VIFI_UYVYtoYUV420AdsArm9E
+#define M4VIFI_YUV420toRGB565 M4VIFI_YUV420toRGB565AdsArm9E
+#define M4VIFI_YUV420toBGR565 M4VIFI_YUV420toBGR565AdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+ M4VIFI_ResizeBilinearYUV420toRGB565AdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565 \
+ M4VIFI_ResizeBilinearYUV420toBGR565AdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm9E
+#define M4VIFI_SetHLSinRGB565 M4VIFI_SetHLSinRGB565AdsArm9E
+#define M4VIFI_SetHLSinBGR565 M4VIFI_SetHLSinBGR565AdsArm9E
+#define M4VIFI_YUV420QCIFtoYUV420QVGA M4VIFI_YUV420QCIFtoYUV420QVGAAdsArm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA M4VIFI_YUV420QCIFtoRGB565QVGAAdsArm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA_RR M4VIFI_YUV420QCIFtoRGB565QVGA_RRAdsArm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA_RL M4VIFI_YUV420QCIFtoRGB565QVGA_RLAdsArm9E
+#define M4VIFI_YUV420PlanartoYUV420Linear M4VIFI_YUV420PlanartoYUV420LinearAdsArm9E
+/* GCC Compiler */
+/* Generic ARM assembly functions */
+
+#elif defined GCC_ARM
+
+ /** Apply grayscale effect RGB565toRGB565 */
+
+ M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB888toRGB888 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB565toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear YUV420toYUV420 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** RGB565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_RGB565toYUV420GccArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** BGR565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_BGR565toYUV420GccArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV420 to RGB565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toRGB565GccArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** YUV420 to BGR565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toBGR565GccArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565GccArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toBGR565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565GccArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Modify HLS in RGB565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinRGB565GccArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Modify HLS in BGR565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinBGR565GccArm(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+#define M4VIFI_RGB565toYUV420 M4VIFI_RGB565toYUV420GccArm
+#define M4VIFI_BGR565toYUV420 M4VIFI_BGR565toYUV420GccArm
+#define M4VIFI_YUV420toRGB565 M4VIFI_YUV420toRGB565GccArm
+#define M4VIFI_YUV420toBGR565 M4VIFI_YUV420toBGR565GccArm
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+ M4VIFI_ResizeBilinearYUV420toRGB565GccArm
+#define M4VIFI_ResizeBilinearYUV420toBGR565 \
+ M4VIFI_ResizeBilinearYUV420toBGR565GccArm
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm
+#define M4VIFI_SetHLSinRGB565 M4VIFI_SetHLSinRGB565GccArm
+#define M4VIFI_SetHLSinBGR565 M4VIFI_SetHLSinBGR565GccArm
+
+/* ARM9E assembly functions */
+#elif defined GCC_ARM9E
+
+ /** Apply grayscale effect RGB565toRGB565 */
+
+ M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB888toRGB888 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB565toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear YUV420toYUV420 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** RGB565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_RGB565toYUV420GccArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** BGR565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_BGR565toYUV420GccArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV420 to RGB565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toRGB565GccArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** YUV420 to BGR565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toBGR565GccArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565GccArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toBGR565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565GccArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm9E(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm9E(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm9E(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm9E(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Modify HLS in RGB565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinRGB565GccArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Modify HLS in BGR565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinBGR565GccArm9E(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+#define M4VIFI_RGB565toYUV420 M4VIFI_RGB565toYUV420GccArm9E
+#define M4VIFI_BGR565toYUV420 M4VIFI_BGR565toYUV420GccArm9E
+#define M4VIFI_YUV420toRGB565 M4VIFI_YUV420toRGB565GccArm9E
+#define M4VIFI_YUV420toBGR565 M4VIFI_YUV420toBGR565GccArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+ M4VIFI_ResizeBilinearYUV420toRGB565GccArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565 \
+ M4VIFI_ResizeBilinearYUV420toBGR565GccArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm9E
+#define M4VIFI_SetHLSinBGR565 M4VIFI_SetHLSinBGR565GccArm9E
+#define M4VIFI_SetHLSinRGB565 M4VIFI_SetHLSinRGB565GccArm9E
+
+/* TI CCS assembly files */
+#elif defined TI411_ARM9E
+
+ /** Apply grayscale effect RGB565toRGB565 */
+
+ M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB888toRGB888 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB565toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear YUV420toYUV420 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** YUV420 (Planar) to RGB565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** YUV420 (Planar) to Resized RGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** YUV420 (Planar) to Resized RGB888 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** YUV420(Planar) to Resized and Rotated (-90) RGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV420(Planar) to Resized and Rotated (+90) RGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV420(Planar) to Resized YUV420(Planar) */
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGA(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution */
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+/** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution with rotation(-90) */
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RL(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+/** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution with rotation(+90) */
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RR(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+#define M4VIFI_YUV420toRGB565 M4VIFI_YUV420toRGB565Ti411Arm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+ M4VIFI_ResizeBilinearYUV420toRGB565Ti411Arm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftTi411Arm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+ M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightTi411Arm9E
+
+#define M4VIFI_YUV420QCIFtoYUV420QVGA M4VIFI_YUV420QCIFtoYUV420QVGATi411Arm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA M4VIFI_YUV420QCIFtoRGB565QVGATi411Arm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA_RL M4VIFI_YUV420QCIFtoRGB565QVGA_RLTi411Arm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA_RR M4VIFI_YUV420QCIFtoRGB565QVGA_RRTi411Arm9E
+
+/* ANSI C Functions */
+#else
+
+ /** Apply grayscale effect RGB565toRGB565 */
+
+ M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB888toRGB888 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear RGB565toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** Resize Bilinear YUV420toYUV420 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+ /** RGB565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_RGB565toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** BGR565 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_BGR565toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** BGR888 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_BGR888toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
+ /** RGB888 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
+
+ /** YUV422 to YUV420 */
+ M4VIFI_UInt8 M4VIFI_UYVYtoYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+ /** YUV420 to RGB565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** YUV420 to BGR565 */
+ M4VIFI_UInt8 M4VIFI_YUV420toBGR565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /** YUV420 to BGR565 with rotation -90 */
+ M4VIFI_UInt8 M4VIFI_YUV420toBGR565RotatedLeft(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /** YUV420 to BGR565 with rotation +90 */
+ M4VIFI_UInt8 M4VIFI_YUV420toBGR565RotatedRight(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /** YUV420 to BGR24 */
+ M4VIFI_UInt8 M4VIFI_YUV420toBGR24(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /** YUV420 to RGB24 */
+ M4VIFI_UInt8 M4VIFI_YUV420toRGB24(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /** Resize Bilinear YUV420toYUV420 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /** Resize Bilinear YUV420toRGB565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toBGR565 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight(
+ void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Modify HLS in RGB565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /** Modify HLS in BGR565 */
+ M4VIFI_UInt8 M4VIFI_SetHLSinBGR565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ /**Resize YUV420toYUV420 from QCIF to QVGA*/
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGA(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /**Resize YUV420toRGB565 from QCIF to QVGA*/
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /**Resize YUV420toRGB565 from QCIF to QVGA with rotation +90*/
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RR(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+ /**Resize YUV420toRGB565 from QCIF to QVGA with rotation -90*/
+ M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RL(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+/** Resizes YUV420 Planar Image and stores in YUV420 Linear format with/without +or-90 rotation*/
+ M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV420Linear(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+/** Resizes YUV420 Planar Image and stores in YUV422 Interleaved format
+ with/without +or-90 rotation*/
+ M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV422Interleaved(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+#endif
+
+ /** definition of the converter function types */
+
+ typedef M4VIFI_UInt8 M4VIFI_PlanConverterFunctionType(void
+ *pContext, M4VIFI_ImagePlane* in, M4VIFI_ImagePlane* out);
+
+ /** definition of the preprocessing function types */
+ typedef M4VIFI_UInt8 M4VIFI_PreprocessFunctionType(void
+ *pContext, M4VIFI_ImagePlane* in, M4VIFI_ImagePlane* out);
+
+ M4VIFI_UInt8 M4VIFI_YUV420toYUV420(void *user_data,
+ M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_YUV420PlanarToYUV420Semiplanar(void *user_data,
+ M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
+ M4VIFI_UInt8 M4VIFI_SemiplanarYUV420toYUV420(void *user_data,
+ M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
+#ifdef __cplusplus
+
+}
+
+#endif /* __cplusplus */
+
+#endif /* _M4VIFI_FILTERSAPI_H_ */
+
+/* End of file M4VIFI_FiltersAPI.h */
diff --git a/libvideoeditor/vss/common/inc/M4VPP_API.h b/libvideoeditor/vss/common/inc/M4VPP_API.h
new file mode 100755
index 0000000..965ca22
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VPP_API.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VPP_API.h
+ * @brief Video preprocessing API public functions prototypes.
+ * @note
+ ******************************************************************************
+*/
+
+#ifndef M4VPP_API_H
+#define M4VPP_API_H
+
+#include "M4OSA_Types.h" /**< Include for common OSAL types */
+#include "M4OSA_Error.h" /**< Include for common OSAL errors */
+
+/**
+ * Include Video filters interface definition (for the M4VIFI_ImagePlane type) */
+#include "M4VIFI_FiltersAPI.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/**
+ ******************************************************************************
+ * Public type of the Video Preprocessing execution context
+ ******************************************************************************
+*/
+typedef M4OSA_Void* M4VPP_Context;
+
+typedef enum
+{
+ M4VPP_kIYUV420=0, /**< YUV 4:2:0 planar (standard input for mpeg-4 video) */
+ M4VPP_kIYUV422, /**< YUV422 planar */
+ M4VPP_kIYUYV, /**< YUV422 interleaved, luma first */
+ M4VPP_kIUYVY, /**< YUV422 interleaved, chroma first */
+ M4VPP_kIJPEG, /**< JPEG compressed frames */
+ M4VPP_kIRGB444, /**< RGB 12 bits 4:4:4 */
+ M4VPP_kIRGB555, /**< RGB 15 bits 5:5:5 */
+ M4VPP_kIRGB565, /**< RGB 16 bits 5:6:5 */
+ M4VPP_kIRGB24, /**< RGB 24 bits 8:8:8 */
+ M4VPP_kIRGB32, /**< RGB 32 bits */
+ M4VPP_kIBGR444, /**< BGR 12 bits 4:4:4 */
+ M4VPP_kIBGR555, /**< BGR 15 bits 5:5:5 */
+ M4VPP_kIBGR565, /**< BGR 16 bits 5:6:5 */
+ M4VPP_kIBGR24, /**< BGR 24 bits 8:8:8 */
+ M4VPP_kIBGR32 /**< BGR 32 bits */
+} M4VPP_InputVideoFormat;
+
+
+/**
+ ******************************************************************************
+ * @brief Prototype of the main video preprocessing function
+ * @note Preprocess one frame
+ * @param pContext: (IN) Execution context of the VPP.
+ * @param pPlaneIn: (INOUT) Input Image
+ * @param pPlaneOut: (INOUT) Output Image
+ ******************************************************************************
+*/
+typedef M4OSA_ERR (M4VPP_apply_fct) (M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ M4VIFI_ImagePlane* pPlaneOut);
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VPP_initVideoPreprocessing(M4VPP_Context* pContext)
+ * @brief This function allocates a new execution context for the Video Preprocessing component.
+ * @note
+ * @param pContext: (OUT) Execution context allocated by the function.
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_ALLOC: there is no more available memory.
+ * @return M4ERR_PARAMETER: pContext is NULL (debug only).
+ ******************************************************************************
+*/
+M4OSA_ERR M4VPP_initVideoPreprocessing(M4VPP_Context* pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VPP_applyVideoPreprocessing(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ * M4VIFI_ImagePlane* pPlaneOut)
+ * @brief Preprocess one frame.
+ * @note
+ * @param pContext: (IN) Execution context.
+ * @param pPlaneIn: (INOUT) Input Image
+ * @param pPlaneOut: (INOUT) Output Image
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_PARAMETER: pContext or pPlaneIn or pPlaneOut is NULL (debug only).
+ * @return M4ERR_STATE: Video Preprocessing is not in an appropriate state for this function
+ * to be called
+ ******************************************************************************
+*/
+M4OSA_ERR M4VPP_applyVideoPreprocessing(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ M4VIFI_ImagePlane* pPlaneOut);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VPP_cleanUpVideoPreprocessing(M4VPP_Context pContext)
+ * @brief This method frees the execution context for the Video Preprocessing component.
+ * Any further usage of the context will lead to unpredictable result.
+ * @note
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_PARAMETER: pContext is NULL (debug only).
+ ******************************************************************************
+*/
+M4OSA_ERR M4VPP_cleanUpVideoPreprocessing(M4VPP_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VPP_setVideoPreprocessingMode(M4VPP_Context pContext, M4VES_InputVideoFormat format)
+ * @brief This method sets the format (rgb, yuv, ...) of the input plane so that the
+ * appropriate video preprocessing can be applied.
+ * @param pContext: (IN) Execution context.
+ * @param format : (IN) Format of input plane (rgb, yuv, ...)
+ * @return M4NO_ERROR: there is no error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VPP_setVideoPreprocessingMode(M4VPP_Context pContext, M4VPP_InputVideoFormat format);
+
+/**
+ ******************************************************************************
+ * @brief Definition of the errors specific to this module.
+ ******************************************************************************
+*/
+
+/**< Input and output planes have incompatible properties */
+#define M4VPP_ERR_IMCOMPATIBLE_IN_AND_OUT_PLANES M4OSA_ERR_CREATE( M4_ERR,\
+ M4PREPROCESS_VIDEO, 0x000001);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* M4VPP_API_H */
+
diff --git a/libvideoeditor/vss/common/inc/M4WRITER_common.h b/libvideoeditor/vss/common/inc/M4WRITER_common.h
new file mode 100755
index 0000000..abb7b86
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4WRITER_common.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ /**
+ ******************************************************************************
+ * @file M4WRITER_common.h
+ * @brief VES writers shell interface.
+ * @note This file defines the types internally used by the VES to abstract writers
+ ******************************************************************************
+*/
+#ifndef __M4WRITER_COMMON_H__
+#define __M4WRITER_COMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "M4OSA_Types.h"
+#include "M4OSA_FileWriter.h" /* for M4OSA_FileWriterPointer */
+#include "M4OSA_FileReader.h" /* for M4OSA_FileReadPointer */
+#include "M4OSA_OptionID.h" /* for M4OSA_OPTION_ID_CREATE() */
+#include "M4OSA_CoreID.h" /* for M4WRITER_COMMON */
+
+#include "M4SYS_Stream.h" /* for M4SYS_StreamID */
+#include "M4SYS_AccessUnit.h" /* for M4SYS_AccessUnit */
+
+/**
+ ******************************************************************************
+ * MP4W Errors & Warnings definition
+ ******************************************************************************
+*/
+#define M4WAR_WRITER_STOP_REQ M4OSA_ERR_CREATE(M4_WAR, M4WRITER_COMMON ,0x000001)
+
+/**
+ ******************************************************************************
+ * enum M4WRITER_OutputFileType
+ * @brief This enum defines the available output file formats.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4WRITER_kUnknown=-1,
+ M4WRITER_k3GPP=0, /**< 3GPP compliant file */
+ M4WRITER_kAVI=1, /**< AVI file */
+ M4WRITER_kAMR=2, /**< AMR file */
+ M4WRITER_kNETWORK3GPP=3, /**< 3GPP via TCP */
+ M4WRITER_kPCM=4, /**< PCM file */
+ M4WRITER_kJPEG=5, /**< JPEG EXIF writer */
+ M4WRITER_kMP3=6, /**< MP3 writer */
+
+ M4WRITER_kType_NB /* number of writers, keep it as last enum entry */
+
+} M4WRITER_OutputFileType;
+
+/**
+ ******************************************************************************
+ * enum M4WRITER_OptionID
+ * @brief This enum defines all available options. All the returned values are of
+ * M4OSA_UInt32 type.
+ ******************************************************************************
+*/
+typedef enum {
+ M4WRITER_kMaxAUSize = M4OSA_OPTION_ID_CREATE (M4_READ|M4_WRITE, M4WRITER_COMMON, 0x01),
+ M4WRITER_kMaxChunckSize = M4OSA_OPTION_ID_CREATE (M4_READ|M4_WRITE, M4WRITER_COMMON, 0x02),
+ M4WRITER_kFileSize = M4OSA_OPTION_ID_CREATE (M4_READ , \
+ M4WRITER_COMMON, 0x03), /**< File size if the process was ended when we call the method */
+ M4WRITER_kFileSizeAudioEstimated= M4OSA_OPTION_ID_CREATE (M4_READ ,\
+ M4WRITER_COMMON, 0x04), /**< File size if the process was ended when we call the
+ method, estimated size for audio */
+ M4WRITER_kEmbeddedString = M4OSA_OPTION_ID_CREATE (M4_WRITE ,\
+ M4WRITER_COMMON, 0x05), /**< String embedded at the end of the file(SW - VES) */
+ M4WRITER_kEmbeddedVersion = M4OSA_OPTION_ID_CREATE (M4_WRITE ,\
+ M4WRITER_COMMON, 0x06), /**< Version embedded at the end of the file */
+ M4WRITER_kIntegrationTag = M4OSA_OPTION_ID_CREATE (M4_WRITE ,\
+ M4WRITER_COMMON, 0x07), /**< String embedded at the end of the file (char[60]
+ for integration purpose) */
+ M4WRITER_kMaxFileSize = M4OSA_OPTION_ID_CREATE (M4_WRITE , \
+ M4WRITER_COMMON, 0x08), /**< Maximum file size limitation */
+ M4WRITER_kMaxFileDuration = M4OSA_OPTION_ID_CREATE (M4_WRITE ,\
+ M4WRITER_COMMON, 0x09), /**< Maximum file duration limitation */
+ M4WRITER_kSetFtypBox = M4OSA_OPTION_ID_CREATE (M4_WRITE ,\
+ M4WRITER_COMMON, 0x0A), /**< Set 'ftyp' atom */
+ M4WRITER_kMetaData = M4OSA_OPTION_ID_CREATE (M4_WRITE ,\
+ M4WRITER_COMMON, 0x0B), /**< Additional information to set in the file */
+ M4WRITER_kDSI = M4OSA_OPTION_ID_CREATE (M4_WRITE , \
+ M4WRITER_COMMON, 0x0C), /**< To set DSI of the file (Decoder specific info) */
+ M4WRITER_kJpegReserveFPData = M4OSA_OPTION_ID_CREATE (M4_WRITE ,\
+ M4WRITER_COMMON, 0x0D), /**< Reserve some space in the file for JPEG fast
+ processing data */
+ M4WRITER_kJpegSetFPData = M4OSA_OPTION_ID_CREATE (M4_WRITE , \
+ M4WRITER_COMMON, 0x0E), /**< Write Fast Processing Data in the file*/
+ /* + CRLV6775 -H.264 trimming */
+ M4WRITER_kMUL_PPS_SPS = M4OSA_OPTION_ID_CREATE (M4_WRITE , M4WRITER_COMMON, 0x0F)
+ /* - CRLV6775 -H.264 trimming */
+} M4WRITER_OptionID;
+
+
+/**
+ ******************************************************************************
+ * struct M4WRITER_Header
+ * @brief This structure defines the buffer where an header is put.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_MemAddr8 pBuf; /**< Buffer for the header */
+ M4OSA_UInt32 Size; /**< Size of the data */
+} M4WRITER_Header;
+
+
+/**
+ ******************************************************************************
+ * struct M4WRITER_StreamVideoInfos
+ * @brief This structure defines the specific video stream infos, extension to
+ * M4SYS_StreamDescription.
+ ******************************************************************************
+*/
+typedef struct {
+ M4OSA_UInt32 height; /**< Frame height */
+ M4OSA_UInt32 width; /**< Frame Width */
+ M4OSA_Double fps; /**< Targeted framerate of the video */
+ M4WRITER_Header Header; /**< Sequence header of the video stream,
+ member set to NULL if no header present */
+} M4WRITER_StreamVideoInfos;
+
+
+/**
+ ******************************************************************************
+ * struct M4WRITER_StreamAudioInfos
+ * @brief This structure defines the specific audio stream infos, extension to
+ M4SYS_StreamDescription.
+ ******************************************************************************
+*/
+typedef struct {
+ M4OSA_UInt32 nbSamplesPerSec; /**< Number of Samples per second */
+ M4OSA_UInt16 nbBitsPerSample; /**< Number of Bits in 1 sample */
+ M4OSA_UInt16 nbChannels; /**< Number of channels */
+ M4WRITER_Header Header; /**< Decoder Specific Info of the audiostream,
+ member set to NULL if no DSI present */
+} M4WRITER_StreamAudioInfos;
+
+
+/**
+ ******************************************************************************
+ * enum M4WRITER_Orientation
+ * @brief This enum defines the possible orientation of a frame as described
+ * in the EXIF standard.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4WRITER_OrientationUnknown = 0,
+ M4WRITER_OrientationTopLeft,
+ M4WRITER_OrientationTopRight,
+ M4WRITER_OrientationBottomRight,
+ M4WRITER_OrientationBottomLeft,
+ M4WRITER_OrientationLeftTop,
+ M4WRITER_OrientationRightTop,
+ M4WRITER_OrientationRightBottom,
+ M4WRITER_OrientationLeftBottom
+}M4WRITER_Orientation ;
+
+/**
+ ******************************************************************************
+ * struct M4WRITER_MetaData
+ * @brief This structure defines all the meta data to store in the encoded file.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Char* Description ;
+ M4OSA_Char* PhoneManufacturer ;
+ M4OSA_Char* PhoneModel ;
+ M4OSA_Char* Artist ;
+ M4OSA_Char* Copyright ;
+ M4OSA_Char* Software ;
+ M4OSA_Char* CreationDate;
+ M4WRITER_Orientation Orientation ;
+
+ M4OSA_UInt32 Width ;
+ M4OSA_UInt32 Height ;
+
+ M4OSA_UInt32 ThumbnailWidth ;
+ M4OSA_UInt32 ThumbnailHeight ;
+ M4OSA_Bool ThumbnailPresence ;
+}M4WRITER_MetaData;
+
+
+typedef void* M4WRITER_Context;
+
+typedef M4OSA_ERR (M4WRITER_openWrite) (M4WRITER_Context* hContext,\
+ void* outputFileDescriptor,\
+ M4OSA_FileWriterPointer* pFileWriterPointer,\
+ void* tempFileDescriptor, \
+ M4OSA_FileReadPointer* pFileReaderPointer);
+typedef M4OSA_ERR (M4WRITER_addStream) (M4WRITER_Context pContext,\
+ M4SYS_StreamDescription*streamDescription);
+typedef M4OSA_ERR (M4WRITER_startWriting) (M4WRITER_Context pContext);
+typedef M4OSA_ERR (M4WRITER_closeWrite) (M4WRITER_Context pContext);
+typedef M4OSA_ERR (M4WRITER_setOption) (M4WRITER_Context pContext, \
+ M4OSA_UInt32 optionID, \
+ M4OSA_DataOption optionValue);
+typedef M4OSA_ERR (M4WRITER_getOption) (M4WRITER_Context pContext, \
+ M4OSA_UInt32 optionID, \
+ M4OSA_DataOption optionValue);
+
+
+/**
+ ******************************************************************************
+ * struct M4WRITER_GlobalInterface
+ * @brief Defines all the functions required for a writer shell.
+ ******************************************************************************
+*/
+typedef struct _M4WRITER_GlobalInterface
+{
+ M4WRITER_openWrite* pFctOpen;
+ M4WRITER_addStream* pFctAddStream;
+ M4WRITER_startWriting* pFctStartWriting;
+ M4WRITER_closeWrite* pFctCloseWrite;
+ M4WRITER_setOption* pFctSetOption;
+ M4WRITER_getOption* pFctGetOption;
+} M4WRITER_GlobalInterface;
+
+typedef M4OSA_ERR M4WRITER_startAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,\
+ M4SYS_AccessUnit* pAU);
+typedef M4OSA_ERR M4WRITER_processAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,\
+ M4SYS_AccessUnit* pAU);
+
+/**
+ ******************************************************************************
+ * struct M4WRITER_DataInterface
+ * @brief Defines all the functions required to write data with a writer shell.
+ ******************************************************************************
+*/
+typedef struct _M4WRITER_DataInterface
+{
+ M4WRITER_startAU* pStartAU;
+ M4WRITER_processAU* pProcessAU;
+
+ M4WRITER_Context pWriterContext;
+
+} M4WRITER_DataInterface;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4WRITER_COMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4_BitStreamParser.h b/libvideoeditor/vss/common/inc/M4_BitStreamParser.h
new file mode 100755
index 0000000..c875458
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_BitStreamParser.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4_BitStreamParser.h
+ * @brief MPEG-4 File Format bit stream utility
+ * @note This file contains utility functions used to parse MPEG specific
+ * data structures.
+ ************************************************************************
+*/
+#ifndef __M4_BITSTREAMPARSER_H__
+#define __M4_BITSTREAMPARSER_H__
+
+#include "M4OSA_Types.h"
+
+/**
+* M4_BitStreamParser_Init.
+*
+* Allocates the context and initializes internal data
+*
+* @param pContext : A pointer to the context internally used by the package - ALLOCATED BY THE
+* FUNCTION (M4OSA_NULL if allocation fails)
+* @param bitStream : A pointer to the bitstream - must be 32 bits as access are 32 bits
+* @param size : The size of the bitstream in bytes
+*
+*/
+void M4_BitStreamParser_Init(void** pContext, void* pBitStream, M4OSA_Int32 size);
+
+/**
+ ************************************************************************
+ * @brief Clean up context
+ * @param pContext (IN/OUT) M4_BitStreamParser context.
+ ************************************************************************
+*/
+void M4_BitStreamParser_CleanUp(void* pContext);
+
+/**
+ ************************************************************************
+ * @brief Read the next <length> bits in the bitstream.
+ * @note The function does not update the bitstream pointer.
+ * @param pContext (IN/OUT) M4_BitStreamParser context.
+ * @param length (IN) The number of bits to extract from the bitstream
+ * @return the read bits
+ ************************************************************************
+*/
+M4OSA_UInt32 M4_BitStreamParser_ShowBits(void* pContext, M4OSA_Int32 length);
+
+/**
+ ************************************************************************
+ * @brief Increment the bitstream pointer of <length> bits.
+ * @param pContext (IN/OUT) M4_BitStreamParser context.
+ * @param length (IN) The number of bit to shift the bitstream
+ ************************************************************************
+*/
+void M4_BitStreamParser_FlushBits(void* pContext, M4OSA_Int32 length);
+
+/**
+ ************************************************************************
+ * @brief Get a pointer to the current byte pointed by the bitstream pointer.
+ * It does not update the bitstream pointer
+ *
+ * @param pContext : A pointer to the context internally used by the package
+ * @param length : The number of bit to extract from the bitstream
+ *
+ * @returns the read bits
+*/
+M4OSA_UInt32 M4_BitStreamParser_GetBits(void* pContext,M4OSA_Int32 bitPos, M4OSA_Int32 length);
+
+/**
+* M4_BitStreamParser_Restart resets the bitstream indexes.
+*
+* @param pContext : A pointer to the context internally used by the package
+*
+*/
+void M4_BitStreamParser_Restart(void* pContext);
+
+/**
+ ************************************************************************
+ * @brief Get a pointer to the current byte pointed by the bitstream pointer.
+ * @returns pointer to the current location in the bitstream
+ * @note It should be used carefully as the pointer is in the bitstream itself
+ * and no copy is made.
+ * @param pContext (IN/OUT) M4_BitStreamParser context.
+*/
+M4OSA_UInt8* M4_BitStreamParser_GetCurrentbitStreamPointer(void* pContext);
+
+/**
+* M4_BitStreamParser_GetSize gets the size of the bitstream in bytes
+*
+* @param pContext : A pointer to the context internally used by the package
+*
+* @returns the size of the bitstream in bytes
+*/
+M4OSA_Int32 M4_BitStreamParser_GetSize(void* pContext);
+
+void M4_MPEG4BitStreamParser_Init(void** pContext, void* pBitStream, M4OSA_Int32 size);
+
+/**
+* getMpegLengthFromInteger returns a decoded size value from an encoded one (SDL)
+*
+* @param pContext : A pointer to the context internally used by the package
+* @param val : encoded value
+*
+* @returns size in a human readable form
+*/
+
+M4OSA_Int32 M4_MPEG4BitStreamParser_GetMpegLengthFromInteger(void* pContext, M4OSA_UInt32 val);
+
+
+/**
+ ************************************************************************
+ * @brief Decode an MPEG4 Systems descriptor size from an encoded SDL size data.
+ * @note The value is read from the current bitstream location.
+ * @param pContext (IN/OUT) M4_BitStreamParser context.
+ * @return Size in a human readable form
+ ************************************************************************
+*/
+M4OSA_Int32 M4_MPEG4BitStreamParser_GetMpegLengthFromStream(void* pContext);
+
+#endif /*__M4_BITSTREAMPARSER_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4_Common.h b/libvideoeditor/vss/common/inc/M4_Common.h
new file mode 100755
index 0000000..760a7da
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_Common.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file M4_Common.h
+ * @brief Common data structure between shells
+ * @note
+*************************************************************************
+*/
+#ifndef __M4_COMMON_H__
+#define __M4_COMMON_H__
+
+#include "M4OSA_Types.h"
+
+/**
+ ************************************************************************
+ * structure _parameterSet
+ * @brief This structure defines the structure of parameters for the avc
+ * decoder specific info
+ * @note
+ ************************************************************************
+*/
+typedef struct _parameterSet
+{
+ M4OSA_UInt16 m_length; /* Number of items*/
+ M4OSA_UInt8* m_pParameterSetUnit; /* Array of items*/
+} ParameterSet ;
+
+/**
+ ************************************************************************
+ * structure _avcSpecificInfo
+ * @brief This structure defines the structure of specific info for the avc decoder
+ * @note
+ ************************************************************************
+*/
+typedef struct _avcSpecificInfo
+{
+ M4OSA_UInt8 m_nalUnitLength; /* length in bytes of the NALUnitLength
+ field in a AVC sample */
+ M4OSA_UInt8 m_numOfSequenceParameterSets; /* Number of sequence parameter sets*/
+ M4OSA_UInt8 m_numOfPictureParameterSets; /* Number of picture parameter sets*/
+ ParameterSet *m_pSequenceParameterSet; /* Sequence parameter sets array*/
+ ParameterSet *m_pPictureParameterSet; /* Picture parameter sets array*/
+} AvcSpecificInfo ;
+
+/**
+ ************************************************************************
+ * structure M4_SynthesisAudioInfo
+ * @brief This structure contains specific pointers used for synthesis audio format
+ ************************************************************************
+*/
+typedef struct _synthesisAudioInfo
+{
+ M4OSA_Void* m_pInputBuf;
+ M4OSA_Void* m_pInputInfo;
+ M4OSA_UInt16 m_uiNbSubFramePerStep;
+ M4OSA_UInt32 m_uiUsedBytes;
+} M4_SynthesisAudioInfo;
+
+
+/*
+ ************************************************************************
+ * enum M4_AACDownsamplingMode
+ * @brief This enum states modes for Down sampling
+ ************************************************************************
+*/
+typedef enum
+{
+ AAC_kDS_OFF = 0, /**< No Down sampling */
+ AAC_kDS_BY_2 = 1, /**< Down sampling by 2
+ Profile = AAC :
+ output sampling rate = aac_samp_freq/2
+ Profile = HE_AAC and input is AAC:
+ Output sampling rate = aac_samp_freq.(No downsampling).
+ Profile = HE_AAC and input is HE_AAC:
+ Output sampling rate = aac_samp_freq (Downsampling
+ occurs in SBR tool).
+ case profile = HE_AAC_v2 :
+ Not Supported */
+ AAC_kDS_BY_3 = 2, /**< Down sampling by 3 - only for AAC profile */
+ AAC_kDS_BY_4 = 3, /**< Down sampling by 4 - only for AAC profile */
+ AAC_kDS_BY_8 = 4 /**< Down sampling by 8 - only for AAC profile */
+
+} M4_AACDownsamplingMode;
+
+
+/*
+ ************************************************************************
+ * enum M4_AACOutputMode
+ * @brief This enum defines the output mode
+ ************************************************************************
+*/
+typedef enum
+{
+ AAC_kMono = 0, /**< Output is Mono */
+ AAC_kStereo = 1 /**< Output is Stereo */
+} M4_AACOutputMode;
+
+
+/*
+ ************************************************************************
+ * enum M4_AACDecProfile
+ * @brief This enum defines the AAC decoder profile
+ ************************************************************************
+*/
+typedef enum
+{
+ AAC_kAAC = 0, /**< AAC profile (only AAC LC object are supported) */
+ AAC_kHE_AAC = 1, /**< HE AAC or AAC+ profile (SBR in LP Mode) */
+ AAC_kHE_AAC_v2 = 2 /**< HE AAC v2 or Enhanced AAC+ profile (SBR Tool in HQ Mode) */
+} M4_AACDecProfile;
+
+
+/**
+ ************************************************************************
+ * structure M4_AacDecoderConfig
+ * @brief This structure defines specific settings according to
+ * the user requirements
+ ************************************************************************
+*/
+typedef struct
+{
+ M4_AACDecProfile m_AACDecoderProfile;
+ M4_AACDownsamplingMode m_DownSamplingMode;
+ M4_AACOutputMode m_OutputMode;
+
+} M4_AacDecoderConfig;
+
+
+/**
+ ************************************************************************
+ * structure M4READER_AudioSbrUserdata
+ * @brief This structure defines the user's data needed to decode the
+ * AACplus stream
+ * @note The field m_pFirstAU is used in case of local files and
+ * the field m_bIsSbrEnabled is used in streaming case.
+ ************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Void* m_pFirstAU; /**< The first AU from where SBR data are
+ extracted (local file case)*/
+ M4OSA_Bool m_bIsSbrEnabled; /**< A boolean that indicates if the stream is
+ AACplus (streaming case)*/
+ M4_AacDecoderConfig* m_pAacDecoderUserConfig;/**< Decoder specific user setting */
+
+} M4READER_AudioSbrUserdata;
+
+#endif /* __M4_COMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4_Utils.h b/libvideoeditor/vss/common/inc/M4_Utils.h
new file mode 100755
index 0000000..a1e0829
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_Utils.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file M4_Utils.h
+ * @brief Utilities
+ * @note This file defines utility macros
+*************************************************************************
+*/
+#ifndef __M4_UTILS_H__
+#define __M4_UTILS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* M4_MediaTime definition
+ This type is used internally by some shell components */
+#include "M4OSA_Types.h"
+typedef M4OSA_Double M4_MediaTime;
+
+/* GET_MEMORY32 macro definition
+ This macro is used by the 3GP reader*/
+#ifdef __BIG_ENDIAN
+#define GET_MEMORY32(x) (x)
+#else
+#define GET_MEMORY32(x) ( (((x)&0xff)<<24) | (((x)&0xff00)<<8) |\
+ (((x)&0xff0000)>>8) | (((x)&0xff000000)>>24) )
+#endif /*__BIG_ENDIAN*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4_UTILS_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h b/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h
new file mode 100755
index 0000000..9e7d03f
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4_VideoEditingCommon.h
+ * @brief Video Editing (VSS3GPP, MCS, PTO3GPP) common definitions
+ * @note
+ ******************************************************************************
+*/
+
+#ifndef __M4_VIDEOEDITINGCOMMON_H__
+#define __M4_VIDEOEDITINGCOMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Version */
+/* CHANGE_VERSION_HERE */
+#define M4VIDEOEDITING_VERSION_MAJOR 3
+#define M4VIDEOEDITING_VERSION_MINOR 1
+#define M4VIDEOEDITING_VERSION_REVISION 0
+
+#define M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE 0x7fffffff
+#define M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL 0x7fffffff
+
+/**
+ ******************************************************************************
+ * enum M4VIDEOEDITING_FileType
+ * @brief This enum defines the file format type to be used
+ ******************************************************************************
+*/
+typedef enum {
+ M4VIDEOEDITING_kFileType_3GPP = 0, /**< 3GPP file media type : input & output */
+ M4VIDEOEDITING_kFileType_MP4 = 1, /**< MP4 file media type : input */
+ M4VIDEOEDITING_kFileType_AMR = 2, /**< AMR file media type : input & output */
+ M4VIDEOEDITING_kFileType_MP3 = 3, /**< MP3 file media type : input */
+ M4VIDEOEDITING_kFileType_PCM = 4, /**< PCM RAW file media type : input RC */
+ M4VIDEOEDITING_kFileType_JPG = 5, /**< STILL PICTURE FEATURE: JPG file media
+ type : input AND OUTPUT */
+ M4VIDEOEDITING_kFileType_BMP = 6, /**< STILL PICTURE FEATURE: BMP file media
+ type : input only */
+ M4VIDEOEDITING_kFileType_GIF = 7, /**< STILL PICTURE FEATURE: GIF file media
+ type : input only */
+ M4VIDEOEDITING_kFileType_PNG = 8, /**< STILL PICTURE FEATURE: PNG file media
+ type : input only */
+ M4VIDEOEDITING_kFileType_ARGB8888 = 9, /**< STILL PICTURE FEATURE: ARGB8888 file
+ media type : input only */
+ M4VIDEOEDITING_kFileType_M4V = 10, /**< M4V file media type : input only */
+ M4VIDEOEDITING_kFileType_Unsupported = 255 /**< Unsupported file media type */
+} M4VIDEOEDITING_FileType;
+
+
+/**
+ ******************************************************************************
+ * enum M4VIDEOEDITING_VideoFormat
+ * @brief This enum defines the available video compression formats.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VIDEOEDITING_kNoneVideo = 0, /**< Video not present */
+ M4VIDEOEDITING_kH263 = 1, /**< H263 video */
+ M4VIDEOEDITING_kH264 = 2, /**< H264 video */
+ M4VIDEOEDITING_kMPEG4 = 3, /**< MPEG-4 video */
+ M4VIDEOEDITING_kNullVideo = 254, /**< Do not care video type, use NULL encoder */
+ M4VIDEOEDITING_kUnsupportedVideo = 255 /**< Unsupported video stream type */
+} M4VIDEOEDITING_VideoFormat;
+
+/**
+ ******************************************************************************
+ * enum M4VIDEOEDITING_AudioFormat
+ * @brief This enum defines the available audio format.
+ * @note HE_AAC, HE_AAC_v2 and MP3 can not be used for the output audio format
+ ******************************************************************************
+*/
+typedef enum {
+ M4VIDEOEDITING_kNoneAudio = 0, /**< Audio not present */
+ M4VIDEOEDITING_kAMR_NB = 1, /**< AMR Narrow Band audio */
+ M4VIDEOEDITING_kAAC = 2, /**< AAC audio */
+ M4VIDEOEDITING_kAACplus = 3, /**< AAC+ audio */
+ M4VIDEOEDITING_keAACplus = 4, /**< Enhanced AAC+ audio */
+ M4VIDEOEDITING_kMP3 = 5, /**< MP3 audio */
+ M4VIDEOEDITING_kEVRC = 6, /**< EVRC audio */
+ M4VIDEOEDITING_kPCM = 7, /**< PCM audio */
+ M4VIDEOEDITING_kNullAudio = 254, /**< Do not care audio type, use NULL encoder */
+ M4VIDEOEDITING_kUnsupportedAudio = 255 /**< Unsupported audio stream type */
+} M4VIDEOEDITING_AudioFormat;
+
+/**
+ ******************************************************************************
+ * enum M4VIDEOEDITING_VideoFrameSize
+ * @brief This enum defines the available output frame sizes.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VIDEOEDITING_kSQCIF=0, /**< SQCIF 128x96 */
+ M4VIDEOEDITING_kQQVGA, /**< QQVGA 160x120 */
+ M4VIDEOEDITING_kQCIF, /**< QCIF 176x144 */
+ M4VIDEOEDITING_kQVGA, /**< QVGA 320x240 */
+ M4VIDEOEDITING_kCIF, /**< CIF 352x288 */
+ M4VIDEOEDITING_kVGA, /**< VGA 640x480 */
+/* +PR LV5807 */
+ M4VIDEOEDITING_kWVGA, /**< WVGA 800x480 */
+ M4VIDEOEDITING_kNTSC, /**< NTSC 720x480 */
+/* -PR LV5807 */
+
+/* +CR Google */
+ M4VIDEOEDITING_k640_360, /**< 640x360 */
+ M4VIDEOEDITING_k854_480, /**< 854x480 */
+ M4VIDEOEDITING_k1280_720, /**< 720p 1280x720 */
+ M4VIDEOEDITING_k1080_720, /**< 720p 1080x720 */
+ M4VIDEOEDITING_k960_720, /**< 720p 960x720 */
+ M4VIDEOEDITING_k1920_1080 /**<1080p 1920x1080*/
+/* -CR Google */
+
+} M4VIDEOEDITING_VideoFrameSize;
+
+
+/**
+ ******************************************************************************
+ * enum M4VIDEOEDITING_VideoFramerate
+ * @brief This enum defines the available video framerates.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VIDEOEDITING_k5_FPS = 0,
+ M4VIDEOEDITING_k7_5_FPS,
+ M4VIDEOEDITING_k10_FPS,
+ M4VIDEOEDITING_k12_5_FPS,
+ M4VIDEOEDITING_k15_FPS,
+ M4VIDEOEDITING_k20_FPS,
+ M4VIDEOEDITING_k25_FPS,
+ M4VIDEOEDITING_k30_FPS
+} M4VIDEOEDITING_VideoFramerate;
+
+
+/**
+ ******************************************************************************
+ * enum M4VIDEOEDITING_AudioSamplingFrequency
+ * @brief This enum defines the available output audio sampling frequencies
+ * @note 8 kHz is the only supported frequency for AMR-NB output
+ * @note 16 kHz is the only supported frequency for AAC output
+ * @note The recommended practice is to use the Default value when setting the encoding parameters
+ ******************************************************************************
+*/
+typedef enum {
+ M4VIDEOEDITING_kDefault_ASF = 0, /**< Default Audio Sampling Frequency for selected
+ Audio output format */
+ M4VIDEOEDITING_k8000_ASF = 8000, /**< Note: Default audio Sampling Frequency for
+ AMR-NB output */
+ M4VIDEOEDITING_k11025_ASF = 11025,
+ M4VIDEOEDITING_k12000_ASF = 12000,
+ M4VIDEOEDITING_k16000_ASF = 16000, /**< Note: Default audio Sampling Frequency
+ for AAC output */
+ M4VIDEOEDITING_k22050_ASF = 22050,
+ M4VIDEOEDITING_k24000_ASF = 24000,
+ M4VIDEOEDITING_k32000_ASF = 32000,
+ M4VIDEOEDITING_k44100_ASF = 44100,
+ M4VIDEOEDITING_k48000_ASF = 48000
+
+} M4VIDEOEDITING_AudioSamplingFrequency;
+
+
+/**
+ ******************************************************************************
+ * enum M4VIDEOEDITING_Bitrate
+ * @brief This enum defines the available audio or video bitrates.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VIDEOEDITING_kVARIABLE_KBPS = -1, /* no regulation */
+ M4VIDEOEDITING_kUndefinedBitrate = 0, /* undefined */
+ M4VIDEOEDITING_k8_KBPS = 8000,
+ M4VIDEOEDITING_k9_2_KBPS = 9200, /* evrc only */
+ M4VIDEOEDITING_k12_2_KBPS = 12200, /* amr only */
+ M4VIDEOEDITING_k16_KBPS = 16000,
+ M4VIDEOEDITING_k24_KBPS = 24000,
+ M4VIDEOEDITING_k32_KBPS = 32000,
+ M4VIDEOEDITING_k40_KBPS = 40000,
+ M4VIDEOEDITING_k48_KBPS = 48000,
+ M4VIDEOEDITING_k56_KBPS = 56000,
+ M4VIDEOEDITING_k64_KBPS = 64000,
+ M4VIDEOEDITING_k80_KBPS = 80000,
+ M4VIDEOEDITING_k96_KBPS = 96000,
+ M4VIDEOEDITING_k112_KBPS = 112000,
+ M4VIDEOEDITING_k128_KBPS = 128000,
+ M4VIDEOEDITING_k160_KBPS = 160000,
+ M4VIDEOEDITING_k192_KBPS = 192000,
+ M4VIDEOEDITING_k224_KBPS = 224000,
+ M4VIDEOEDITING_k256_KBPS = 256000,
+ M4VIDEOEDITING_k288_KBPS = 288000,
+ M4VIDEOEDITING_k320_KBPS = 320000,
+ M4VIDEOEDITING_k384_KBPS = 384000,
+ M4VIDEOEDITING_k512_KBPS = 512000,
+ M4VIDEOEDITING_k800_KBPS = 800000,
+/*+ New Encoder bitrates */
+ M4VIDEOEDITING_k2_MBPS = 2000000,
+ M4VIDEOEDITING_k5_MBPS = 5000000,
+ M4VIDEOEDITING_k8_MBPS = 8000000,
+/*- New Encoder bitrates */
+} M4VIDEOEDITING_Bitrate;
+
+
+/**
+ ******************************************************************************
+ * structure M4VIDEOEDITING_FtypBox
+ * @brief Information to build the 'ftyp' atom
+ ******************************************************************************
+*/
+#define M4VIDEOEDITING_MAX_COMPATIBLE_BRANDS 10
+typedef struct
+{
+ /* All brand fields are actually char[4] stored in big-endian integer format */
+
+ M4OSA_UInt32 major_brand; /* generally '3gp4' */
+ M4OSA_UInt32 minor_version; /* generally '0000' or 'x.x ' */
+ M4OSA_UInt32 nbCompatibleBrands; /* number of compatible brands */
+ M4OSA_UInt32 compatible_brands[M4VIDEOEDITING_MAX_COMPATIBLE_BRANDS]; /* array of
+ max compatible brands */
+
+} M4VIDEOEDITING_FtypBox;
+
+/* Some useful brands */
+#define M4VIDEOEDITING_BRAND_0000 0x00000000
+#define M4VIDEOEDITING_BRAND_3G2A 0x33673261
+#define M4VIDEOEDITING_BRAND_3GP4 0x33677034
+#define M4VIDEOEDITING_BRAND_3GP5 0x33677035
+#define M4VIDEOEDITING_BRAND_3GP6 0x33677036
+#define M4VIDEOEDITING_BRAND_AVC1 0x61766331
+#define M4VIDEOEDITING_BRAND_EMP 0x656D7020
+#define M4VIDEOEDITING_BRAND_ISOM 0x69736F6D
+#define M4VIDEOEDITING_BRAND_MP41 0x6D703431
+#define M4VIDEOEDITING_BRAND_MP42 0x6D703432
+#define M4VIDEOEDITING_BRAND_VFJ1 0x76666A31
+
+/**
+ ******************************************************************************
+ * structure M4VIDEOEDITING_ClipProperties
+ * @brief This structure gathers the information related to an input file
+ ******************************************************************************
+*/
+typedef struct {
+
+ /**
+ * Common */
+ M4OSA_Bool bAnalysed; /**< Flag to know if the file has
+ been already analysed or not */
+ M4OSA_UInt8 Version[3]; /**< Version of the libraries used to
+ perform the clip analysis */
+ M4OSA_UInt32 uiClipDuration; /**< Clip duration (in ms) */
+ M4VIDEOEDITING_FileType FileType; /**< .3gp, .amr, .mp3 */
+ M4VIDEOEDITING_FtypBox ftyp; /**< 3gp 'ftyp' atom, major_brand =
+ 0 if not used */
+
+ /**
+ * Video */
+ M4VIDEOEDITING_VideoFormat VideoStreamType; /**< Format of the video stream */
+ M4OSA_UInt32 uiClipVideoDuration; /**< Video track duration (in ms) */
+ M4OSA_UInt32 uiVideoBitrate; /**< Video average bitrate (in bps)*/
+ M4OSA_UInt32 uiVideoMaxAuSize; /**< Maximum Access Unit size of the
+ video stream */
+ M4OSA_UInt32 uiVideoWidth; /**< Video frame width */
+ M4OSA_UInt32 uiVideoHeight; /**< Video frame height */
+ M4OSA_UInt32 uiVideoTimeScale; /**< Video time scale */
+ M4OSA_Float fAverageFrameRate; /**< Average frame rate of the video
+ stream */
+ M4OSA_Int32 uiVideoLevel; /**< video level*/
+ M4OSA_Int32 uiVideoProfile; /**< video profile */
+
+ M4OSA_Bool bMPEG4dataPartition; /**< MPEG-4 uses data partitioning */
+ M4OSA_Bool bMPEG4rvlc; /**< MPEG-4 uses RVLC tool */
+ M4OSA_Bool bMPEG4resynchMarker; /**< MPEG-4 stream uses Resynch
+ Marker */
+
+ /**
+ * Audio */
+ M4VIDEOEDITING_AudioFormat AudioStreamType; /**< Format of the audio stream */
+ M4OSA_UInt32 uiClipAudioDuration; /**< Audio track duration (in ms) */
+ M4OSA_UInt32 uiAudioBitrate; /**< Audio average bitrate (in bps) */
+ M4OSA_UInt32 uiAudioMaxAuSize; /**< Maximum Access Unit size of the
+ audio stream */
+ M4OSA_UInt32 uiNbChannels; /**< Number of channels
+ (1=mono, 2=stereo) */
+ M4OSA_UInt32 uiSamplingFrequency; /**< Sampling audio frequency
+ (8000 for amr, 16000 or more for aac) */
+ M4OSA_UInt32 uiExtendedSamplingFrequency; /**< Extended frequency for
+ AAC+, eAAC+ streams */
+ M4OSA_UInt32 uiDecodedPcmSize; /**< Size of the decoded PCM data */
+
+ /**
+ * Video editing compatibility chart */
+ M4OSA_Bool bVideoIsEditable; /**< Video stream can be decoded and
+ re-encoded */
+ M4OSA_Bool bAudioIsEditable; /**< Audio stream can be decoded and
+ re-encoded */
+ M4OSA_Bool bVideoIsCompatibleWithMasterClip; /**< Video properties match reference
+ clip properties */
+ M4OSA_Bool bAudioIsCompatibleWithMasterClip; /**< Audio properties match reference
+ clip properties */
+
+ /**
+ * Still Picture */
+ M4OSA_UInt32 uiStillPicWidth; /**< Image width */
+ M4OSA_UInt32 uiStillPicHeight; /**< Image height */
+ M4OSA_UInt32 uiClipAudioVolumePercentage;
+ M4OSA_Bool bSetImageData;
+
+ M4OSA_Int32 videoRotationDegrees; /**< Video rotation degree */
+
+} M4VIDEOEDITING_ClipProperties;
+
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* __M4_VIDEOEDITINGCOMMON_H__ */
+
diff --git a/libvideoeditor/vss/common/inc/MonoTo2I_16.h b/libvideoeditor/vss/common/inc/MonoTo2I_16.h
new file mode 100755
index 0000000..74b1c8a
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/MonoTo2I_16.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MONOTO2I_16_H_
+#define _MONOTO2I_16_H_
+
+
+void MonoTo2I_16( const short *src,
+ short *dst,
+ short n);
+
+/**********************************************************************************/
+
+#endif /* _MONOTO2I_16_H_ */
+
+/**********************************************************************************/
+
diff --git a/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h
new file mode 100755
index 0000000..13cac6d
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NXPSW_COMPILERSWITCHES_H
+#define NXPSW_COMPILERSWITCHES_H
+
+/* ----- Main features ----- */
+#include "NXPSW_CompilerSwitches_MCS.h" /* Transcoder */
+
+/* ----- Add-ons ----- */
+
+#endif /* NXPSW_COMPILERSWITCHES_H */
+
diff --git a/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h
new file mode 100755
index 0000000..e1b62e1
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef NXPSW_COMPILERSWITCHES_MCS_H
+#define NXPSW_COMPILERSWITCHES_MCS_H
+
+ /***********/
+ /* READERS */
+ /***********/
+
+/* ----- AMR reader support ----- */
+#define M4VSS_SUPPORT_READER_AMR /**< [default] Support .amr files */
+
+/* ----- 3GPP reader support ----- */
+#define M4VSS_SUPPORT_READER_3GP /**< [default] Support .mp4, .3gp files */
+
+
+/* ----- MP3 reader support ----- */
+#define M4VSS_SUPPORT_READER_MP3 /**< [default] Support .mp3 files */
+
+/* ----- RAW reader support ----- */
+#define M4VSS_SUPPORT_READER_PCM /**< [default] Support .pcm files */
+
+
+ /************/
+ /* DECODERS */
+ /************/
+
+/* ----- AMR NB decoder support ----- */
+#define M4VSS_SUPPORT_AUDEC_AMRNB /**< [default] Support AMR NB streams */
+
+/* ----- AAC decoder support ----- */
+#define M4VSS_SUPPORT_AUDEC_AAC /**< [default] Support AAC, AAC+ and eAAC+ streams */
+#define M4VSS_SUPPORT_VIDEC_NULL
+
+/* ----- MP4/H263 video decoder support ----- */
+#define M4VSS_SUPPORT_VIDEC_3GP /**< [default] Support mpeg4 and H263 decoders */
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+#define GET_DECODER_CONFIG_INFO
+#endif
+
+#define M4VSS_SUPPORT_VIDEO_AVC /**< [default] Support H264 decoders */
+
+/* ----- MP3 decoder support----- */
+#define M4VSS_SUPPORT_AUDEC_MP3 /**< [default] Support MP3 decoders */
+
+
+/* ----- NULL decoder support----- */
+#define M4VSS_SUPPORT_AUDEC_NULL /** [default] Support PCM reading */
+
+
+ /***********/
+ /* WRITERS */
+ /***********/
+
+/* ----- 3gp writer ----- */
+#define M4VSS_SUPPORT_WRITER_3GPP /**< [default] support encapsulating in 3gp format
+ {amr,aac} x {mpeg4,h263} */
+
+
+
+
+
+ /************/
+ /* ENCODERS */
+ /************/
+
+/* ----- mpeg4 & h263 encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_MPEG4 /**< [default] support encoding in mpeg4 and
+ h263 format {yuv,rgb} */
+
+/* ----- h264 encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_AVC
+
+/* ----- amr encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_AMR /**< [default] support encoding in amr 12.2 format {amr,wav} */
+
+/* ----- aac encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_AAC /**< [default] support encoding in aac format {amr,wav} */
+
+
+/* ----- mp3 encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_MP3 /**< [default] support encoding in mp3 format {mp3} */
+
+ /************/
+ /* FEATURES */
+ /************/
+
+/* ----- VSS3GPP & xVSS ----- */
+#define M4VSS_SUPPORT_EXTENDED_FEATURES /**< [default] if defined, implementation is xVSS else
+ it is VSS3GPP */
+
+/* ----- SPS ----- */
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+
+//#define M4SPS_GIF_NOT_SUPPORTED /**< [option] do not support GIF format in still picture api */
+//#define M4SPS_JPEG_NOT_SUPPORTED /**< [option] do not support JPEG format in still picture api */
+//#define M4SPS_PNG_NOT_SUPPORTED /**< [option] do not support PNG format in still picture api */
+#define M4SPS_WBMP_NOT_SUPPORTED /**< [option] do not support WBMP format in still picture api */
+#define M4SPS_BGR565_COLOR_OUTPUT /**< [option] output in still picture api is BGR565
+ (default = BGR24) */
+
+#else
+
+#define M4SPS_GIF_NOT_SUPPORTED /**< [option] do not support GIF format in still picture api */
+//#define M4SPS_JPEG_NOT_SUPPORTED /**< [option] do not support JPEG format in still picture api */
+#define M4SPS_PNG_NOT_SUPPORTED /**< [option] do not support PNG format in still picture api */
+#define M4SPS_WBMP_NOT_SUPPORTED /**< [option] do not support WBMP format in still picture api */
+//#define M4SPS_BGR565_COLOR_OUTPUT /**< [option] output in still picture api is BGR565
+// (default = BGR24) */
+
+#endif
+
+#define M4VSS_ENABLE_EXTERNAL_DECODERS
+
+#define M4VSS_SUPPORT_OMX_CODECS
+
+#endif /* NXPSW_COMPILERSWITCHES_MCS_H */
+
diff --git a/libvideoeditor/vss/common/inc/SSRC.h b/libvideoeditor/vss/common/inc/SSRC.h
new file mode 100755
index 0000000..2b1cfcf
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/SSRC.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/****************************************************************************************/
+/* */
+/* Project:: */
+/* %name: SSRC.h % */
+/* */
+/****************************************************************************************/
+
+/*
+ The input and output blocks of the SRC are by default blocks of 40 ms. This means that
+ the following default block sizes are used:
+
+ Fs Default Block size
+ ----- ----------
+ 8000 320
+ 11025 441
+ 12000 480
+ 16000 640
+ 22050 882
+ 24000 960
+ 32000 1280
+ 44100 1764
+ 48000 1920
+
+ An API is provided to change the default block size into any multiple of the minimal
+ block size.
+
+ All the sampling rates above are supported as input and as output sampling rate
+*/
+
+#ifndef __SSRC_H__
+#define __SSRC_H__
+
+/****************************************************************************************
+ INCLUDES
+*****************************************************************************************/
+
+#include "LVM_Types.h"
+
+/****************************************************************************************
+ DEFINITIONS
+*****************************************************************************************/
+
+#define SSRC_INSTANCE_SIZE 548
+#define SSRC_INSTANCE_ALIGNMENT 4
+#define SSRC_SCRATCH_ALIGNMENT 4
+
+/****************************************************************************************
+ TYPE DEFINITIONS
+*****************************************************************************************/
+
+/* Status return values */
+typedef enum
+{
+ SSRC_OK = 0, /* Successful return from a routine */
+ SSRC_INVALID_FS = 1, /* The input or the output sampling rate is
+ invalid */
+ SSRC_INVALID_NR_CHANNELS = 2, /* The number of channels is not equal to mono
+ or stereo */
+ SSRC_NULL_POINTER = 3, /* One of the input pointers is NULL */
+ SSRC_WRONG_NR_SAMPLES = 4, /* Invalid number of samples */
+ SSRC_ALLINGMENT_ERROR = 5, /* The instance memory or the scratch memory
+ is not aligned */
+ SSRC_INVALID_MODE = 6, /* A wrong value has been used for the mode
+ parameter */
+ SSRC_INVALID_VALUE = 7, /* An invalid (out of range) value has been
+ used for one of the parameters */
+ LVXXX_RETURNSTATUS_DUMMY = LVM_MAXENUM
+} SSRC_ReturnStatus_en;
+
+/* Instance memory */
+typedef struct
+{
+ LVM_INT32 Storage [ SSRC_INSTANCE_SIZE/4 ];
+} SSRC_Instance_t;
+
+/* Scratch memory */
+typedef LVM_INT32 SSRC_Scratch_t;
+
+/* Number of samples mode */
+typedef enum
+{
+ SSRC_NR_SAMPLES_DEFAULT = 0,
+ SSRC_NR_SAMPLES_MIN = 1,
+ SSRC_NR_SAMPLES_DUMMY = LVM_MAXENUM
+} SSRC_NR_SAMPLES_MODE_en;
+
+/* Instance parameters */
+typedef struct
+{
+ LVM_Fs_en SSRC_Fs_In;
+ LVM_Fs_en SSRC_Fs_Out;
+ LVM_Format_en SSRC_NrOfChannels;
+ LVM_INT16 NrSamplesIn;
+ LVM_INT16 NrSamplesOut;
+} SSRC_Params_t;
+
+
+/****************************************************************************************
+ FUNCTION PROTOTYPES
+*****************************************************************************************/
+
+
+/****************************************************************************************/
+/* */
+/* FUNCTION: SSRC_GetNrSamples */
+/* */
+/* DESCRIPTION: */
+/* This function retrieves the number of samples (or sample pairs for stereo) to be */
+/* used as input and as output of the SSRC module. */
+/* */
+/* PARAMETERS: */
+/* Mode There are two modes: */
+/* - SSRC_NR_SAMPLES_DEFAULT. In this mode, the function */
+/* will return the number of samples for 40 ms blocks */
+/* - SSRC_NR_SAMPLES_MIN will return the minimal number */
+/* of samples that is supported for this conversion */
+/* ratio. Each integer multiple of this ratio will */
+/* be accepted by the SSRC_Init function */
+/* */
+/* pSSRC_Params pointer to the instance parameters */
+/* */
+/* RETURNS: */
+/* SSRC_OK Succeeded */
+/* SSRC_INVALID_FS When the requested input or output sampling rates */
+/* are invalid. */
+/* SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO */
+/* or LVM_STEREO */
+/* SSRC_NULL_POINTER When pSSRC_Params is a NULL pointer */
+/* SSRC_INVALID_MODE When Mode is not a valid setting */
+/* */
+/* */
+/* NOTES: */
+/* */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_GetNrSamples( SSRC_NR_SAMPLES_MODE_en Mode,
+ SSRC_Params_t* pSSRC_Params );
+
+
+/****************************************************************************************/
+/* */
+/* FUNCTION: SSRC_GetScratchSize */
+/* */
+/* DESCRIPTION: */
+/* This function retrieves the scratch size for a given conversion ratio and */
+/* for given buffer sizes at the input and at the output */
+/* */
+/* PARAMETERS: */
+/* pSSRC_Params pointer to the instance parameters */
+/* pScratchSize pointer to the scratch size. The SSRC_GetScratchSize */
+/* function will fill in the correct value (in bytes). */
+/* */
+/* RETURNS: */
+/* SSRC_OK when the function call succeeds */
+/* SSRC_INVALID_FS When the requested input or output sampling rates */
+/* are invalid. */
+/* SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO */
+/* or LVM_STEREO */
+/* SSRC_NULL_POINTER When any of the input pointers is a NULL pointer */
+/* SSRC_WRONG_NR_SAMPLES When the number of samples on the input or on the output */
+/* are incorrect */
+/* */
+/* NOTES: */
+/* */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_GetScratchSize( SSRC_Params_t* pSSRC_Params,
+ LVM_INT32* pScratchSize );
+
+
+/****************************************************************************************/
+/* */
+/* FUNCTION: SSRC_Init */
+/* */
+/* DESCRIPTION: */
+/* This function is used to initialize the SSRC module instance. */
+/* */
+/* PARAMETERS: */
+/* pSSRC_Instance Instance pointer */
+/* */
+/* pSSRC_Scratch pointer to the scratch memory */
+/* pSSRC_Params pointer to the instance parameters */
+/* pInputInScratch, pointer to a location in the scratch memory that can be */
+/* used to store the input samples (e.g. to save memory) */
+/* pOutputInScratch pointer to a location in the scratch memory that can be */
+/* used to store the output samples (e.g. to save memory) */
+/* */
+/* RETURNS: */
+/* SSRC_OK Succeeded */
+/* SSRC_INVALID_FS When the requested input or output sampling rates */
+/* are invalid. */
+/* SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO */
+/* or LVM_STEREO */
+/* SSRC_WRONG_NR_SAMPLES When the number of samples on the input or the output */
+/* are incorrect */
+/* SSRC_NULL_POINTER When any of the input pointers is a NULL pointer */
+/* SSRC_ALLINGMENT_ERROR When the instance memory or the scratch memory is not */
+/* 4 bytes aligned */
+/* */
+/* NOTES: */
+/* 1. The init function will clear the internal state */
+/* */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_Init( SSRC_Instance_t* pSSRC_Instance,
+ SSRC_Scratch_t* pSSRC_Scratch,
+ SSRC_Params_t* pSSRC_Params,
+ LVM_INT16** ppInputInScratch,
+ LVM_INT16** ppOutputInScratch);
+
+
+/****************************************************************************************/
+/* */
+/* FUNCTION: SSRC_SetGains */
+/* */
+/* DESCRIPTION: */
+/* This function sets headroom gain and the post gain of the SSRC */
+/* */
+/* PARAMETERS: */
+/* bHeadroomGainEnabled parameter to enable or disable the headroom gain of the */
+/* SSRC. The default value is LVM_MODE_ON. LVM_MODE_OFF */
+/* can be used in case it can be guaranteed that the input */
+/* level is below -6dB in all cases (the default headroom */
+/* is -6 dB) */
+/* */
+/* bOutputGainEnabled parameter to enable or disable the output gain. The */
+/* default value is LVM_MODE_ON */
+/* */
+/* OutputGain the value of the output gain. The output gain is a linear */
+/* gain value. 0x7FFF is equal to +6 dB and 0x0000 corresponds */
+/* to -inf dB. By default, a 3dB gain is applied, resulting */
+/* in an overall gain of -3dB (-6dB headroom + 3dB output gain)*/
+/* */
+/* RETURNS: */
+/* SSRC_OK Succeeded */
+/* SSRC_NULL_POINTER When pSSRC_Instance is a NULL pointer */
+/* SSRC_INVALID_MODE Wrong value used for the bHeadroomGainEnabled or the */
+/* bOutputGainEnabled parameters. */
+/* SSRC_INVALID_VALUE When OutputGain is out to the range [0;32767] */
+/* */
+/* NOTES: */
+/* 1. The SSRC_SetGains function is an optional function that should only be used */
+/* in rare cases. Preferably, use the default settings. */
+/* */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_SetGains( SSRC_Instance_t* pSSRC_Instance,
+ LVM_Mode_en bHeadroomGainEnabled,
+ LVM_Mode_en bOutputGainEnabled,
+ LVM_INT16 OutputGain );
+
+
+/****************************************************************************************/
+/* */
+/* FUNCTION: SSRC_Process */
+/* */
+/* DESCRIPTION: */
+/* Process function for the SSRC module. */
+/* */
+/* PARAMETERS: */
+/* pSSRC_Instance Instance pointer */
+/* pSSRC_AudioIn Pointer to the input data */
+/* pSSRC_AudioOut Pointer to the output data */
+/* */
+/* RETURNS: */
+/* SSRC_OK Succeeded */
+/* SSRC_NULL_POINTER When one of pSSRC_Instance, pSSRC_AudioIn or pSSRC_AudioOut */
+/* is NULL */
+/* */
+/* NOTES: */
+/* */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_Process( SSRC_Instance_t* pSSRC_Instance,
+ LVM_INT16* pSSRC_AudioIn,
+ LVM_INT16* pSSRC_AudioOut);
+
+/****************************************************************************************/
+
+#endif /* __SSRC_H__ */
diff --git a/libvideoeditor/vss/common/inc/VideoEditorResampler.h b/libvideoeditor/vss/common/inc/VideoEditorResampler.h
new file mode 100755
index 0000000..b8497d3
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/VideoEditorResampler.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VIDEOEDITORRESAMPLER_H
+#define VIDEOEDITORRESAMPLER_H
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "M4OSA_Types.h"
+
+M4OSA_Context LVAudioResamplerCreate(M4OSA_Int32 bitDepth, M4OSA_Int32 inChannelCount,
+ M4OSA_Int32 sampleRate, M4OSA_Int32 quality);
+void LVAudiosetSampleRate(M4OSA_Context resamplerContext,M4OSA_Int32 inSampleRate);
+void LVAudiosetVolume(M4OSA_Context resamplerContext, M4OSA_Int16 left, M4OSA_Int16 right) ;
+void LVAudioresample_LowQuality(M4OSA_Int16* out, M4OSA_Int16* input,
+ M4OSA_Int32 outFrameCount, M4OSA_Context resamplerContext);
+void LVDestroy(M4OSA_Context resamplerContext);
+
+void MonoTo2I_16( const M4OSA_Int16 *src,
+ M4OSA_Int16 *dst,
+ M4OSA_Int16 n);
+
+void From2iToMono_16( const M4OSA_Int16 *src,
+ M4OSA_Int16 *dst,
+ M4OSA_Int16 n);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* VIDEOEDITORRESAMPLER_H */
+
+
diff --git a/libvideoeditor/vss/common/inc/marker.h b/libvideoeditor/vss/common/inc/marker.h
new file mode 100755
index 0000000..83cade0
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/marker.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARKER_H
+#define MARKER_H
+
+#define ADD_CODE_MARKER_FUN(m_condition) \
+ if ( !(m_condition) ) \
+ { \
+ __asm__ volatile ( \
+ ".word 0x21614062\n\t" /* '!a@b' */ \
+ ".word 0x47712543\n\t" /* 'Gq%C' */ \
+ ".word 0x5F5F5F43\n\t" /* '___C' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x245F5F5F" /* '$___' */ \
+ ); \
+ }
+
+#define ADD_TEXT_MARKER_FUN(m_condition) \
+ if ( !(m_condition) ) \
+ { \
+ __asm__ volatile ( \
+ ".word 0x21614062\n\t" /* '!a@b' */ \
+ ".word 0x47712543\n\t" /* 'Gq%C' */ \
+ ".word 0x5F5F5F54\n\t" /* '___T' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x5F5F5F5F\n\t" /* '____' */ \
+ ".word 0x245F5F5F" /* '$___' */ \
+ ); \
+ }
+
+#endif
diff --git a/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h b/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h
new file mode 100755
index 0000000..e23c02a
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4EXIFC_CommonAPI.h
+ * @brief EXIF common data header
+ * @note The types, structures and macros defined in this file allow reading
+ * and writing EXIF JPEG images compliant spec EXIF 2.2
+ ******************************************************************************
+*/
+
+
+#ifndef __M4_EXIF_COMMON_API_H__
+#define __M4_EXIF_COMMON_API_H__
+
+#include "M4TOOL_VersionInfo.h"
+#include "M4Common_types.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_CoreID.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ ************************************************************************
+ * type M4EXIFC_Context
+ ************************************************************************
+*/
+typedef M4OSA_Void* M4EXIFC_Context;
+
+/**
+ ******************************************************************************
+ * Errors & Warnings
+ ******************************************************************************
+*/
+
+#define M4EXIFC_NO_ERR 0x00000000 /**< no error */
+#define M4EXIFC_ERR_PARAMETER 0x00000001 /**< invalid parameter */
+#define M4EXIFC_ERR_ALLOC 0x00000002 /**< allocation error */
+#define M4EXIFC_ERR_BAD_CONTEXT 0x00000003 /**< invalid context */
+#define M4EXIFC_ERR_NOT_COMPLIANT 0x00000004 /**< the image in buffer is not
+ JPEG compliant */
+#define M4EXIFC_ERR_NO_APP_FOUND 0x00000005 /**< the JPEG image does not contain any APP1
+ Exif 2.2 compliant */
+#define M4EXIFC_WAR_NO_THUMBNAIL 0x00000006 /**< the Exif part does not contain any
+ thumbnail */
+#define M4EXIFC_ERR_APP_TRUNCATED 0x00000007 /**< The APP1 section in input buffer is
+ not complete */
+
+
+/**
+ ******************************************************************************
+ * structure M4EXIFC_BasicTags
+ * @brief This structure stores the basic tags values.
+ * @note This Exif reader focuses on a set of "Entry Tags".
+ * This structure contains the corresponding "Entry Values" of these tags.
+ * M4EXIFC_Char* fields of structure are Null terminated Strings.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Int32 width; /**< image width in pixels */
+ M4OSA_Int32 height; /**< image height in pixels */
+ M4OSA_Char *creationDateTime; /**< date and time original image was generated */
+ M4OSA_Char *lastChangeDateTime; /**< file change date and time */
+ M4OSA_Char *description; /**< image title */
+ M4OSA_Char *make; /**< manufacturer of image input equipment */
+ M4OSA_Char *model; /**< model of image input equipment */
+ M4OSA_Char *software; /**< software used */
+ M4OSA_Char *artist; /**< person who created the image */
+ M4OSA_Char *copyright; /**< copyright holder */
+ M4COMMON_Orientation orientation; /**< orientation of image */
+ M4OSA_Int32 thumbnailSize; /**< size of the thumbnail */
+ M4OSA_UInt8 *thumbnailImg; /**< pointer to the thumbnail in main image buffer*/
+ M4OSA_Char *latitudeRef; /**< latitude reference */
+ M4COMMON_Location latitude; /**< latitude */
+ M4OSA_Char *longitudeRef; /**< longitude reference */
+ M4COMMON_Location longitude; /**< longitude */
+
+} M4EXIFC_BasicTags;
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4EXIFC_getVersion (M4_VersionInfo *pVersion)
+ * @brief get the version numbers of the exif library.
+ * @note This function retrieves the version numbers in a structure.
+ * @param pVersion: (OUT) the structure containing version numbers
+ * @return M4NO_ERROR: there is no error
+ * @return M4EXIFC_ERR_PARAMETER: (Debug only) the parameter is M4OSA_NULL.
+ ******************************************************************************
+*/
+M4OSA_ERR M4EXIFC_getVersion (M4_VersionInfo *pVersion);
+
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus*/
+#endif /* __M4_EXIF_COMMON_API_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_API.h b/libvideoeditor/vss/inc/M4PTO3GPP_API.h
new file mode 100755
index 0000000..4aa20d2
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4PTO3GPP_API.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4PTO3GPP_API.h
+ * @brief The Pictures to 3GPP Converter.
+ * @note M4PTO3GPP produces 3GPP compliant audio/video files
+ * from an AMR NB audio file and raw pictures into a MPEG-4/h263 3GPP file.
+ ******************************************************************************
+ */
+
+#ifndef __M4PTO3GPP_API_H__
+#define __M4PTO3GPP_API_H__
+
+/**
+ * OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ * OSAL types for file access */
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+
+/**
+ * Definition of M4_VersionInfo */
+#include "M4TOOL_VersionInfo.h"
+
+/**
+ * Definitions of M4VIFI_ImagePlane */
+#include "M4VIFI_FiltersAPI.h"
+
+/**
+ * Common definitions of video editing components */
+#include "M4_VideoEditingCommon.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Public type of the M4PTO3GPP context */
+typedef M4OSA_Void* M4PTO3GPP_Context;
+
+
+/**
+ ******************************************************************************
+ * enum M4PTO3GPP_ReplaceAudioMode
+ * @brief This enumeration defines the way the audio is managed if it is shorter than the video
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4PTO3GPP_kAudioPaddingMode_None = 0, /**< Audio track is kept shorter than the video track*/
+ M4PTO3GPP_kAudioPaddingMode_Silence, /**< If audio is shorter, silence is added at the end*/
+ M4PTO3GPP_kAudioPaddingMode_Loop /**< If audio is shorter, loop back to the beginning
+ when the whole track has been processed */
+} M4PTO3GPP_AudioPaddingMode;
+
+
+/**
+ ******************************************************************************
+ * struct M4PTO3GPP_OutputFileMaxSize
+ * @brief Defines the maximum size of the 3GPP file produced by the PTO3GPP
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4PTO3GPP_k50_KB, /**< Output 3GPP file size is limited to 50 Kbytes */
+ M4PTO3GPP_k75_KB, /**< Output 3GPP file size is limited to 75 Kbytes */
+ M4PTO3GPP_k100_KB, /**< Output 3GPP file size is limited to 100 Kbytes */
+ M4PTO3GPP_k150_KB, /**< Output 3GPP file size is limited to 150 Kbytes */
+ M4PTO3GPP_k200_KB, /**< Output 3GPP file size is limited to 200 Kbytes */
+ M4PTO3GPP_k300_KB, /**< Output 3GPP file size is limited to 300 Kbytes */
+ M4PTO3GPP_k400_KB, /**< Output 3GPP file size is limited to 400 Kbytes */
+ M4PTO3GPP_k500_KB, /**< Output 3GPP file size is limited to 500 Kbytes */
+ M4PTO3GPP_kUNLIMITED=-1 /**< Output 3GPP file size is not limited */
+} M4PTO3GPP_OutputFileMaxSize;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR (M4PTO3GPP_PictureCallbackFct) (M4OSA_Void* pPictureCtxt,
+ * M4VIFI_ImagePlane* pImagePlanes, M4OSA_Double* pPictureDuration);
+ * @brief The integrator must implement a function following this prototype.
+ * Its goal is to feed the PTO3GPP with YUV420 pictures.
+ *
+ * @note This function is given to the PTO3GPP in the M4PTO3GPP_Params structure
+ * @param pPictureCtxt (IN) The integrator's own context
+ * @param pImagePlanes(IN/OUT) Pointer to an array of three valid image planes
+ * @param pPictureDuration(OUT) Duration of the returned picture
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null (debug only)
+ ******************************************************************************
+ */
+typedef M4OSA_ERR (M4PTO3GPP_PictureCallbackFct) (M4OSA_Void* pPictureCtxt,
+ M4VIFI_ImagePlane* pImagePlanes,
+ M4OSA_Double* pPictureDuration);
+
+
+/**
+ ******************************************************************************
+ * struct M4PTO3GPP_Params
+ * @brief M4PTO3GPP parameters definition
+ ******************************************************************************
+ */
+typedef struct
+{
+ /**< Output video compression format, H263 or MPEG4 */
+ M4VIDEOEDITING_VideoFormat OutputVideoFormat;
+ /**< Output frame size : SQCIF to VGA*/
+ M4VIDEOEDITING_VideoFrameSize OutputVideoFrameSize;
+ /**< Targeted Output bit-rate, see enum*/
+ M4VIDEOEDITING_Bitrate OutputVideoBitrate;
+ /**< Maximum size of the output 3GPP file, see enum */
+ M4PTO3GPP_OutputFileMaxSize OutputFileMaxSize;
+ /**< Callback function to be called by the PTO3GPP to get the input pictures*/
+ M4PTO3GPP_PictureCallbackFct* pPictureCallbackFct;
+ /**< Context to be given as third argument of the picture callback function call*/
+ M4OSA_Void* pPictureCallbackCtxt;
+ /**< File descriptor of the input audio track file */
+ M4OSA_Void* pInputAudioTrackFile;
+ /**< Format of the audio file */
+ M4VIDEOEDITING_FileType AudioFileFormat;
+ /**< Type of processing to apply when audio is shorter than video*/
+ M4PTO3GPP_AudioPaddingMode AudioPaddingMode;
+ /**< File descriptor of the output 3GPP file */
+ M4OSA_Void* pOutput3gppFile;
+ /**< File descriptor of the temporary file to store metadata ("moov.bin") */
+ M4OSA_Void* pTemporaryFile;
+ /**< Number of input YUV frames to encode */
+ M4OSA_UInt32 NbVideoFrames;
+ M4OSA_Int32 videoProfile;
+ M4OSA_Int32 videoLevel;
+} M4PTO3GPP_Params;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+ * @brief Get the M4PTO3GPP version.
+ * @note Can be called anytime. Do not need any context.
+ * @param pVersionInfo (OUT) Pointer to a version info structure
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext);
+ * @brief Initializes the M4PTO3GPP (allocates an execution context).
+ * @note
+ * @param pContext (OUT) Pointer on the M4PTO3GPP context to allocate
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @param pFileWritePtrFct (IN) Pointer to OSAL file writer functions
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL
+ * @return M4ERR_ALLOC: The context structure could not be allocated
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
+ M4OSA_FileWriterPointer* pFileWritePtrFct);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
+ * @brief Set the M4PTO3GPP input and output files.
+ * @note It opens the input file, but the output file may not be created yet.
+ * @param pContext (IN) M4PTO3GPP context
+ * @param pParams (IN) Pointer to the parameters for the PTO3GPP.
+ * @note The pointed structure can be de-allocated after this function returns because
+ * it is internally copied by the PTO3GPP
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: M4PTO3GPP is not in an appropriate state
+ * for this function to be called
+ * @return M4ERR_ALLOC: There is no more available memory
+ * @return ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263 The output video frame
+ * size parameter is incompatible with H263 encoding
+ * @return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT
+ * The output video format parameter is undefined
+ * @return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE
+ * The output video bit-rate parameter is undefined
+ * @return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE
+ * The output video frame size parameter is undefined
+ * @return ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE
+ * The output file size parameter is undefined
+ * @return ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING
+ * The output audio padding parameter is undefined
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
+ * @brief Perform one step of transcoding.
+ * @note
+ * @param pContext (IN) M4PTO3GPP context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL
+ * @return M4ERR_STATE: M4PTO3GPP is not in an appropriate state
+ * for this function to be called
+ * @return M4PTO3GPP_WAR_END_OF_PROCESSING: Encoding completed
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
+ * @brief Finish the M4PTO3GPP transcoding.
+ * @note The output 3GPP file is ready to be played after this call
+ * @param pContext (IN) M4PTO3GPP context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL
+ * @return M4ERR_STATE: M4PTO3GPP is not in an appropriate state
+ * for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
+ * @brief Free all resources used by the M4PTO3GPP.
+ * @note The context is no more valid after this call
+ * @param pContext (IN) M4PTO3GPP context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4PTO3GPP_API_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h b/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h
new file mode 100755
index 0000000..57bd54f
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4PTO3GPP_ErrorCodes.h
+ * @brief Picture to 3gpp Service error definitions.
+ * @note
+ ******************************************************************************
+ */
+
+#ifndef __M4PTO3GPP_ErrorCodes_H__
+#define __M4PTO3GPP_ErrorCodes_H__
+
+/**
+ * OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ * OSAL core ID definitions */
+#include "M4OSA_CoreID.h"
+
+
+/**
+ * The output video format parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0001 )
+/**
+ * The output video frame size parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE \
+ M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0002 )
+/**
+ * The output video bit-rate parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE \
+ M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0003 )
+/**
+ * The output video frame size parameter is incompatible with H263 encoding */
+#define ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263 \
+ M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0004 )
+/**
+ * The file size is invalid */
+#define ERR_PTO3GPP_INVALID_FILE_SIZE M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0005 )
+/**
+ * The input audio file contains a track format not handled by PTO3GPP */
+#define ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE \
+ M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0006 )
+/**
+ * The output audio format parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_AUDIO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0007 )
+
+/**
+ * The AMR decoder initialization failed */
+#define ERR_PTO3GPP_AMR_DECODER_INIT_ERROR M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0020 )
+/**
+ * The AMR decoder failed */
+#define ERR_PTO3GPP_AMR_DECODE_ERROR M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0021 )
+/**
+ * The AMR decoder cleanup failed */
+#define ERR_PTO3GPP_AMR_DECODER_DESTROY_ERROR M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0022 )
+
+/**
+ * The video encoder initialization failed */
+#define ERR_PTO3GPP_VIDEO_ENCODER_INIT_ERROR M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0023 )
+/**
+ * The video encoding failed */
+#define ERR_PTO3GPP_VIDEO_ENCODE_ERROR M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0024 )
+/**
+ * The video encoder cleanup failed */
+#define ERR_PTO3GPP_VIDEO_ENCODER_DESTROY_ERROR M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0025 )
+
+/**
+ * The output file size parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0026 )
+
+/**
+ * The Encoding is completed */
+#define M4PTO3GPP_WAR_END_OF_PROCESSING M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0027 )
+
+/**
+ * The last picture to encode has been reached */
+#define M4PTO3GPP_WAR_LAST_PICTURE M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0028 )
+
+/**
+ * The output audio padding parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0029 )
+
+/**
+ * The video encoder encountered an Access Unit error: very probably a file write error */
+#define ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x002A )
+
+#endif /* __M4PTO3GPP_ErrorCodes_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h b/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h
new file mode 100755
index 0000000..592e566
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4PTO3GPP_InternalTypes.h
+ * @brief Picture to 3gpp Service internal definitions
+ * @note This file contains all enums and types not visible to the external world.
+ ******************************************************************************
+ */
+
+
+#ifndef __M4PTO3GPP_INTERNALTYPES_H__
+#define __M4PTO3GPP_INTERNALTYPES_H__
+
+#define M4PTO3GPP_VERSION_MAJOR 3
+#define M4PTO3GPP_VERSION_MINOR 0
+#define M4PTO3GPP_VERSION_REVISION 6
+
+/**
+ * M4PTO3GPP public API and types */
+#include "M4PTO3GPP_API.h"
+#include "M4_Utils.h"
+
+/**
+ * Internally used modules */
+
+#include "M4WRITER_common.h" /* Write 3GPP file */
+#include "M4READER_Common.h" /* Read AMR file */
+#include "M4ENCODER_common.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ ******************************************************************************
+ * enum M4PTO3GPP_States
+ * @brief Main state machine of the M4PTO3GPP.
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4PTO3GPP_kState_CREATED = 0, /**< M4PTO3GPP_Init has been called */
+ M4PTO3GPP_kState_OPENED = 1, /**< M4PTO3GPP_Open has been called */
+ M4PTO3GPP_kState_READY = 2, /**< Step can be called */
+ M4PTO3GPP_kState_FINISHED = 3, /**< Transcoding is finished */
+ M4PTO3GPP_kState_CLOSED = 4 /**< Output file has been created */
+}
+M4PTO3GPP_States;
+
+/**
+ ******************************************************************************
+ * enum M4PTO3GPP_StreamState
+ * @brief State of a media stream encoding (audio or video).
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4PTO3GPP_kStreamState_NOSTREAM = 0, /**< No stream present */
+ M4PTO3GPP_kStreamState_STARTED = 1, /**< The stream encoding is in progress */
+ M4PTO3GPP_kStreamState_FINISHED = 2 /**< The stream has finished encoding */
+}
+M4PTO3GPP_StreamState;
+
+/*
+ * Definition of max AU size */
+#define M4PTO3GPP_VIDEO_MIN_COMPRESSION_RATIO 0.8F /**< Max AU size will be 0.8 times the
+ YUV4:2:0 frame size */
+#define M4PTO3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO 1.2F /**< Max chunk size will be 1.2 times
+ the max AU size */
+#define M4PTO3GPP_AUDIO_MAX_AU_SIZE 1000 /**< AAC max AU size seems to be
+ about 850 bytes */
+#define M4PTO3GPP_AUDIO_MAX_CHUNK_SIZE 5000
+
+/**
+ ******************************************************************************
+ * enum anonymous enum
+ * @brief enum to keep track of the encoder state
+ ******************************************************************************
+ */
+enum
+{
+ M4PTO3GPP_kNoEncoder,
+ M4PTO3GPP_kEncoderClosed,
+ M4PTO3GPP_kEncoderStopped,
+ M4PTO3GPP_kEncoderRunning
+};
+
+/**
+ ******************************************************************************
+ * structure M4PTO3GPP_InternalContext
+ * @brief This structure defines the M4PTO3GPP context (private)
+ * @note This structure is used for all M4PTO3GPP calls to store the context
+ ******************************************************************************
+ */
+typedef struct
+{
+ /**
+ * M4PTO3GPP main variables */
+ M4PTO3GPP_States m_State; /**< M4PTO3GPP internal state */
+ M4PTO3GPP_Params m_Params; /**< M4PTO3GPP parameters, set by the user */
+ M4PTO3GPP_StreamState m_VideoState; /**< State of the video encoding */
+ M4PTO3GPP_StreamState m_AudioState; /**< State of the audio encoding */
+
+ /**
+ * OSAL file read/write functions */
+ M4OSA_FileReadPointer* pOsalFileRead; /**< OSAL file read functions,
+ to be provided by user */
+ M4OSA_FileWriterPointer* pOsalFileWrite; /**< OSAL file write functions,
+ to be provided by user */
+
+ /**
+ * Reader stuff */
+ M4_AccessUnit* m_pReaderAudioAU; /**< Read audio access unit */
+ M4_AudioStreamHandler* m_pReaderAudioStream;/**< Description of the read audio stream */
+
+ /**
+ * Writer stuff */
+ M4SYS_AccessUnit m_WriterVideoAU; /**< Written video access unit */
+ M4SYS_AccessUnit m_WriterAudioAU; /**< Written audio access unit */
+ M4ENCODER_Header* m_pEncoderHeader; /**< Sequence header returned by the
+ encoder at encoder create (if any) */
+ M4SYS_StreamDescription* m_pWriterVideoStream; /**< Description of the written
+ video stream */
+ M4SYS_StreamDescription* m_pWriterAudioStream; /**< Description of the written
+ audio stream */
+ M4WRITER_StreamVideoInfos* m_pWriterVideoStreamInfo; /**< Video properties of the written
+ video stream */
+ M4WRITER_StreamAudioInfos* m_pWriterAudioStreamInfo; /**< Audio properties of the written
+ audio stream */
+
+ /**
+ * Contexts of the used modules */
+ M4OSA_Void* m_pAudioReaderContext; /**< Context of the audio reader module*/
+ M4OSA_Void* m_p3gpWriterContext; /**< Context of the 3GP writer module */
+ M4OSA_Void* m_pMp4EncoderContext; /**< Mp4 encoder context */
+ M4OSA_UInt32 m_eEncoderState;
+
+ /**
+ * Reader Interfaces */
+ M4READER_GlobalInterface* m_pReaderGlobInt; /**< Reader common interface, global part */
+ M4READER_DataInterface* m_pReaderDataInt; /**< Reader common interface, data part */
+
+ /**
+ * Writer Interfaces */
+ M4WRITER_GlobalInterface* m_pWriterGlobInt; /**< Writer common interface, global part */
+ M4WRITER_DataInterface* m_pWriterDataInt; /**< Writer common interface, data part */
+
+ /**
+ * Encoder Interfaces */
+ M4ENCODER_GlobalInterface* m_pEncoderInt; /**< Encoder common interface */
+ M4OSA_Void* m_pEncoderExternalAPI;
+ M4OSA_Void* m_pEncoderUserData;
+
+ /**
+ * */
+ M4VIFI_ImagePlane* pSavedPlane;
+ M4OSA_UInt32 uiSavedDuration;
+
+ /**
+ * Video rate control stuff */
+ M4_MediaTime m_dLastVideoRegulCts; /**< Last time (CTS) the video bitrate
+ regulation has been called */
+ M4_MediaTime m_mtCts; /**< Current video cts */
+ M4_MediaTime m_mtNextCts; /**< Next video CTS to transcode */
+ M4_MediaTime m_mtAudioCts; /**< Current audio cts */
+ M4_MediaTime m_AudioOffSet; /**< Audio Offset to add to the cts in loop mode*/
+ M4_MediaTime m_PrevAudioCts; /**< Previous audio cts for AAC looping */
+ M4_MediaTime m_DeltaAudioCts; /**< Delta audio cts for AAC looping */
+ M4OSA_UInt32 m_CurrentFileSize; /**< Current Output file size */
+ M4OSA_UInt32 m_MaxFileSize; /**< Max Output file size */
+ M4OSA_Bool m_IsLastPicture; /**< A boolean that signals to the encoder that
+ this is the last frame to be encoded*/
+ M4OSA_Bool m_bLastInternalCallBack;
+ M4OSA_UInt32 m_NbCurrentFrame; /**< Index of the current YUV frame encoded */
+
+ /**
+ * Audio padding mode */
+ M4OSA_Bool m_bAudioPaddingSilence; /**< A boolean that signals that audio
+ AU will be padded by silence */
+} M4PTO3GPP_InternalContext;
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ * M4VIFI_ImagePlane* pPlaneOut)
+ * @brief Call an external callback to get the picture to encode
+ * @note It is called by the video encoder
+ * @param pContext (IN) VPP context, which actually is the M4PTO3GPP
+ * internal context in our case
+ * @param pPlaneIn (IN) Contains the image
+ * @param pPlaneOut (IN/OUT) Pointer to an array of 3 planes that will contain the
+ * output YUV420 image read with the m_pPictureCallbackFct
+ * @return M4NO_ERROR: No error
+ * @return Any error returned by an underlying module
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ M4VIFI_ImagePlane* pPlaneOut);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4PTO3GPP_INTERNALTYPES_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_API.h b/libvideoeditor/vss/inc/M4VSS3GPP_API.h
new file mode 100755
index 0000000..0bb7141
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_API.h
@@ -0,0 +1,819 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VSS3GPP_API_H__
+#define __M4VSS3GPP_API_H__
+
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_API.h
+ * @brief Video Studio Service 3GPP public API.
+ * @note VSS allows editing 3GPP files.
+ * It is a straightforward and fully synchronous API.
+ ******************************************************************************
+ */
+
+/**
+ * OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ * OSAL types for file access */
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+
+/**
+ * Definition of M4_VersionInfo */
+#include "M4TOOL_VersionInfo.h"
+
+/**
+ * Image planes definition */
+#include "M4VIFI_FiltersAPI.h"
+
+/**
+ * Common definitions of video editing components */
+#include "M4_VideoEditingCommon.h"
+#include "M4ENCODER_AudioCommon.h"
+#include "M4AD_Common.h"
+#include "M4DA_Types.h"
+
+/**
+ * Extended API (xVSS) */
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+#include "M4VSS3GPP_Extended_API.h"
+#endif
+
+//#include "M4VD_HW_API.h"
+//#include "M4VE_API.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+/**
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ *
+ * Edition Feature
+ *
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ */
+
+/**
+ * Public type of the VSS edit context */
+typedef M4OSA_Void* M4VSS3GPP_EditContext;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_VideoEffectType
+ * @brief This enumeration defines the video effect types of the VSS3GPP
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4VSS3GPP_kVideoEffectType_None = 0, /**< No video effect */
+ M4VSS3GPP_kVideoEffectType_FadeFromBlack = 8, /**< Intended for begin effect */
+ M4VSS3GPP_kVideoEffectType_FadeToBlack = 16, /**< Intended for end effect */
+ M4VSS3GPP_kVideoEffectType_External = 256 /**< External effect function is used */
+ /* reserved 256 + n */ /**< External effect number n */
+
+} M4VSS3GPP_VideoEffectType;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_AudioEffectType
+ * @brief This enumeration defines the audio effect types of the VSS3GPP
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4VSS3GPP_kAudioEffectType_None = 0,
+ M4VSS3GPP_kAudioEffectType_FadeIn = 8, /**< Intended for begin effect */
+ M4VSS3GPP_kAudioEffectType_FadeOut = 16 /**< Intended for end effect */
+
+} M4VSS3GPP_AudioEffectType;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_VideoTransitionType
+ * @brief This enumeration defines the video effect that can be applied during a transition.
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4VSS3GPP_kVideoTransitionType_None = 0,
+ M4VSS3GPP_kVideoTransitionType_CrossFade = 1,
+ M4VSS3GPP_kVideoTransitionType_External = 256
+ /* reserved 256 + n */ /**< External transition number n */
+
+} M4VSS3GPP_VideoTransitionType;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_AudioTransitionType
+ * @brief This enumeration defines the audio effect that can be applied during a transition.
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4VSS3GPP_kAudioTransitionType_None = 0,
+ M4VSS3GPP_kAudioTransitionType_CrossFade
+
+} M4VSS3GPP_AudioTransitionType;
+
+
+/**
+ ******************************************************************************
+ * struct M4VSS3GPP_ExternalProgress
+ * @brief This structure contains information provided to the external Effect
+ * and Transition functions
+ * @note The uiProgress value should be enough for most cases
+ ******************************************************************************
+ */
+typedef struct
+{
+ /**< Progress of the Effect or the Transition, from 0 to 1000 (one thousand) */
+ M4OSA_UInt32 uiProgress;
+ /**< Index of the current clip (first clip in case of a Transition), from 0 to N */
+ //M4OSA_UInt8 uiCurrentClip;
+ /**< Current time, in milliseconds, in the current clip time-line */
+ M4OSA_UInt32 uiClipTime;
+ /**< Current time, in milliseconds, in the output clip time-line */
+ M4OSA_UInt32 uiOutputTime;
+ M4OSA_Bool bIsLast;
+
+} M4VSS3GPP_ExternalProgress;
+
+
+/**
+ ************************************************************************
+ * enum M4VSS3GPP_codecType
+ * @brief This enum defines the codec types used to create interfaces
+ * @note This enum is used internally by the VSS3GPP services to identify
+ * a currently supported codec interface. Each codec is
+ * registered with one of this type associated.
+ * When a codec instance is needed, this type is used to
+ * identify and retrieve its interface.
+ * This can be extended for other codecs.
+ ************************************************************************
+ */
+typedef enum
+{
+ /* Video Decoder Types */
+ M4VSS3GPP_kVideoDecMPEG4 = 0,
+ M4VSS3GPP_kVideoDecH264,
+
+ /* Video Encoder Types */
+ M4VSS3GPP_kVideoEncMPEG4,
+ M4VSS3GPP_kVideoEncH263,
+ M4VSS3GPP_kVideoEncH264,
+
+ /* Audio Decoder Types */
+ M4VSS3GPP_kAudioDecAMRNB,
+ M4VSS3GPP_kAudioDecAAC,
+ M4VSS3GPP_kAudioDecMP3,
+
+ /* Audio Encoder Types */
+ M4VSS3GPP_kAudioEncAMRNB,
+ M4VSS3GPP_kAudioEncAAC,
+
+    /* number of codecs, keep it as last enum entry, before invalid type */
+ M4VSS3GPP_kCodecType_NB,
+ /* invalid codec type */
+ M4VSS3GPP_kCodecTypeInvalid = 255
+
+} M4VSS3GPP_codecType;
+
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_editVideoEffectFct
+ * @brief Begin and End video effect functions implemented by the integrator
+ * must match this prototype.
+ * @note The function is provided with the original image of the clip.
+ * It must apply the video effect to build the output image.
+ * The progress of the effect is given, on a scale from 0 to 1000.
+ * When the effect function is called, all the image plane structures
+ * and buffers are valid and owned by the VSS 3GPP.
+ *
+ * @param pFunctionContext (IN) The function context, previously set by the integrator
+ * @param pInputPlanes (IN) Input YUV420 image: pointer to an array of three valid
+ image planes (Y, U and V)
+ * @param pOutputPlanes (IN/OUT) Output (filtered) YUV420 image: pointer to an array
+ of three valid image planes (Y, U and V)
+ * @param pProgress (IN) Set of information about the video transition progress.
+ * @param uiExternalEffectId (IN) Which effect function should be used (for external effects)
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+typedef M4OSA_ERR (*M4VSS3GPP_editVideoEffectFct)
+(
+ M4OSA_Void *pFunctionContext,
+ M4VIFI_ImagePlane *pInputPlanes,
+ M4VIFI_ImagePlane *pOutputPlanes,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiExternalEffectId
+);
+
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_editVideoTransitionFct
+ * @brief External transition functions implemented by the integrator
+ * must match this prototype.
+ * @note The function is provided with the image of the first clip and
+ * the image of the second clip. It must build the output image
+ * from the two input images.
+ * The progress of the transition is given, on a scale from 0 to 1000.
+ * When the external function is called, all the image plane
+ * structures and buffers are valid and owned by the VSS 3GPP.
+ *
+ * @param pFunctionContext (IN) The function context, previously set by the integrator
+ * @param pClip1InputPlanes (IN) First input YUV420 image: pointer to an array of three
+ valid image planes (Y, U and V)
+ * @param pClip2InputPlanes (IN) Second input YUV420 image: pointer to an array of three
+ valid image planes (Y, U and V)
+ * @param pOutputPlanes (IN/OUT) Output (filtered) YUV420 image: pointer to an array
+ of three valid image planes (Y, U and V)
+ * @param pProgress (IN) Set of information about the video effect progress.
+ * @param uiExternalTransitionId (IN) Which transition function should be used
+ (for external transitions)
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+typedef M4OSA_ERR (*M4VSS3GPP_editVideoTransitionFct)
+(
+ M4OSA_Void *pFunctionContext,
+ M4VIFI_ImagePlane *pClip1InputPlanes,
+ M4VIFI_ImagePlane *pClip2InputPlanes,
+ M4VIFI_ImagePlane *pOutputPlanes,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiExternalTransitionId
+);
+
+
+/**
+ ******************************************************************************
+ * struct M4VSS3GPP_EffectSettings
+ * @brief This structure defines an audio/video effect for the edition.
+ * @note Effect start time is relative to output clip.
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4OSA_UInt32 uiStartTime; /**< In ms */
+ M4OSA_UInt32 uiDuration; /**< In ms */
+ M4VSS3GPP_VideoEffectType VideoEffectType; /**< None, FadeIn, FadeOut, etc. */
+ M4VSS3GPP_editVideoEffectFct ExtVideoEffectFct; /**< External effect function */
+ M4OSA_Void *pExtVideoEffectFctCtxt;/**< Context given to the external
+ effect function */
+ M4VSS3GPP_AudioEffectType AudioEffectType; /**< None, FadeIn, FadeOut */
+
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+ M4xVSS_EffectSettings xVSS;
+#endif
+
+} M4VSS3GPP_EffectSettings;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_TransitionBehaviour
+ * @brief Transition behavior
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4VSS3GPP_TransitionBehaviour_SpeedUp = 0,
+ M4VSS3GPP_TransitionBehaviour_Linear,
+ M4VSS3GPP_TransitionBehaviour_SpeedDown,
+ M4VSS3GPP_TransitionBehaviour_SlowMiddle,
+ M4VSS3GPP_TransitionBehaviour_FastMiddle
+} M4VSS3GPP_TransitionBehaviour;
+
+
+/**
+ ******************************************************************************
+ * struct M4VSS3GPP_TransitionSettings
+ * @brief This structure defines the transition to be applied when assembling two clips.
+ ******************************************************************************
+ */
+typedef struct
+{
+ /**< Duration of the transition, in milliseconds (set to 0 to get no transition) */
+ M4OSA_UInt32 uiTransitionDuration;
+
+ /**< Type of the video transition */
+ M4VSS3GPP_VideoTransitionType VideoTransitionType;
+
+ /**< External transition video effect function */
+ M4VSS3GPP_editVideoTransitionFct ExtVideoTransitionFct;
+
+ /**< Context of the external transition video effect function */
+ M4OSA_Void *pExtVideoTransitionFctCtxt;
+ M4VSS3GPP_AudioTransitionType AudioTransitionType; /**< Type of the audio transition */
+ M4VSS3GPP_TransitionBehaviour TransitionBehaviour; /**<Transition behaviour*/
+
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+ M4xVSS_TransitionSettings xVSS;
+#endif
+
+} M4VSS3GPP_TransitionSettings;
+
+
+/**
+ ******************************************************************************
+ * struct M4VSS3GPP_ClipSettings
+ * @brief This structure defines an input clip for the edition.
+ * @note It also contains the settings for the cut and begin/end effects applied to the clip.
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4OSA_Void *pFile; /**< Clip file descriptor */
+ M4VIDEOEDITING_FileType FileType; /**< .3gp, .amr, .mp3 */
+ M4OSA_UInt32 filePathSize; /**< Clip path size
+ (add because of UTF16 conversion)*/
+ M4VIDEOEDITING_ClipProperties ClipProperties; /**< Clip analysis previously computed
+ with M4VSS3GPP_editAnalyseClip */
+ M4OSA_UInt32 uiBeginCutTime; /**< Begin cut time, in milliseconds */
+ M4OSA_UInt32 uiEndCutTime; /**< End cut time, in milliseconds */
+ M4OSA_Bool bTranscodingRequired;
+
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+ M4xVSS_ClipSettings xVSS;
+#endif
+
+} M4VSS3GPP_ClipSettings;
+
+
+/**
+ ******************************************************************************
+ * struct M4VSS3GPP_EditSettings
+ * @brief This structure gathers all the information needed to define a complete
+ * edition operation
+ ******************************************************************************
+ */
+typedef struct
+{
+ /**< Number of element of the clip list pClipList */
+ M4OSA_UInt8 uiClipNumber;
+ /**< The properties of this clip will be used as a reference for compatibility checking */
+ M4OSA_UInt8 uiMasterClip;
+ /**< List of the input clips settings. Pointer to an array of uiClipNumber
+ clip settings pointers */
+ M4VSS3GPP_ClipSettings **pClipList;
+ /**< List of the transition settings. Pointer to an array of uiClipNumber-1
+ transition settings pointers */
+ M4VSS3GPP_TransitionSettings **pTransitionList;
+ M4VSS3GPP_EffectSettings *Effects; /**< List of effects */
+ M4OSA_UInt8 nbEffects; /**< Number of effects in the above list */
+ /**< Frame rate at which the modified video sections will be encoded */
+ M4VIDEOEDITING_VideoFramerate videoFrameRate;
+ M4OSA_Void *pOutputFile; /**< Output 3GPP clip file descriptor */
+ M4OSA_UInt32 uiOutputPathSize; /**< Output file path size*/
+ /**< Temporary file to store metadata ("moov.bin") */
+ M4OSA_Void *pTemporaryFile;
+
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+ M4xVSS_EditSettings xVSS;
+#endif
+ M4OSA_Float PTVolLevel;
+} M4VSS3GPP_EditSettings;
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editAnalyseClip()
+ * @brief This function allows checking if a clip is compatible with VSS 3GPP editing
+ * @note It also fills a ClipAnalysis structure, which can be used to check if two
+ * clips are compatible
+ * @param pClip (IN) File descriptor of the input 3GPP/MP3 clip file.
+ * @param pClipProperties (IN) Pointer to a valid ClipProperties structure.
+ * @param FileType (IN) Type of the input file (.3gp, .amr, .mp3)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED
+ * @return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED
+ * @return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE
+ * @return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE
+ * @return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC
+ * @return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ * @return M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE
+ * @return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT
+ * @return M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editAnalyseClip(M4OSA_Void *pClip, M4VIDEOEDITING_FileType FileType,
+ M4VIDEOEDITING_ClipProperties *pClipProperties,
+ M4OSA_FileReadPointer *pFileReadPtrFct);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility()
+ * @brief This function allows checking if two clips are compatible with each other
+ * for VSS 3GPP editing assembly feature.
+ * @note
+ * @param pClip1Properties (IN) Clip analysis of the first clip
+ * @param pClip2Properties (IN) Clip analysis of the second clip
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_PLATFORM
+ * @return M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT
+ * @return M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE
+ * @return M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE
+ * @return M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING
+ * @return M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY
+ * @return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility(M4VIDEOEDITING_ClipProperties *pClip1Properties,
+ M4VIDEOEDITING_ClipProperties *pClip2Properties);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editInit()
+ * @brief Initializes the VSS 3GPP edit operation (allocates an execution context).
+ * @note
+ * @param pContext (OUT) Pointer on the VSS 3GPP edit context to allocate
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @param pFileWritePtrFct (IN) Pointer to OSAL file writer functions
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editInit(
+ M4VSS3GPP_EditContext* pContext,
+ M4OSA_FileReadPointer* pFileReadPtrFct,
+ M4OSA_FileWriterPointer* pFileWritePtrFct );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCreateClipSettings()
+ * @brief Allows filling a clip settings structure with default values
+ *
+ * @note WARNING: pClipSettings->pFile will be allocated in this function.
+ *
+ * @param pClipSettings (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param pFile (IN) Clip file name
+ * @param filePathSize (IN) Size of the clip path (needed for UTF16 conversion)
+ * @param nbEffects (IN) Nb of effect settings to allocate
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCreateClipSettings(M4VSS3GPP_ClipSettings *pClipSettings,
+ M4OSA_Void* pFile, M4OSA_UInt32 filePathSize,
+ M4OSA_UInt8 nbEffects);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings()
+ * @brief Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param pClipSettingsDest (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param pClipSettingsOrig (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param bCopyEffects (IN) Flag to know if we have to duplicate effects (deprecated)
+ * @return M4NO_ERROR: No error
 + * @return M4ERR_PARAMETER: pClipSettingsDest or pClipSettingsOrig is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings(M4VSS3GPP_ClipSettings *pClipSettingsDest,
+ M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+ M4OSA_Bool bCopyEffects);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editFreeClipSettings()
+ * @brief Free the pointers allocated in the ClipSetting structure (pFile, Effects).
+ *
+ * @param pClipSettings (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editFreeClipSettings(M4VSS3GPP_ClipSettings *pClipSettings);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editOpen()
+ * @brief Set the VSS 3GPP input and output files, and set the settings.
+ * @note
+ * @param pContext (IN) VSS 3GPP edit context
+ * @param pSettings (IN) Edit settings
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: VSS is not in an appropriate state for this function to be called
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editOpen(M4VSS3GPP_EditContext pContext, M4VSS3GPP_EditSettings *pSettings);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editStep()
+ * @brief Perform one step of editing.
+ * @note
+ * @param pContext (IN) VSS 3GPP edit context
+ * @param pProgress (OUT) Progress percentage (0 to 100) of the editing operation
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: VSS 3GPP is not in an appropriate state for this function to
+ * be called
+ * @return M4VSS3GPP_WAR_EDITING_DONE:Edition is done, user should now call M4VSS3GPP_editClose()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editStep(M4VSS3GPP_EditContext pContext, M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editClose()
+ * @brief Finish the VSS 3GPP edit operation.
+ * @note The output 3GPP file is ready to be played after this call
+ * @param pContext (IN) VSS 3GPP edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: VSS 3GPP is not in an appropriate state for this function
+ * to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editClose(M4VSS3GPP_EditContext pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCleanUp()
+ * @brief Free all resources used by the VSS 3GPP edit operation.
+ * @note The context is no more valid after this call
+ * @param pContext (IN) VSS 3GPP edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCleanUp(M4VSS3GPP_EditContext pContext);
+
+/**
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ *
+ * Audio Mixing Feature
+ *
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ */
+/**
+ * Public type of the VSS audio mixing context */
+typedef M4OSA_Void* M4VSS3GPP_AudioMixingContext;
+
+
+/**
+ ******************************************************************************
+ * struct M4VSS3GPP_AudioMixingSettings
+ * @brief This structure defines the settings of the audio mixing operation.
+ ******************************************************************************
+ */
+typedef struct {
+ M4OSA_Void* pOriginalClipFile; /**< Input 3GPP clip file */
+ M4OSA_Void* pAddedAudioTrackFile; /**< New audio track */
+ M4VIDEOEDITING_FileType AddedAudioFileType; /**< File Format of the new audio file */
+ M4OSA_UInt32 uiAddCts; /**< Time, in milliseconds,
+ at which the added audio track is inserted */
+ M4OSA_UInt32 uiAddVolume; /**< Volume, in percentage,
+ of the added audio track */
+ M4OSA_UInt32 uiBeginLoop; /**< Describes in milli-second the
+ start time of the loop */
+ M4OSA_UInt32 uiEndLoop; /**< Describes in milli-second the end
+ time of the loop (0 means no loop) */
+ M4OSA_Bool bRemoveOriginal; /**< If true, the original audio track
+ is not taken into account */
+ M4OSA_Void* pOutputClipFile; /**< Output 3GPP clip file */
+ M4OSA_Void* pTemporaryFile; /**< Temporary file to store metadata
+ ("moov.bin") */
 + /**< The following parameters are optional. They are only used in the case of MP3 replacement. */
+ M4VIDEOEDITING_AudioSamplingFrequency outputASF; /**< Output sampling frequency */
+ M4VIDEOEDITING_AudioFormat outputAudioFormat; /**< Output audio codec(AAC/AMR)*/
+ M4VIDEOEDITING_Bitrate outputAudioBitrate; /**< Output audio bitrate */
+ M4OSA_UInt8 outputNBChannels; /**< Output audio nb of channels */
+ M4OSA_Bool b_DuckingNeedeed;
+ M4OSA_Int32 InDucking_threshold;
+ M4OSA_Float fBTVolLevel;
+ M4OSA_Float fPTVolLevel;
+ M4OSA_Float InDucking_lowVolume;
+ M4OSA_Bool bLoop;
+ M4OSA_UInt32 uiSamplingFrequency;
+ M4OSA_UInt32 uiNumChannels;
+} M4VSS3GPP_AudioMixingSettings;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingInit(M4VSS3GPP_AudioMixingContext* pContext,
+ * M4VSS3GPP_AudioMixingSettings* pSettings)
+ * @brief Initializes the VSS audio mixing operation (allocates an execution context).
+ * @note
+ * @param pContext (OUT) Pointer on the VSS audio mixing context to allocate
+ * @param pSettings (IN) Pointer to valid audio mixing settings
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @param pFileWritePtrFct (IN) Pointer to OSAL file writer functions
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingInit(
+ M4VSS3GPP_AudioMixingContext* pContext,
+ M4VSS3GPP_AudioMixingSettings* pSettings,
+ M4OSA_FileReadPointer* pFileReadPtrFct,
+ M4OSA_FileWriterPointer* pFileWritePtrFct );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingStep()
+ * @brief Perform one step of audio mixing.
+ * @note
+ * @param pContext (IN) VSS 3GPP audio mixing context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @param pProgress (OUT) Progress percentage (0 to 100)
+ of the finalization operation
+ * @return M4ERR_STATE: VSS is not in an appropriate state for
+ this function to be called
+ * @return M4VSS3GPP_WAR_END_OF_AUDIO_MIXING: Audio mixing is over, user should
+ now call M4VSS3GPP_audioMixingCleanUp()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingStep(M4VSS3GPP_AudioMixingContext pContext,
+ M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingCleanUp()
+ * @brief Free all resources used by the VSS audio mixing operation.
+ * @note The context is no more valid after this call
+ * @param pContext (IN) VSS 3GPP audio mixing context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingCleanUp(M4VSS3GPP_AudioMixingContext pContext);
+
+
+/**
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ *
+ * Extract Picture Feature
+ *
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ */
+/**
+ * Public type of the VSS extract picture context */
+typedef M4OSA_Void* M4VSS3GPP_ExtractPictureContext;
+
+/**
+ ******************************************************************************
+ * struct M4VSS3GPP_ExtractPictureSettings
 + * @brief This structure defines the settings of the extract picture operation.
+ ******************************************************************************
+ */
+typedef struct {
+ M4OSA_Void* pInputClipFile; /**< Input 3GPP clip file */
+ M4OSA_Int32 iExtractionTime; /**< frame time (in ms) to be extracted */
+ M4OSA_Void* pOutputYuvPic; /**< Output YUV picture name */
+} M4VSS3GPP_ExtractPictureSettings;
+
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_extractPictureInit()
+ * @brief Initializes the VSS extract picture operation (allocates an execution context).
+ * @note
+ * @param pContext (OUT) Pointer on the VSS extract picture context to allocate
+ * @param pSettings (IN) Pointer to valid extract picture settings
+ * @param pWidth (OUT) video stream width
+ * @param pHeight (OUT) video stream height
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_ALLOC: There is no more available memory
+ * @return M4VSS3GPP_ERR_INVALID_CLIP1: The input clip is empty
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_extractPictureInit(
+ M4VSS3GPP_ExtractPictureContext* pContext,
+ M4VSS3GPP_ExtractPictureSettings* pSettings,
+ M4OSA_UInt32 *pWidth,
+ M4OSA_UInt32 *pHeight,
+ M4OSA_FileReadPointer* pFileReadPtrFct );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_extractPictureStep()
+ * @brief Perform one step of picture extraction.
+ * @note
+ * @param pContext (IN) VSS extract picture context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
 + * @param pDecPlanes (OUT) Plane in which the extracted picture is copied
+ * @param pProgress (OUT) Progress percentage (0 to 100)
+ of the picture extraction
+ * @return M4ERR_STATE: VSS is not in an appropriate state for this
+ function to be called
+ * @return VSS_WAR_END_OF_EXTRACT_PICTURE: Picture extraction is over, user should now
+ call M4VSS3GPP_extractPictureCleanUp()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_extractPictureStep(M4VSS3GPP_ExtractPictureContext pContext,
+ M4VIFI_ImagePlane *pDecPlanes, M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_extractPictureCleanUp()
+ * @brief Free all resources used by the VSS picture extraction.
+ * @note The context is no more valid after this call
+ * @param pContext (IN) VSS extract picture context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_extractPictureCleanUp(M4VSS3GPP_ExtractPictureContext pContext);
+
+/**
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ *
+ * Common features
+ *
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ */
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetVersion()
+ * @brief Get the VSS version.
+ * @note Can be called anytime. Do not need any context.
+ * @param pVersionInfo (OUT) Pointer to a version info structure
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+
+
+#ifdef WIN32
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetErrorMessage()
+ * @brief Return a string describing the given error code
+ * @note The input string must be already allocated (and long enough!)
+ * @param err (IN) Error code to get the description from
+ * @param sMessage (IN/OUT) Allocated string in which the description will be copied
+ * @return M4NO_ERROR: Input error is from the VSS3GPP module
+ * @return M4ERR_PARAMETER:Input error is not from the VSS3GPP module
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_GetErrorMessage(M4OSA_ERR err, M4OSA_Char* sMessage);
+#endif /**< WIN32 */
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4VSS3GPP_API_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
new file mode 100755
index 0000000..4bf2e84
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_ErrorCodes.h
+ * @brief Video Studio Service 3GPP error definitions.
+ * @note
+ ******************************************************************************
+ */
+
+#ifndef __M4VSS3GPP_ErrorCodes_H__
+#define __M4VSS3GPP_ErrorCodes_H__
+
+/**
+ * OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ * OSAL core ID definitions */
+#include "M4OSA_CoreID.h"
+
+
+/************************************************************************/
+/* Warning codes */
+/************************************************************************/
+
+/**
+ * End of edition, user should now call M4VSS3GPP_editClose() */
+#define M4VSS3GPP_WAR_EDITING_DONE M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0001)
+
+/**
+ * End of audio mixing, user should now call M4VSS3GPP_audioMixingCleanUp() */
+#define M4VSS3GPP_WAR_END_OF_AUDIO_MIXING M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0010)
+
+/**
+ * End of extract picture, user should now call M4VSS3GPP_extractPictureCleanUp() */
+#define M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0020)
+/* RC: to know when a file has been processed */
+#define M4VSS3GPP_WAR_SWITCH_CLIP M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0030)
+
+/************************************************************************/
+/* Error codes */
+/************************************************************************/
+
+/**
+ * Invalid file type */
+#define M4VSS3GPP_ERR_INVALID_FILE_TYPE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0001)
+/**
+ * Invalid effect kind */
+#define M4VSS3GPP_ERR_INVALID_EFFECT_KIND M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0002)
+/**
+ * Invalid effect type for video */
+#define M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0003)
+/**
+ * Invalid effect type for audio */
+#define M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0004)
+/**
+ * Invalid transition type for video */
+#define M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0005)
+/**
+ * Invalid transition type for audio */
+#define M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0006)
+/**
+ * Invalid video encoding frame rate */
+#define M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE \
+ M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0007)
+ /**
+ * External effect function is used without being set */
+#define M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0008)
+/**
+ * External transition function is used without being set */
+#define M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0009)
+
+/**
+ * Begin cut time is larger than the clip duration */
+#define M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0010)
+/**
+ * Begin cut time is larger or equal than end cut */
+#define M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0011)
+/**
+ * Two consecutive transitions are overlapping on one clip */
+#define M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0012)
+
+/**
+ * An input 3GPP file is invalid/corrupted */
+#define M4VSS3GPP_ERR_INVALID_3GPP_FILE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0016)
+/**
+ * A file contains an unsupported video format */
+#define M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0017)
+/**
+ * A file contains an unsupported audio format */
+#define M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0018)
+
+/**
+ * A file format is not supported by the VSS */
+#define M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0019)
+ /**
+ * An input clip has an unexpectedly large Video AU */
+#define M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001A)
+/**
+ * An input clip has an unexpectedly large Audio AU */
+#define M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001B)
+/**
+ * An input clip has a corrupted Audio AMR AU */
+#define M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001C)
+/**
 + * The video encoder encountered an Access Unit error: most probably a file write error */
+#define M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001D)
+
+
+/************************************************************************/
+/* Errors returned by M4VSS3GPP_editAnalyseClip() */
+/************************************************************************/
+
+/**
+ * Unsupported video format for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0020)
+/**
+ * Unsupported H263 profile for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0021)
+/**
+ * Unsupported MPEG-4 profile for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE \
+ M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0022)
+/**
+ * Unsupported MPEG-4 RVLC tool for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0023)
+/**
+ * Unsupported audio format for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0024)
+ /**
+ * File contains no supported stream */
+#define M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0025)
+/**
+ * File contains no video stream or an unsupported video stream */
+#define M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0026)
+/**
+ * Unsupported video profile for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_PROFILE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0027)
+
+/**
 + * Unsupported video level for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_LEVEL M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0028)
+
+/************************************************************************/
+/* Errors returned by M4VSS3GPP_editCheckClipCompatibility() */
+/************************************************************************/
+
+/**
+ * At least one of the clip analysis has been generated by another version of the VSS 3GPP */
+#define M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0030)
+/**
+ * Clips don't have the same video format (H263 or MPEG4) */
+#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0031)
+/**
+ * Clips don't have the same frame size */
+#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0032)
+/**
+ * Clips don't have the same MPEG-4 time scale */
+#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0033)
+/**
+ * Clips don't have the same use of MPEG-4 data partitioning */
+#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0034)
+/**
+ * MP3 clips can't be assembled */
+#define M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0035)
+/**
+ * Clips don't have the same audio stream type (ex: AMR != AAC) */
+#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0036)
+/**
+ * Clips don't have the same audio number of channels (ex: stereo != mono) */
+#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS M4OSA_ERR_CREATE( M4_WAR,\
+ M4VSS3GPP, 0x0037)
+/**
+ * Clips don't have the same sampling frequency (ex: 44100Hz != 16000Hz) */
+#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY M4OSA_ERR_CREATE( M4_WAR,\
+ M4VSS3GPP, 0x0038)
+
+/************************************************************************/
+/* Audio mixing error codes */
+/************************************************************************/
+
+/**
+ * The input 3GPP file does not contain any supported audio or video track */
+#define M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0050)
+/**
 + * The Volume of the added audio track (AddVolume) must be strictly greater than zero */
+#define M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0051)
+/**
 + * The time at which the audio track is added (AddCts) can't be greater than the
+ input video track duration */
+#define M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0052)
+/**
+ * The audio track file format setting is undefined */
+#define M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0053)
+/**
+ * The added audio track stream has an unsupported format */
+#define M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0054)
+/**
+ * The audio mixing feature doesn't support EVRC, MP3 audio tracks */
+#define M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0055)
+/**
+ * An added audio track limit the available features: uiAddCts must be 0
+ and bRemoveOriginal must be M4OSA_TRUE */
+#define M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0056)
+/**
+ * Input audio track is not AMR-NB nor AAC so it can't be mixed with output */
+#define M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0057)
+/**
+ * Input clip must be a 3gpp file */
+#define M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0058)
+/**
+ * Begin loop time is higher than end loop time or higher than added clip duration */
+#define M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0059)
+
+
+/************************************************************************/
+/* Audio mixing and extract picture error code */
+/************************************************************************/
+
+/**
+ * H263 Profile 3 level 10 is not supported */
+#define M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0060)
+/**
+ * File contains no video stream or an unsupported video stream */
+#define M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE M4OSA_ERR_CREATE( M4_ERR,\
+ M4VSS3GPP, 0x0061)
+
+
+/************************************************************************/
+/* Internal error and warning codes */
+/************************************************************************/
+
+/**
+ * Internal state error */
+#define M4VSS3GPP_ERR_INTERNAL_STATE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0100)
+/**
+ * Luminance filter effect error */
+#define M4VSS3GPP_ERR_LUMA_FILTER_ERROR M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0104)
+/**
+ * Transition filter effect error */
+#define M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0106)
+/**
+ * The audio decoder initialization failed */
+#define M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0110)
+/**
+ * The decoder produced an unattended amount of PCM */
+#define M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0115)
+/**
+ * Output file must be 3GPP or MP3 */
+#define M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0117)
+
+#endif /* __M4VSS3GPP_ErrorCodes_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h b/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h
new file mode 100755
index 0000000..9668b67
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VSS3GPP_EXTENDED_API_H__
+#define __M4VSS3GPP_EXTENDED_API_H__
+
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_Extended_API.h
+ * @brief API of xVSS
+ * @note
+ ******************************************************************************
+*/
+
+#ifndef M4VSS_SUPPORT_EXTENDED_FEATURES
+#error "*** the flag M4VSS_SUPPORT_EXTENDED_FEATURES should be activated in CompilerSwitches\
+ for VideoStudio ***"
+#endif
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_getTextRgbBufferFct
+ * @brief External text to RGB buffer functions implemented by the integrator
+ * must match this prototype.
+ * @note The function is provided with the renderingData, the text buffer and
+ * its size. It must build the output RGB image plane containing the text.
+ *
+ * @param pRenderingData (IN) The data given by the user in M4xVSS_EffectSettings
+ * @param pTextBuffer (IN) Text buffer given by the user in M4xVSS_EffectSettings
+ * @param textBufferSize (IN) Text buffer size given by the user in M4xVSS_EffectSettings
+ * @param pOutputPlane (IN/OUT) Output RGB565 image
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+typedef M4OSA_ERR (*M4xVSS_getTextRgbBufferFct)
+(
+ M4OSA_Void *pRenderingData,
+ M4OSA_Void *pTextBuffer,
+ M4OSA_UInt32 textBufferSize,
+ M4VIFI_ImagePlane **pOutputPlane
+);
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_BGMSettings
+ * @brief This structure gathers all the information needed to add Background music to 3gp file
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Void *pFile; /**< Input file path */
+ M4VIDEOEDITING_FileType FileType; /**< .3gp, .amr, .mp3 */
+ M4OSA_UInt32 uiAddCts; /**< Time, in milliseconds, at which the added
+ audio track is inserted */
+ M4OSA_UInt32 uiAddVolume; /**< Volume, in percentage, of the added audio track */
+ M4OSA_UInt32 uiBeginLoop; /**< Describes in milli-second the start time
+ of the loop */
+ M4OSA_UInt32 uiEndLoop; /**< Describes in milli-second the end time of the
+ loop (0 means no loop) */
+ M4OSA_Bool b_DuckingNeedeed;
+ M4OSA_Int32 InDucking_threshold; /**< Threshold value at which background
+ music shall duck */
+ M4OSA_Float lowVolume; /**< lower the background track to this factor
+ and increase the primary track to inverse of this factor */
+ M4OSA_Bool bLoop;
+ M4OSA_UInt32 uiSamplingFrequency;
+ M4OSA_UInt32 uiNumChannels;
+} M4xVSS_BGMSettings;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_VideoEffectType
+ * @brief This enumeration defines the video effect types of the VSS3GPP
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VSS3GPP_kRGB888 = 0, /**< RGB888 data type */
+ M4VSS3GPP_kRGB565 = 1 /**< RGB565 data type */
+
+} M4VSS3GPP_RGBType;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_EffectSettings
+ * @brief This structure defines an audio/video effect for the edition.
+ ******************************************************************************
+*/
+typedef struct
+{
+ /**< In percent of the cut clip duration */
+ M4OSA_UInt32 uiStartPercent;
+ /**< In percent of the ((clip duration) - (effect starttime)) */
+ M4OSA_UInt32 uiDurationPercent;
+ /**< Framing file path (GIF/PNG file), used only if VideoEffectType == framing */
+ M4OSA_Void *pFramingFilePath;
+ /**< Framing RGB565 buffer, used only if VideoEffectType == framing */
+ M4VIFI_ImagePlane *pFramingBuffer;
+ /**<RGB Buffer type,used only if VideoEffectType == framing */
+ M4VSS3GPP_RGBType rgbType;
+ /**< The top-left X coordinate in the output picture where the added frame will be displayed.
+ Used only if VideoEffectType == framing || VideoEffectType == text */
+ M4OSA_UInt32 topleft_x;
+ /**< The top-left Y coordinate in the output picture where the added frame will be displayed.
+ Used only if VideoEffectType == framing || VideoEffectType == text */
+ M4OSA_UInt32 topleft_y;
+ /**< Whether the framing image is resized to the output video size.
+ Used only if VideoEffectType == framing */
+ M4OSA_Bool bResize;
+ M4VIDEOEDITING_VideoFrameSize framingScaledSize;
+/**< Size to which the framing file needs to be resized */
+ /**< Text buffer. Used only if VideoEffectType == text */
+ M4OSA_Void* pTextBuffer;
+ /**< Text buffer size. Used only if VideoEffectType == text */
+ M4OSA_UInt32 textBufferSize;
+ /**< Pointer containing specific data used by the font engine (size, color...) */
+ M4OSA_Void* pRenderingData;
+ /**< Text plane width. Used only if VideoEffectType == text */
+ M4OSA_UInt32 uiTextBufferWidth;
+ /**< Text plane height. Used only if VideoEffectType == text */
+ M4OSA_UInt32 uiTextBufferHeight;
+ /**< Processing rate of the effect added when using the Fifties effect */
+ M4OSA_UInt32 uiFiftiesOutFrameRate;
+ /**< RGB16 input color of the effect added when using the rgb16 color effect */
+ M4OSA_UInt16 uiRgb16InputColor;
+
+ M4OSA_UInt8 uialphaBlendingStart; /*Start percentage of Alpha blending*/
+ M4OSA_UInt8 uialphaBlendingMiddle; /*Middle percentage of Alpha blending*/
+ M4OSA_UInt8 uialphaBlendingEnd; /*End percentage of Alpha blending*/
+ M4OSA_UInt8 uialphaBlendingFadeInTime; /*Duration, in percentage of
+ effect duration, of the FadeIn phase*/
+ M4OSA_UInt8 uialphaBlendingFadeOutTime; /*Duration, in percentage of effect
+ duration, of the FadeOut phase*/
+ M4OSA_UInt32 width; /*width of the ARGB8888 clip.
+ Used only if video effect is framing */
+ M4OSA_UInt32 height; /*height of the ARGB8888 clip.
+ Used only if video effect is framing */
+} M4xVSS_EffectSettings;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_AlphaMagicSettings
+ * @brief This structure defines the alpha magic transition settings
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Void* pAlphaFilePath; /**< Alpha file path (JPG file) */
+ M4OSA_Int32 blendingPercent; /**< Blending Percentage between 0 and 100 */
+ M4OSA_Bool isreverse; /**< direct effect or reverse */
+ /*To support ARGB8888 : get the width and height */
+ M4OSA_UInt32 width;
+ M4OSA_UInt32 height;
+} M4xVSS_AlphaMagicSettings;
+
+/**
+ ******************************************************************************
+ * enum M4xVSS_SlideTransition_Direction
+ * @brief Defines directions for the slide transition
+ ******************************************************************************
+*/
+
+typedef enum {
+ M4xVSS_SlideTransition_RightOutLeftIn,
+ M4xVSS_SlideTransition_LeftOutRightIn,
+ M4xVSS_SlideTransition_TopOutBottomIn,
+ M4xVSS_SlideTransition_BottomOutTopIn
+} M4xVSS_SlideTransition_Direction;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_SlideTransitionSettings
+ * @brief This structure defines the slide transition settings
+ ******************************************************************************
+*/
+
+typedef struct
+{
+ M4xVSS_SlideTransition_Direction direction; /* direction of the slide */
+} M4xVSS_SlideTransitionSettings;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_TransitionSettings
+ * @brief This structure defines additional transition settings specific to
+ * xVSS, which are appended to the VSS3GPP transition settings
+ * structure.
+ ******************************************************************************
+*/
+typedef struct
+{
+ /* Anything xVSS-specific, but common to all transitions, would go here,
+ before the union. */
+ union {
+ /**< AlphaMagic settings, used only if VideoTransitionType ==
+ M4xVSS_kVideoTransitionType_AlphaMagic */
+ M4xVSS_AlphaMagicSettings *pAlphaMagicSettings;
+ /* only in case of slide transition. */
+ M4xVSS_SlideTransitionSettings *pSlideTransitionSettings;
+ } transitionSpecific;
+} M4xVSS_TransitionSettings;
+
+
+/**
+ ******************************************************************************
+ * enum M4xVSS_MediaRendering
+ * @brief This enum defines different media rendering using exif orientation
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4xVSS_kResizing = 0, /*The picture is resized, the aspect ratio can be different
+ from the original one. All of the picture is rendered*/
+ M4xVSS_kCropping, /*The picture is cropped, the aspect ratio is the same as
+ the original one. The picture is not rendered entirely*/
+ M4xVSS_kBlackBorders /*Black borders are rendered in order to keep the original
+ aspect ratio. All the picture is rendered*/
+
+} M4xVSS_MediaRendering;
+
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_ClipSettings
+ * @brief This structure defines an input clip for the edition.
+ * @note It also contains the settings for the cut and begin/end effects applied to the clip.
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt32 uiBeginCutPercent; /**< Begin cut time, in percent of clip
+ duration (only for 3GPP clip !) */
+ M4OSA_UInt32 uiEndCutPercent; /**< End cut time, in percent of clip
+ duration (only for 3GPP clip !) */
+ M4OSA_UInt32 uiDuration; /**< Duration of the clip, if different
+ from 0, has priority on
+ uiEndCutTime or uiEndCutPercent */
+ M4OSA_Bool isPanZoom; /**< RC: Boolean used to know if the
+ pan and zoom mode is enabled */
+ M4OSA_UInt16 PanZoomXa; /**< RC */
+ M4OSA_UInt16 PanZoomTopleftXa; /**< RC */
+ M4OSA_UInt16 PanZoomTopleftYa; /**< RC */
+ M4OSA_UInt16 PanZoomXb; /**< RC */
+ M4OSA_UInt16 PanZoomTopleftXb; /**< RC */
+ M4OSA_UInt16 PanZoomTopleftYb; /**< RC */
+ M4xVSS_MediaRendering MediaRendering; /**< FB only used with JPEG: to crop,
+ resize, or render black borders*/
+
+} M4xVSS_ClipSettings;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_EditSettings
+ * @brief This structure gathers all the information needed to define a complete
+ * edition operation
+ ******************************************************************************
+*/
+typedef struct
+{
+ /**< Output video size */
+ M4VIDEOEDITING_VideoFrameSize outputVideoSize;
+ /**< Output video format (MPEG4 / H263) */
+ M4VIDEOEDITING_VideoFormat outputVideoFormat;
+ /**< Output audio format (AAC, AMRNB ...) */
+ M4VIDEOEDITING_AudioFormat outputAudioFormat;
+ /**< Output audio sampling freq (8000Hz,...) */
+ M4VIDEOEDITING_AudioSamplingFrequency outputAudioSamplFreq;
+ /**< Maximum output file size in BYTES (if set to 0, no limit) */
+ M4OSA_UInt32 outputFileSize;
+ /**< Must the output audio be mono? Valid only for AAC */
+ M4OSA_Bool bAudioMono;
+ /**< Output video bitrate*/
+ M4OSA_UInt32 outputVideoBitrate;
+ /**< Output audio bitrate*/
+ M4OSA_UInt32 outputAudioBitrate;
+ /**< Background music track settings */
+ M4xVSS_BGMSettings *pBGMtrack;
+ /**< Function pointer on text rendering engine, if not used, must be set to NULL !! */
+ M4xVSS_getTextRgbBufferFct pTextRenderingFct;
+ /** output video profile and level*/
+ M4OSA_Int32 outputVideoProfile;
+ M4OSA_Int32 outputVideoLevel;
+
+} M4xVSS_EditSettings;
+
+#endif /* __M4VSS3GPP_EXTENDED_API_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h
new file mode 100755
index 0000000..2669feb
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VSS3GPP_INTERNALCONFIG_H__
+#define __M4VSS3GPP_INTERNALCONFIG_H__
+
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_InternalConfig.h
+ * @brief This file contains some magical and configuration parameters.
+ ******************************************************************************
+*/
+
+/***********************/
+/* VideoEdition config */
+/***********************/
+
+#define M4VSS3GPP_MINIMAL_TRANSITION_DURATION 100 /**< 100 milliseconds */
+#define M4VSS3GPP_NB_AU_PREFETCH 4 /**< prefetch 4 AUs */
+#define M4VSS3GPP_NO_STSS_JUMP_POINT 40000 /**< If 3gp file does not contain
+ an STSS table (no rap frames),
+ jump backward 40 s maximum */
+
+/*****************/
+/* Writer config */
+/*****************/
+
+#define M4VSS3GPP_WRITER_AUDIO_STREAM_ID 1
+#define M4VSS3GPP_WRITER_VIDEO_STREAM_ID 2
+
+/**< Max AU size will be 0.9 times the YUV4:2:0 frame size */
+#define M4VSS3GPP_VIDEO_MIN_COMPRESSION_RATIO 0.9F
+/**< Max chunk size will be 1.2 times the max AU size */
+#define M4VSS3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO 1.2F
+
+/** READ CAREFULLY IN CASE OF REPORTED RUNNING TROUBLES
+The max AU size is used to pre-set max size of AU that can be written in the 3GP writer
+For audio standard with variable AU size, there could be some encoding settings leading to AU size
+exceeding this limit.
+For AAC streams for instance the average AU size is given by:
+av AU size = (av bitrate * 1024)/(sampling freq)
+If VSS returns the message:
+>> ERROR: audio AU size (XXXX) to copy larger than allocated one (YYYY) => abort
+>> PLEASE CONTACT SUPPORT TO EXTEND MAX AU SIZE IN THE PRODUCT LIBRARY
+Error is most likely to happen when mixing with audio full replacement
+ */
+/**< AAC max AU size - READ EXPLANATION ABOVE */
+#define M4VSS3GPP_AUDIO_MAX_AU_SIZE 2048
+/**< set to x4 max AU size per chunk */
+#define M4VSS3GPP_AUDIO_MAX_CHUNCK_SIZE 8192
+
+
+/***********************/
+/* H263 / MPEG4 config */
+/***********************/
+
+#define M4VSS3GPP_EDIT_H263_MODULO_TIME 255
+
+#ifdef BIG_ENDIAN
+/**< 0xb3 01 00 00 Little endian / b00 00 00 01 b3 big endian*/
+#define M4VSS3GPP_EDIT_GOV_HEADER 0x000001b3
+#else
+/**< 0xb3 01 00 00 Little endian / b00 00 00 01 b3 big endian*/
+#define M4VSS3GPP_EDIT_GOV_HEADER 0xb3010000
+#endif
+
+/**************/
+/* AMR config */
+/**************/
+
+#define M4VSS3GPP_WRITTEN_AMR_TRACK_TIME_SCALE 8000
+#define M4VSS3GPP_AMR_DECODED_PCM_SAMPLE_NUMBER 160 /**< 20ms at 8000hz -->
+ 20x8=160 samples */
+#define M4VSS3GPP_AMR_DEFAULT_BITRATE 12200 /**< 12.2 kbps */
+
+/**************/
+/* EVRC config */
+/**************/
+
+#define M4VSS3GPP_EVRC_DEFAULT_BITRATE 9200 /**< 9.2 kbps */
+
+/**************/
+/* MP3 config */
+/**************/
+
+/** Macro to make a jump on the MP3 track in several steps
+ To avoid blocking the system with a long MP3 jump, this process
+ is divided into several steps.
+ */
+#define M4VSS3GPP_MP3_JUMPED_AU_NUMBER_MAX 100
+
+/** Macro to define the number of read AU to analyse the bitrate
+ So the process will read the first n AU of the MP3 stream to get
+ the average bitrate. n is defined by this define.
+ */
+#define M4VSS3GPP_MP3_AU_NUMBER_MAX 500
+
+/*****************************/
+/* define AMR silence frames */
+/*****************************/
+
+#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE 13
+#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 160
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+const M4OSA_UInt8 M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+{
+ 0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33,
+ 0xFF, 0xE0, 0x00, 0x00, 0x00
+};
+#else
+extern const M4OSA_UInt8 \
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
+#endif
+
+/*****************************/
+/* define AAC silence frames */
+/*****************************/
+
+#define M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE 4
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE] =
+{
+ 0x00, 0xC8, 0x20, 0x07
+};
+#else
+extern const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE];
+#endif
+
+#define M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE 6
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE] =
+{
+ 0x21, 0x10, 0x03, 0x20, 0x54, 0x1C
+};
+#else
+extern const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE];
+#endif
+
+#endif /* __M4VSS3GPP_INTERNALCONFIG_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h
new file mode 100755
index 0000000..e855882
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_InternalFunctions.h
+ * @brief This file contains all function prototypes not visible to the external world.
+ * @note
+ ******************************************************************************
+*/
+
+
+#ifndef __M4VSS3GPP_INTERNALFUNCTIONS_H__
+#define __M4VSS3GPP_INTERNALFUNCTIONS_H__
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * VSS public API and types */
+#include "M4VSS3GPP_API.h"
+
+/**
+ * VSS private types */
+#include "M4VSS3GPP_InternalTypes.h"
+
+
+#include "M4READER_Common.h" /**< for M4_AccessUnit definition */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* All errors are fatal in the VSS */
+#define M4ERR_CHECK_RETURN(err) if(M4NO_ERROR!=err) return err;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepVideo()
+ * @brief One step of video processing
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intEditStepVideo(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepAudio()
+ * @brief One step of audio processing
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intEditStepAudio(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepMP3()
+ * @brief One step of audio processing for the MP3 clip
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intEditStepMP3(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intOpenClip()
+ * @brief Open next clip
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intOpenClip(M4VSS3GPP_InternalEditContext *pC, M4VSS3GPP_ClipContext **hClip,
+ M4VSS3GPP_ClipSettings *pClipSettings);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder()
+ * @brief Destroy the video encoder
+ * @note
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder()
+ * @brief Creates the video encoder
+ * @note
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo()
+ * @brief Do what to do when the end of a clip video track is reached
+ * @note If there is audio on the current clip, process it, else switch to the next clip
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio()
+ * @brief Do what to do when the end of a clip audio track is reached
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing()
+ * @brief Check if the clip is compatible with VSS editing
+ * @note
+ * @param pClipCtxt (IN) internal clip context
+ * @param pClipProperties (OUT) Pointer to a valid ClipProperties structure.
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing(M4VIDEOEDITING_ClipProperties \
+ *pClipProperties);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipOpen()
+ * @brief Open a clip. Creates a clip context.
+ * @note
+ * @param hClipCtxt (OUT) Return the internal clip context
+ * @param pClipSettings (IN) Edit settings of this clip. The module will keep a
+ * reference to this pointer
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @param bSkipAudioTrack (IN) If true, do not open the audio
+ * @param bFastOpenMode (IN) If true, use the fast mode of the 3gpp reader
+ * (only the first AU is read)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipInit (
+ M4VSS3GPP_ClipContext **hClipCtxt,
+ M4OSA_FileReadPointer *pFileReadPtrFct
+);
+
+M4OSA_ERR M4VSS3GPP_intClipOpen (
+ M4VSS3GPP_ClipContext *pClipCtxt,
+ M4VSS3GPP_ClipSettings *pClipSettings,
+ M4OSA_Bool bSkipAudioTrack,
+ M4OSA_Bool bFastOpenMode,
+ M4OSA_Bool bAvoidOpeningVideoDec
+);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
+ * @brief Delete the audio track. Clip will be like if it had no audio track
+ * @note
+ * @param pClipCtxt (IN) Internal clip context
+ ******************************************************************************
+*/
+M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts()
+ * @brief Jump to the previous RAP and decode up to the current video time
+ * @param pClipCtxt (IN) Internal clip context
+ * @param iCts (IN) Target CTS
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts(M4VSS3GPP_ClipContext* pClipCtxt, M4OSA_Int32 iCts);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame()
+ * @brief Read one AU frame in the clip
+ * @note
+ * @param pClipCtxt (IN) Internal clip context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame()
+ * @brief Decode the current AUDIO frame.
+ * @note
+ * @param pClipCtxt (IN) internal clip context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt()
+ * @brief Jump in the audio track of the clip.
+ * @note
+ * @param pClipCtxt (IN) internal clip context
+ * @param pJumpCts (IN/OUT) in:target CTS, out: reached CTS
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt(M4VSS3GPP_ClipContext *pClipCtxt, M4OSA_Int32 *pJumpCts);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipClose()
+ * @brief Close a clip. Destroy the context.
+ * @note
+ * @param pClipCtxt (IN) Internal clip context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipClose(M4VSS3GPP_ClipContext *pClipCtxt);
+
+M4OSA_ERR M4VSS3GPP_intClipCleanUp(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditJumpMP3()
+ * @brief One step of jumping processing for the MP3 clip.
+ * @note On one step, the jump of several AU is done
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intEditJumpMP3(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerWriter()
+ * @brief This function will register a specific file format writer.
+ * @note According to the Mediatype, this function will store in the internal context
+ * the writer context.
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext,pWtrGlobalInterface or pWtrDataInterface is
+ * M4OSA_NULL (debug only), or invalid MediaType
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_registerWriter(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4WRITER_OutputFileType MediaType,
+ M4WRITER_GlobalInterface* pWtrGlobalInterface,
+ M4WRITER_DataInterface* pWtrDataInterface);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerVideoEncoder()
+ * @brief This function will register a specific video encoder.
+ * @note According to the Mediatype, this function will store in the internal context
+ * the encoder context.
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
+ * or invalid MediaType
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_registerVideoEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4ENCODER_Format MediaType,
+ M4ENCODER_GlobalInterface *pEncGlobalInterface);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerAudioEncoder()
+ * @brief This function will register a specific audio encoder.
+ * @note According to the Mediatype, this function will store in the internal context
+ * the encoder context.
+ * @param pContext: (IN) Execution context.
+ * @param mediaType: (IN) The media type.
+ * @param pEncGlobalInterface: (OUT) the encoder interface functions.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_registerAudioEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4ENCODER_AudioFormat MediaType,
+ M4ENCODER_AudioGlobalInterface *pEncGlobalInterface);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerReader()
+ * @brief Register reader.
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_registerReader(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4READER_MediaType mediaType,
+ M4READER_GlobalInterface *pRdrGlobalInterface,
+ M4READER_DataInterface *pRdrDataInterface);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerVideoDecoder()
+ * @brief Register video decoder
+ * @param pContext (IN/OUT) VSS context.
+ * @param decoderType (IN) Decoder type
+ * @param pDecoderInterface (IN) Decoder interface.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only), or the decoder type
+ * is invalid
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_registerVideoDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4DECODER_VideoType decoderType,
+ M4DECODER_VideoInterface *pDecoderInterface);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerAudioDecoder()
+ * @brief Register audio decoder
+ * @note This function is used internally by the VSS to register audio decoders.
+ * @param context (IN/OUT) VSS context.
+ * @param decoderType (IN) Audio decoder type
+ * @param pDecoderInterface (IN) Audio decoder interface.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null, or the decoder type is invalid
+ * (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_registerAudioDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4AD_Type decoderType,
+ M4AD_Interface *pDecoderInterface);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_unRegisterAllWriters()
+ * @brief Unregister writer
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_unRegisterAllWriters(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_unRegisterAllEncoders()
+ * @brief Unregister the encoders
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_unRegisterAllEncoders(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_unRegisterAllReaders()
+ * @brief Unregister reader
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_unRegisterAllReaders(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_unRegisterAllDecoders()
+ * @brief Unregister the decoders
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_unRegisterAllDecoders(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentWriter()
+ * @brief Set current writer
+ * @param pContext (IN/OUT) VSS context.
+ * @param mediaType (IN) Media type.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_setCurrentWriter(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4VIDEOEDITING_FileType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentVideoEncoder()
+ * @brief Set a video encoder
+ * @param pContext (IN/OUT) VSS context.
+ * @param MediaType (IN) Encoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_setCurrentVideoEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4SYS_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentAudioEncoder()
+ * @brief Set an audio encoder
+ * @param context (IN/OUT) VSS context.
+ * @param MediaType (IN) Encoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_setCurrentAudioEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4SYS_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentReader()
+ * @brief Set current reader
+ * @param pContext (IN/OUT) VSS context.
+ * @param mediaType (IN) Media type.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_setCurrentReader(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4VIDEOEDITING_FileType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentVideoDecoder()
+ * @brief Set a video decoder
+ * @param pContext (IN/OUT) VSS context.
+ * @param decoderType (IN) Decoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_setCurrentVideoDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentAudioDecoder()
+ * @brief Set an audio decoder
+ * @param context (IN/OUT) VSS context.
+ * @param decoderType (IN) Decoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_setCurrentAudioDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_clearInterfaceTables()
+ * @brief Clear encoders, decoders, reader and writers interfaces tables
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: The context is null
+ ************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_clearInterfaceTables(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_SubscribeMediaAndCodec()
+ * @brief This function registers the reader, decoders, writers and encoders
+ * in the VSS.
+ * @note
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext is NULL
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec(M4VSS3GPP_MediaAndCodecCtxt *pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB()
+ * @brief Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
+ * @note
+ * @param pAudioFrame (IN) AMRNB frame
+ * @return Size of the AMR Narrow-Band frame in bytes
+ ******************************************************************************
+*/
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB(M4OSA_MemAddr8 pAudioFrame);
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC()
+ * @brief Return the length, in bytes, of the EVRC frame contained in the given buffer
+ * @note
+ * 0 1 2 3
+ * +-+-+-+-+
+ * |fr type| RFC 3558
+ * +-+-+-+-+
+ *
+ * Frame Type: 4 bits
+ * The frame type indicates the type of the corresponding codec data
+ * frame in the RTP packet.
+ *
+ * For EVRC and SMV codecs, the frame type values and size of the
+ * associated codec data frame are described in the table below:
+ *
+ * Value Rate Total codec data frame size (in octets)
+ * ---------------------------------------------------------
+ * 0 Blank 0 (0 bit)
+ * 1 1/8 2 (16 bits)
+ * 2 1/4 5 (40 bits; not valid for EVRC)
+ * 3 1/2 10 (80 bits)
+ * 4 1 22 (171 bits; 5 padded at end with zeros)
+ * 5 Erasure 0 (SHOULD NOT be transmitted by sender)
+ *
+ * @param pCpAudioFrame (IN) EVRC frame
+ * @return Size of the EVRC frame in bytes
+ ******************************************************************************
+*/
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC(M4OSA_MemAddr8 pAudioFrame);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intBuildAnalysis()
+ * @brief Get video and audio properties from the clip streams
+ * @note This function must return fatal errors only (errors that should not happen in the
+ * final integrated product).
+ * @param pClipCtxt (IN) internal clip context
+ * @param pClipProperties (OUT) Pointer to a valid ClipProperties structure.
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intBuildAnalysis(M4VSS3GPP_ClipContext *pClipCtxt,
+ M4VIDEOEDITING_ClipProperties *pClipProperties);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder()
+ * @brief Reset the audio encoder (Create it if needed)
+ * @note
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder(M4VSS3GPP_EncodeWriteContext *pC_ewc,
+ M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+ M4OSA_UInt32 uiAudioBitrate);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile()
+ * @brief Creates and prepares the output 3GPP file
+ * @note Creates the writer, Creates the output file, Adds the streams, Readies the
+ * writing process
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile(M4VSS3GPP_EncodeWriteContext *pC_ewc,
+ M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+ M4OSA_FileWriterPointer *pOsaFileWritPtr,
+ M4OSA_Void* pOutputFile,
+ M4OSA_FileReadPointer *pOsaFileReadPtr,
+ M4OSA_Void* pTempFile,
+ M4OSA_UInt32 maxOutputFileSize);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility()
+ * @brief This function allows checking if two clips are compatible with each other for
+ * VSS 3GPP audio mixing feature.
+ * @note
+ * @param pC (IN) Context of the audio mixer
+ * @param pInputClipProperties (IN) Clip analysis of the first clip
+ * @param pAddedClipProperties (IN) Clip analysis of the second clip
+ * @return M4NO_ERROR: No error
+ * @return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility(M4VSS3GPP_InternalAudioMixingContext *pC,
+ M4VIDEOEDITING_ClipProperties \
+ *pInputClipProperties,
+ M4VIDEOEDITING_ClipProperties \
+ *pAddedClipProperties);
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
+ * @brief Delete the audio track. Clip will be like if it had no audio track
+ * @note
+ * @param pClipCtxt (IN) Internal clip context
+ ******************************************************************************
+*/
+M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intStartAU()
+ * @brief StartAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param pContext: (IN) It is the VSS 3GPP context in our case
+ * @param streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param pAU: (IN/OUT) Access Unit to be prepared.
+ * @return M4NO_ERROR: there is no error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intStartAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,
+ M4SYS_AccessUnit* pAU);
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intProcessAU()
+ * @brief ProcessAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param pContext: (IN) It is the VSS 3GPP context in our case
+ * @param streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param pAU: (IN/OUT) Access Unit to be written
+ * @return M4NO_ERROR: there is no error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intProcessAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,
+ M4SYS_AccessUnit* pAU);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intVPP()
+ * @brief We implement our own VideoPreProcessing function
+ * @note It is called by the video encoder
+ * @param pContext (IN) VPP context, which actually is the VSS 3GPP context in our case
+ * @param pPlaneIn (IN)
+ * @param pPlaneOut (IN/OUT) Pointer to an array of 3 planes that will contain the
+ * output YUV420 image
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ M4VIFI_ImagePlane* pPlaneOut);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4VSS3GPP_INTERNALFUNCTIONS_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h
new file mode 100755
index 0000000..a7900f0
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h
@@ -0,0 +1,781 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_InternalTypes.h
+ * @brief This file contains all enum and types not visible to the external world.
+ * @note
+ ******************************************************************************
+*/
+
+
+#ifndef __M4VSS3GPP_INTERNALTYPES_H__
+#define __M4VSS3GPP_INTERNALTYPES_H__
+
+#define M4VSS_VERSION_MAJOR 3
+#define M4VSS_VERSION_MINOR 2
+#define M4VSS_VERSION_REVISION 5
+
+#include "NXPSW_CompilerSwitches.h"
+
+/**
+ * VSS public API and types */
+#include "M4VSS3GPP_API.h"
+
+/**
+ * Internally used modules */
+#include "M4READER_Common.h" /**< Reader common interface */
+#include "M4WRITER_common.h" /**< Writer common interface */
+#include "M4DECODER_Common.h" /**< Decoder common interface */
+#include "M4ENCODER_common.h" /**< Encoder common interface */
+#include "M4VIFI_FiltersAPI.h" /**< Image planes definition */
+#include "M4READER_3gpCom.h" /**< Read 3GPP file */
+#include "M4AD_Common.h" /**< Decoder audio */
+#include "M4ENCODER_AudioCommon.h" /**< Encode audio */
+
+
+#include "SSRC.h" /**< SSRC */
+#include "From2iToMono_16.h" /**< Stereo to Mono */
+#include "MonoTo2I_16.h" /**< Mono to Stereo */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define WINDOW_SIZE 10
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_EditState
+ * @brief Main state machine of the VSS 3GPP edit operation.
+ ******************************************************************************
+*/
+
+typedef enum
+{
+ M4VSS3GPP_kEditState_CREATED = 0, /**< M4VSS3GPP_editInit has been called */
+ M4VSS3GPP_kEditState_VIDEO = 1, /**< Processing video track */
+ M4VSS3GPP_kEditState_AUDIO = 2, /**< Processing audio track */
+ M4VSS3GPP_kEditState_MP3 = 3, /**< Processing MP3 audio track */
+ M4VSS3GPP_kEditState_MP3_JUMP = 4, /**< Processing a jump in a MP3 audio track */
+ M4VSS3GPP_kEditState_FINISHED = 5, /**< Processing done, VSS 3GPP can be closed */
+ M4VSS3GPP_kEditState_CLOSED = 6 /**< Output file has been closed,
+ VSS 3GPP can be destroyed */
+}
+M4VSS3GPP_EditState;
+
+typedef enum
+{
+ /**< Doing Read/Write operation. This operation will have no processing
+ * on input frames. Only time stamp manipulations in output file. */
+ M4VSS3GPP_kEditVideoState_READ_WRITE = 10,
+ /**< Decode encode to create an I frame. This is done for a single frame
+ * to create a new reference frame. */
+ M4VSS3GPP_kEditVideoState_BEGIN_CUT = 11,
+ /**< Doing Read->Decode->Filter->Encode->Write operation on the input file
+ * to create the output file. */
+ M4VSS3GPP_kEditVideoState_DECODE_ENCODE = 12,
+ /**< Applied when Transition is active and blending of two videos is
+ * required. */
+ M4VSS3GPP_kEditVideoState_TRANSITION = 13,
+ /**< Special Read/Write mode used after BEGIN_CUT state. The frame
+ * is already coded as I frame in BEGIN_CUT state; so skip it. */
+ M4VSS3GPP_kEditVideoState_AFTER_CUT = 14
+}
+M4VSS3GPP_EditVideoState;
+
+typedef enum
+{
+ M4VSS3GPP_kEditAudioState_READ_WRITE = 20, /**< Doing Read/Write operation
+ (no decoding/encoding) */
+ M4VSS3GPP_kEditAudioState_DECODE_ENCODE = 21, /**< Doing Read-Decode/Filter/
+ Encode-Write operation */
+ M4VSS3GPP_kEditAudioState_TRANSITION = 22 /**< Transition; blending of two audio */
+}
+M4VSS3GPP_EditAudioState;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_ClipStatus
+ * @brief Status of the clip.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VSS3GPP_kClipStatus_READ = 0, /**< The clip is currently ready for reading */
+ M4VSS3GPP_kClipStatus_DECODE = 1, /**< The clip is currently ready for decoding */
+ M4VSS3GPP_kClipStatus_DECODE_UP_TO = 2 /**< The clip is currently in split
+ decodeUpTo() processing */
+}
+M4VSS3GPP_ClipStatus;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_ClipCurrentEffect
+ * @brief Current effect applied to the clip.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VSS3GPP_kClipCurrentEffect_NONE = 0, /**< None */
+ M4VSS3GPP_kClipCurrentEffect_BEGIN = 1, /**< Begin effect currently applied */
+ M4VSS3GPP_kClipCurrentEffect_END = 2 /**< End effect currently applied */
+}
+M4VSS3GPP_ClipCurrentEffect;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_AudioMixingState
+ * @brief Main state machine of the VSS audio mixing operation.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VSS3GPP_kAudioMixingState_VIDEO = 0, /**< Video is being processed */
+ M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT, /**< Audio is being processed */
+ M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT, /**< Audio is being processed */
+ M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT, /**< Audio is being processed */
+ M4VSS3GPP_kAudioMixingState_FINISHED /**< Processing finished, user must now
+ call M4VSS3GPP_audioMixingCleanUp*/
+}
+M4VSS3GPP_AudioMixingState;
+
+
+/**
+ ******************************************************************************
+ * enum M4VSS3GPP_ExtractPictureState
+ * @brief Main state machine of the VSS picture extraction.
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4VSS3GPP_kExtractPictureState_OPENED = 0, /**< Video clip is opened and ready to be read
+ until the RAP before the picture to extract */
+ M4VSS3GPP_kExtractPictureState_PROCESS = 1, /**< Video is decoded from the previous RAP
+ to the picture to extract */
+ M4VSS3GPP_kExtractPictureState_EXTRACTED= 2 /**< Video AU has been decoded, user must now
+ call M4VSS3GPP_extractPictureCleanUp */
+}
+M4VSS3GPP_ExtractPictureState;
+
+
+/**
+ ******************************************************************************
+ * @brief Codecs registration same as in VPS and VES, so less mapping
+ * is required toward VSS api types
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4WRITER_GlobalInterface* pGlobalFcts; /**< open, close, setOption, etc. functions */
+ M4WRITER_DataInterface* pDataFcts; /**< data manipulation (AU writing) functions */
+} M4VSS3GPP_WriterInterface;
+/**
+ ******************************************************************************
+ * struct AAC_DEC_STREAM_PROPS
+ * @brief AAC Stream properties
+ * @note aNumChan and aSampFreq are used for parsing even if the user
+ * parameters are different. User parameters drive the output behaviour
+ * of the decoder, whereas the bitstream properties are used for parsing.
+ ******************************************************************************
+ */
+typedef struct {
+ M4OSA_Int32 aAudioObjectType; /**< Audio object type of the stream - in fact
+ the type found in the Access Unit parsed */
+ M4OSA_Int32 aNumChan; /**< number of channels (=1(mono) or =2(stereo))
+ as indicated by input bitstream*/
+ M4OSA_Int32 aSampFreq; /**< sampling frequency in Hz */
+ M4OSA_Int32 aExtensionSampFreq; /**< extended sampling frequency in Hz, = 0 is
+ no extended frequency */
+ M4OSA_Int32 aSBRPresent; /**< presence=1/absence=0 of SBR */
+ M4OSA_Int32 aPSPresent; /**< presence=1/absence=0 of PS */
+ M4OSA_Int32 aMaxPCMSamplesPerCh; /**< max number of PCM samples per channel */
+} AAC_DEC_STREAM_PROPS;
+
+
+/**
+ ******************************************************************************
+ * struct M4VSS3GPP_MediaAndCodecCtxt
+ * @brief Filesystem and codec registration function pointers
+ ******************************************************************************
+*/
+typedef struct {
+ /**
+ * Media and Codec registration */
+ /**< Table of M4VSS3GPP_WriterInterface structures for the available writers list */
+ M4VSS3GPP_WriterInterface WriterInterface[M4WRITER_kType_NB];
+ /**< open, close, setOption, etc. functions of the used writer*/
+ M4WRITER_GlobalInterface* pWriterGlobalFcts;
+ /**< data manipulation functions of the used writer */
+ M4WRITER_DataInterface* pWriterDataFcts;
+
+ /**< Table of M4ENCODER_GlobalInterface structures for the available encoders list */
+ M4ENCODER_GlobalInterface* pVideoEncoderInterface[M4ENCODER_kVideo_NB];
+ /**< Functions of the used encoder */
+ M4ENCODER_GlobalInterface* pVideoEncoderGlobalFcts;
+
+ M4OSA_Void* pVideoEncoderExternalAPITable[M4ENCODER_kVideo_NB];
+ M4OSA_Void* pCurrentVideoEncoderExternalAPI;
+ M4OSA_Void* pVideoEncoderUserDataTable[M4ENCODER_kVideo_NB];
+ M4OSA_Void* pCurrentVideoEncoderUserData;
+
+ /**< Table of M4ENCODER_AudioGlobalInterface structures for the available encoders list */
+ M4ENCODER_AudioGlobalInterface* pAudioEncoderInterface[M4ENCODER_kAudio_NB];
+ /**< Table of internal/external flags for the available encoders list */
+ M4OSA_Bool pAudioEncoderFlag[M4ENCODER_kAudio_NB];
+ /**< Functions of the used encoder */
+ M4ENCODER_AudioGlobalInterface* pAudioEncoderGlobalFcts;
+
+ /**< Table of registered reader global/data interfaces */
+ M4READER_GlobalInterface* m_pReaderGlobalItTable[M4READER_kMediaType_NB];
+ M4READER_DataInterface* m_pReaderDataItTable[M4READER_kMediaType_NB];
+ M4READER_GlobalInterface* m_pReader; /**< Interfaces of the current reader */
+ M4READER_DataInterface* m_pReaderDataIt;
+ M4OSA_UInt8 m_uiNbRegisteredReaders;
+
+ M4DECODER_VideoInterface* m_pVideoDecoder; /**< Current video decoder interface */
+ M4DECODER_VideoInterface* m_pVideoDecoderItTable[M4DECODER_kVideoType_NB];
+ M4OSA_UInt8 m_uiNbRegisteredVideoDec;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+ M4OSA_Void* m_pCurrentVideoDecoderUserData;
+ M4OSA_Void* m_pVideoDecoderUserDataTable[M4DECODER_kVideoType_NB];
+#endif
+
+ M4AD_Interface* m_pAudioDecoder; /**< Current audio decoder interface */
+ M4AD_Interface* m_pAudioDecoderItTable[M4AD_kType_NB];
+ /**< store indices of external decoders */
+ M4OSA_Bool m_pAudioDecoderFlagTable[M4AD_kType_NB];
+
+ M4OSA_Void* pAudioEncoderUserDataTable[M4ENCODER_kAudio_NB];
+ M4OSA_Void* pCurrentAudioEncoderUserData;
+
+ M4OSA_Void* pAudioDecoderUserDataTable[M4AD_kType_NB];
+ M4OSA_Void* pCurrentAudioDecoderUserData;
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+ /* boolean to tell whether registered external OMX codecs should be freed during cleanup
+ or new codec registration*/
+ M4OSA_Bool bAllowFreeingOMXCodecInterface;
+#endif
+
+
+} M4VSS3GPP_MediaAndCodecCtxt;
+
+
+/**
+ ******************************************************************************
+ * structure M4VSS3GPP_ClipContext
+ * @brief This structure contains information related to one 3GPP clip (private)
+ * @note This structure is used to store the context related to one clip
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4VSS3GPP_ClipSettings* pSettings; /**< Pointer to the clip settings
+ (not possessed) */
+
+ M4VSS3GPP_ClipStatus Vstatus; /**< Video status of the clip reading */
+ M4VSS3GPP_ClipStatus Astatus; /**< Audio status of the clip reading */
+
+ M4OSA_Int32 iVoffset; /**< [Milliseconds] Offset between the
+ clip and the output video stream
+ (begin cut taken into account) */
+ M4OSA_Int32 iAoffset; /**< [Timescale] Offset between the clip
+ and the output audio stream (begin
+ cut taken into account) */
+
+ /**
+ * 3GPP reader Stuff */
+ M4OSA_FileReadPointer* pFileReadPtrFct;
+ M4OSA_Context pReaderContext; /**< Context of the 3GPP reader module */
+ M4_VideoStreamHandler* pVideoStream; /**< Description of the read video stream */
+ M4_AudioStreamHandler* pAudioStream; /**< Description of the read audio stream */
+ M4_AccessUnit VideoAU; /**< Read video access unit (we do not use a
+ pointer to allocate later, because
+ most of the time we will need it) */
+ M4_AccessUnit AudioAU; /**< Read audio access unit (we do not use a
+ pointer to allocate later, because most
+ of the time we will need it) */
+ M4OSA_Bool bVideoAuAvailable; /**< Tell if a video AU is available
+ (previously read) */
+ /**< Boolean only used to fix the BZZ bug... */
+ M4OSA_Bool bFirstAuWritten;
+
+ /**
+ * Video decoder stuff */
+ M4OSA_Context pViDecCtxt; /**< Video decoder context */
+ M4OSA_Int32 iVideoDecCts; /**< [Milliseconds] For video decodeUpTo(),
+ the actual reached cts */
+ M4OSA_Int32 iVideoRenderCts; /**< [Milliseconds] For video render(),
+ the actual reached cts */
+ M4OSA_Bool isRenderDup; /**< To handle duplicate frame rendering in
+ case of external decoding */
+ M4VIFI_ImagePlane* lastDecodedPlane; /**< Last decoded plane */
+
+ /**
+ * MPEG4 time info stuff at clip level */
+ M4OSA_Bool bMpeg4GovState; /**< Namely, update or initialization */
+ M4OSA_UInt32 uiMpeg4PrevGovValueGet; /**< Previous Gov value read (in second) */
+ M4OSA_UInt32 uiMpeg4PrevGovValueSet; /**< Previous Gov value write (in second) */
+
+ /**
+ * Time-line stuff */
+ /**< [Milliseconds] CTS at which the video clip actually starts */
+ M4OSA_Int32 iActualVideoBeginCut;
+ /**< [Milliseconds] CTS at which the audio clip actually starts */
+ M4OSA_Int32 iActualAudioBeginCut;
+ /**< [Milliseconds] Time at which the clip must end */
+ M4OSA_Int32 iEndTime;
+
+ /**
+ * Audio decoder stuff */
+ M4OSA_Context pAudioDecCtxt; /**< Context of the AMR decoder */
+ M4AD_Buffer AudioDecBufferIn; /**< Input structure for the audio decoder */
+ M4AD_Buffer AudioDecBufferOut; /**< Buffer for the decoded PCM data */
+ AAC_DEC_STREAM_PROPS AacProperties; /**< Structure for new api to get AAC
+ properties */
+
+ /**
+ * Audio AU to Frame split stuff */
+ M4OSA_Bool bAudioFrameAvailable; /**< True if an audio frame is available */
+ M4OSA_MemAddr8 pAudioFramePtr; /**< Pointer to the Audio frame */
+ M4OSA_UInt32 uiAudioFrameSize; /**< Size of the audio frame available */
+ M4OSA_Int32 iAudioFrameCts; /**< [Timescale] CTS of the audio frame
+ available */
+
+ /**
+ * Silence frame stuff */
+ /**< Size to reserve to store a pcm full of zeros compatible with master clip stream type */
+ M4OSA_UInt32 uiSilencePcmSize;
+ /**< Pointer to silence frame data compatible with master clip stream type */
+ M4OSA_UInt8* pSilenceFrameData;
+ /**< Size of silence frame data compatible with master clip stream type */
+ M4OSA_UInt32 uiSilenceFrameSize;
+ /**< [Timescale] Duration of silence frame data compatible with master clip stream type */
+ M4OSA_Int32 iSilenceFrameDuration;
+ M4OSA_Double scale_audio; /**< frequency / 1000.0 */
+
+ /**
+ * Interfaces of the used modules */
+ /**< Filesystem and shell reader, decoder functions */
+ M4VSS3GPP_MediaAndCodecCtxt ShellAPI;
+ M4VIFI_ImagePlane *pPlaneYuv; /* YUV420 image plane, converted from ARGB888 */
+ M4VIFI_ImagePlane* m_pPreResizeFrame; /* The decoded image before resize
+ (allocated only if resize needed)*/
+ M4VIFI_ImagePlane *pPlaneYuvWithEffect; /* YUV420 image plane, with color effect */
+ M4OSA_Bool bGetYuvDataFromDecoder; /* Boolean used to get YUV data from dummy video decoder only for first time */
+} M4VSS3GPP_ClipContext;
+
+
+/**
+ ******************************************************************************
+ * enum anonymous enum
+ * @brief enum to keep track of the encoder state
+ ******************************************************************************
+*/
+enum
+{
+ M4VSS3GPP_kNoEncoder, /**< no encoder has been created */
+ M4VSS3GPP_kEncoderClosed, /**< encoder created but closed */
+ M4VSS3GPP_kEncoderStopped, /**< encoder open but stopped */
+ M4VSS3GPP_kEncoderRunning /**< encoder started and running */
+};
+
+/**
+ ******************************************************************************
+ * structure M4VSS3GPP_AudioVideoContext
+ * @brief This structure defines the audio video context (private)
+ * @note This structure is used for all audio/video, encoding/writing operations.
+ ******************************************************************************
+*/
+typedef struct
+{
+ /**
+ * Timing Stuff */
+ // Decorrelate input and output encoding timestamp to handle encoder prefetch
+ /**< [Milliseconds] Duration of the output file, used for progress computation */
+ M4OSA_Double dInputVidCts;
+ /**< [Milliseconds] Current CTS of the video output stream */
+ M4OSA_Double dOutputVidCts;
+/**< [Milliseconds] Current CTS of the audio output stream */
+ M4OSA_Double dATo;
+ /**< [Milliseconds] Duration of the output file, used for progress computation */
+ M4OSA_Int32 iOutputDuration;
+
+ /**
+ * Output Video Stream Stuff */
+ M4SYS_StreamType VideoStreamType; /**< Output video codec */
+ M4OSA_Int32 outputVideoProfile; /**< Output video profile */
+ M4OSA_Int32 outputVideoLevel; /**< Output video level */
+ M4OSA_UInt32 uiVideoBitrate; /**< Average video bitrate of the output file,
+ computed from input bitrates, durations,
+ transitions and cuts */
+ M4OSA_UInt32 uiVideoWidth; /**< Output image width */
+ M4OSA_UInt32 uiVideoHeight; /**< Output image height */
+ M4OSA_UInt32 uiVideoTimeScale; /**< Time scale to use for the encoding
+ of the transition (if MPEG-4) */
+ M4OSA_Bool bVideoDataPartitioning; /**< Data partitioning to use for the
+ encoding of the transition
+ (if MPEG-4) */
+ M4OSA_MemAddr8 pVideoOutputDsi; /**< Decoder Specific Info of the output
+ MPEG-4 track */
+ M4OSA_UInt16 uiVideoOutputDsiSize; /**< Size of the Decoder Specific Info
+ of the output MPEG-4 track */
+ /**
+ * Output Audio Stream Stuff */
+ M4SYS_StreamType AudioStreamType; /**< Type of the output audio stream */
+ M4OSA_UInt32 uiNbChannels; /**< Number of channels in the output
+ stream (1=mono, 2=stereo) */
+ M4OSA_UInt32 uiAudioBitrate; /**< Audio average bitrate (in bps) */
+ M4OSA_UInt32 uiSamplingFrequency; /**< Sampling audio frequency (8000 for
+ amr, 16000 or more for aac) */
+ M4OSA_MemAddr8 pAudioOutputDsi; /**< Decoder Specific Info of the
+ output audio track */
+ M4OSA_UInt16 uiAudioOutputDsiSize; /**< Size of the Decoder Specific Info
+ of the output audio track */
+
+ /**
+ * Audio Encoder stuff */
+ M4OSA_Context pAudioEncCtxt; /**< Context of the audio encoder */
+ M4ENCODER_AudioDecSpecificInfo pAudioEncDSI; /**< Decoder specific info built by the
+ encoder */
+ M4ENCODER_AudioParams AudioEncParams; /**< Config of the audio encoder */
+
+ /**
+ * Silence frame stuff */
+ M4OSA_UInt32 uiSilencePcmSize; /**< Size to reserve to store a pcm full
+ of zeros compatible with master clip
+ stream type */
+ M4OSA_UInt8* pSilenceFrameData; /**< Pointer to silence frame data
+ compatible with master clip
+ stream type */
+ M4OSA_UInt32 uiSilenceFrameSize; /**< Size of silence frame data compatible
+ with master clip stream type */
+ M4OSA_Int32 iSilenceFrameDuration; /**< [Timescale] Duration of silence frame
+ data compatible with master clip
+ stream type */
+ M4OSA_Double scale_audio; /**< frequency / 1000.0 */
+
+ /**
+ * Video Encoder stuff */
+ M4ENCODER_Context pEncContext; /**< Context of the encoder */
+ M4WRITER_DataInterface OurWriterDataInterface; /**< Our own implementation of the
+ writer interface, to give to
+ the encoder shell */
+ M4OSA_MemAddr32 pDummyAuBuffer; /**< Buffer given to the encoder for
+ it to write AUs we don't want
+ in the output */
+ M4OSA_Int32 iMpeg4GovOffset; /**< Clip GOV offset in ms between
+ video and system time */
+ M4OSA_ERR VppError; /**< Error for VPP are masked by Video
+ Encoder, so we must remember it */
+ M4OSA_UInt32 encoderState;
+
+ /**
+ * Writer stuff */
+ M4WRITER_Context p3gpWriterContext; /**< Context of the 3GPP writer module */
+ M4SYS_StreamDescription WriterVideoStream; /**< Description of the written
+ video stream */
+ M4SYS_StreamDescription WriterAudioStream; /**< Description of the written
+ audio stream */
+ M4WRITER_StreamVideoInfos WriterVideoStreamInfo; /**< Video properties of the written
+ video stream */
+ M4WRITER_StreamAudioInfos WriterAudioStreamInfo; /**< Audio properties of the written
+ audio stream */
+ M4SYS_AccessUnit WriterVideoAU; /**< Written video access unit */
+ M4SYS_AccessUnit WriterAudioAU; /**< Written audio access unit */
+ M4OSA_UInt32 uiVideoMaxAuSize; /**< Max AU size set to the writer
+ for the video */
+ M4OSA_UInt32 uiAudioMaxAuSize; /**< Max AU size set to the writer
+ for the audio */
+ M4OSA_UInt32 uiOutputAverageVideoBitrate; /**< Average video bitrate of the
+ output file, computed from
+ input bitrates, durations,
+ transitions and cuts */
+
+} M4VSS3GPP_EncodeWriteContext;
+
+
+/**
+ ******************************************************************************
+ * structure M4VSS3GPP_InternalEditContext
+ * @brief This structure defines the edit VSS context (private)
+ * @note This structure is used for all VSS edit operations to store the context
+ ******************************************************************************
+*/
+typedef struct
+{
+ /**
+ * VSS 3GPP main state variables */
+ M4VSS3GPP_EditState State; /**< VSS internal state */
+ M4VSS3GPP_EditVideoState Vstate; /**< Internal state of the video processing path */
+ M4VSS3GPP_EditAudioState Astate; /**< Internal state of the audio processing path */
+
+ /**
+ * User Settings (copied, thus owned by VSS3GPP) */
+ M4OSA_UInt8 uiClipNumber; /**< Number of element of the clip
+ list pClipList. */
+ M4VSS3GPP_ClipSettings *pClipList; /**< List of the input clips settings
+ Array of uiClipNumber clip settings */
+ M4VSS3GPP_TransitionSettings *pTransitionList; /**< List of the transition settings.
+ Array of uiClipNumber-1 transition settings */
+ M4VSS3GPP_EffectSettings *pEffectsList; /**< List of the effects settings.
+ Array of nbEffects RC */
+ M4OSA_UInt8 *pActiveEffectsList; /**< List of the active effects
+ settings. Array of nbEffects RC */
+ M4OSA_UInt8 nbEffects; /**< Number of effects RC */
+ M4OSA_UInt8 nbActiveEffects; /**< Number of active effects RC */
+
+ /**
+ * Input Stuff */
+ M4OSA_UInt8 uiCurrentClip; /**< Index of the current clip 1 in
+ the input clip list */
+ M4VSS3GPP_ClipContext* pC1; /**< Context of the current clip 1 */
+ M4VSS3GPP_ClipContext* pC2; /**< Context of the current clip 2 */
+
+ /**
+ * Decoder stuff */
+ M4OSA_Double dOutputFrameDuration; /**< [Milliseconds] directly related to
+ output frame rate */
+ M4VIFI_ImagePlane yuv1[3]; /**< First temporary YUV420 image plane */
+ M4VIFI_ImagePlane yuv2[3]; /**< Second temporary YUV420 image plane */
+ M4VIFI_ImagePlane yuv3[3]; /**< Third temporary YUV420 image plane RC */
+ M4VIFI_ImagePlane yuv4[3]; /**< Fourth temporary YUV420 image plane RC */
+
+ /**
+ * Effect stuff */
+ M4OSA_Bool bClip1AtBeginCut; /**< True when clip1 is at its
+ begin cut */
+ M4OSA_Int8 iClip1ActiveEffect; /**< The index of the active effect
+ on Clip1 (<0 means none)
+ (used for video and audio but
+ not simultaneously) */
+ M4OSA_Int8 iClip2ActiveEffect; /**< The index of the active effect
+ on Clip2 (<0 means none)
+ (used for video and audio but
+ not simultaneously) */
+ M4OSA_Bool bTransitionEffect; /**< True if the transition effect
+ must be applied at the current
+ time */
+
+ /**
+ * Encoding and Writing operations */
+ M4OSA_Bool bSupportSilence; /**< Flag to know if the output stream can
+ support silence (even if not editable,
+ for example AAC+, but not EVRC) */
+ M4VSS3GPP_EncodeWriteContext ewc; /**< Audio and video encode/write stuff */
+ M4OSA_Bool bIsMMS; /**< Boolean used to know if we are
+ processing a file with an output
+ size constraint */
+ M4OSA_UInt32 uiMMSVideoBitrate; /**< If in MMS mode,
+ targeted video bitrate */
+ M4VIDEOEDITING_VideoFramerate MMSvideoFramerate; /**< If in MMS mode,
+ targeted video framerate */
+
+ /**
+ * Filesystem functions */
+ M4OSA_FileReadPointer* pOsaFileReadPtr; /**< OSAL file read functions,
+ to be provided by user */
+ M4OSA_FileWriterPointer* pOsaFileWritPtr; /**< OSAL file write functions,
+ to be provided by user */
+
+ /**
+ * Interfaces of the used modules */
+ M4VSS3GPP_MediaAndCodecCtxt ShellAPI; /**< Filesystem and shell reader,
+ decoder functions */
+ M4OSA_Bool bIssecondClip; /**< True while the second clip is being
+ processed (from name; confirm) */
+ M4OSA_UInt8 *pActiveEffectsList1; /**< List of the active effects settings. Array of nbEffects RC */
+ M4OSA_UInt8 nbActiveEffects1; /**< Number of active effects RC */
+ M4OSA_Bool m_bClipExternalHasStarted; /**< Flag to indicate that an
+ external effect is active */
+ M4OSA_Int32 iInOutTimeOffset; /**< Offset between input and output times
+ (units presumed milliseconds; confirm) */
+ M4OSA_Bool bEncodeTillEoF; /**< If true, encode until end of file
+ (presumed from name; confirm) */
+ M4xVSS_EditSettings xVSS; /**< xVSS-specific edit settings */
+ M4OSA_Context m_air_context; /**< Context of the AIR (image resizing)
+ module (presumed from name; confirm) */
+
+ M4OSA_Bool bClip1ActiveFramingEffect; /**< Overlay flag for clip1 */
+ M4OSA_Bool bClip2ActiveFramingEffect; /**< Overlay flag for clip2, used in transition */
+} M4VSS3GPP_InternalEditContext;
+
+
+/**
+ ******************************************************************************
+ * structure M4VSS3GPP_InternalAudioMixingContext
+ * @brief This structure defines the audio mixing VSS 3GPP context (private)
+ * @note This structure is used for all VSS 3GPP audio mixing operations to store
+ * the context
+ ******************************************************************************
+*/
+typedef struct
+{
+ /**
+ * VSS main variables */
+ M4VSS3GPP_AudioMixingState State; /**< VSS audio mixing internal state */
+
+ /**
+ * Internal copy of the input settings */
+ M4OSA_Int32 iAddCts; /**< [Milliseconds] Time, in milliseconds,
+ at which the added audio track is
+ inserted */
+ M4OSA_UInt32 uiBeginLoop; /**< Describes in milli-second the
+ start time of the loop */
+ M4OSA_UInt32 uiEndLoop; /**< Describes in milli-second the end
+ time of the loop (0 means no loop) */
+ M4OSA_Bool bRemoveOriginal; /**< If true, the original audio track
+ is not taken into account */
+
+ /**
+ * Input audio/video file */
+ M4VSS3GPP_ClipSettings InputClipSettings; /**< Structure internally used to
+ manage the input 3GPP settings */
+ M4VSS3GPP_ClipContext* pInputClipCtxt; /**< Context of the input 3GPP clip */
+
+ /**
+ * Added audio file stuff */
+ M4VSS3GPP_ClipSettings AddedClipSettings; /**< Structure internally used to
+ manage the added settings */
+ M4VSS3GPP_ClipContext* pAddedClipCtxt; /**< Context of the added 3GPP clip */
+
+ /**
+ * Audio stuff */
+ M4OSA_Float fOrigFactor; /**< Factor to apply to the original
+ audio track for the mixing */
+ M4OSA_Float fAddedFactor; /**< Factor to apply to the added
+ audio track for the mixing */
+ M4OSA_Bool bSupportSilence; /**< Flag to know if the output stream can
+ support silence (even if not editable,
+ for example AAC+, but not EVRC) */
+ M4OSA_Bool bHasAudio; /**< Flag to know if we have to delete
+ audio track */
+ M4OSA_Bool bAudioMixingIsNeeded; /**< Flag to know if we have to do mixing */
+
+ /**
+ * Encoding and Writing operations */
+ M4VSS3GPP_EncodeWriteContext ewc; /**< Audio and video encode/write stuff */
+
+ /**
+ * Filesystem functions */
+ M4OSA_FileReadPointer* pOsaFileReadPtr; /**< OSAL file read functions,
+ to be provided by user */
+ M4OSA_FileWriterPointer* pOsaFileWritPtr; /**< OSAL file write functions,
+ to be provided by user */
+
+ /**
+ * Interfaces of the used modules */
+ M4VSS3GPP_MediaAndCodecCtxt ShellAPI; /**< Filesystem and shell reader,
+ decoder functions */
+
+ /**
+ * Sample Rate Converter (SSRC) stuff (needed when sampling frequencies
+ * or number of channels differ) */
+ M4OSA_Bool b_SSRCneeded; /**< If true, SSRC is needed
+ (!= ASF or nb of channels) */
+ M4OSA_UInt8 ChannelConversion; /**< 1=Conversion from Mono to Stereo
+ 2=Stereo to Mono, 0=no conversion */
+ SSRC_Instance_t SsrcInstance; /**< Context of the Ssrc */
+ SSRC_Scratch_t* SsrcScratch; /**< Working memory of the Ssrc */
+ short iSsrcNbSamplIn; /**< Number of samples the Ssrc needs as input */
+ short iSsrcNbSamplOut; /**< Number of samples the Ssrc outputs */
+ M4OSA_MemAddr8 pSsrcBufferIn; /**< Input of the SSRC */
+ M4OSA_MemAddr8 pSsrcBufferOut; /**< Output of the SSRC */
+ M4OSA_MemAddr8 pPosInSsrcBufferIn; /**< Position into the SSRC in buffer */
+ M4OSA_MemAddr8 pPosInSsrcBufferOut;/**< Position into the SSRC out buffer */
+ M4OSA_MemAddr8 pTempBuffer; /**< Temporary buffer */
+ M4OSA_MemAddr8 pPosInTempBuffer; /**< Position in temporary buffer */
+ M4OSA_UInt32 minimumBufferIn; /**< Minimum amount of decoded data to be
+ processed by SSRC and channel
+ convertor */
+ M4OSA_Bool b_DuckingNeedeed; /**< If true, audio ducking must be applied
+ (field name typo kept: public layout) */
+ M4OSA_Int32 InDucking_threshold; /**< Threshold value at which background
+ music shall duck */
+ M4OSA_Float InDucking_lowVolume; /**< lower the background track to this
+ factor and increase the primary
+ track to inverse of this factor */
+ M4OSA_Float lowVolume; /**< Low-volume factor (from name; confirm) */
+ M4OSA_Int32 audioVolumeArray[WINDOW_SIZE]; /**< Stores peak audio volume levels
+ over a window of WINDOW_SIZE entries */
+ M4OSA_Int32 audVolArrIndex; /**< Current index into audioVolumeArray */
+ M4OSA_Float duckingFactor ; /**< multiply by this factor to bring
+ FADE IN/FADE OUT effect */
+ M4OSA_Float fBTVolLevel; /**< Background track volume level (from name) */
+ M4OSA_Float fPTVolLevel; /**< Primary track volume level (from name) */
+ M4OSA_Bool bDoDucking; /**< True while ducking is applied (from name; confirm) */
+ M4OSA_Bool bLoop; /**< True if the added track must loop (from name; confirm) */
+ M4OSA_Bool bNoLooping; /**< True if looping is disabled (from name; confirm) */
+ M4OSA_Context pLVAudioResampler; /**< Context of the LV audio resampler */
+ M4OSA_Bool bjumpflag; /**< True if a jump has been made (from name; confirm) */
+
+} M4VSS3GPP_InternalAudioMixingContext;
+
+
+/**
+ ******************************************************************************
+ * structure M4VSS3GPP_InternalExtractPictureContext
+ * @brief This structure defines the extract picture VSS context (private)
+ * @note This structure is used for all VSS picture extractions to store the context
+ ******************************************************************************
+*/
+typedef struct
+{
+ /**
+ * VSS main variables */
+ M4VSS3GPP_ExtractPictureState State; /**< VSS extract picture internal state */
+
+ /**
+ * Input files */
+ M4VSS3GPP_ClipSettings ClipSettings; /**< Structure internally used to
+ manage the input 3GPP settings */
+ M4VSS3GPP_ClipContext* pInputClipCtxt; /**< Context of the input 3GPP clip */
+
+ /**
+ * Settings */
+ M4OSA_Int32 iExtractCts; /**< [Milliseconds] Cts of the AU
+ to be extracted */
+
+ /**
+ * Video stuff */
+ M4VIFI_ImagePlane decPlanes[3]; /**< Decoded YUV420 picture plane */
+ M4OSA_UInt32 uiVideoWidth; /**< Decoded image width */
+ M4OSA_UInt32 uiVideoHeight; /**< Decoded image height */
+
+ /*
+ * Decoder info */
+ M4OSA_Int32 iDecCts; /**< [Milliseconds] Decoded AU Cts */
+ M4OSA_Bool bJumpFlag; /**< 1 if a jump has been made */
+ M4OSA_Int32 iDeltaTime; /**< [Milliseconds] Time between previous RAP and
+ picture to extract */
+ M4OSA_Int32 iGap; /**< [Milliseconds] Time between jump AU and
+ extraction time */
+ M4OSA_UInt32 uiStep; /**< [Milliseconds] Progress bar time increment */
+
+ /**
+ * Filesystem functions */
+ /**< OSAL file read functions, to be provided by user */
+ M4OSA_FileReadPointer* pOsaFileReadPtr;
+ /**< OSAL file write functions, to be provided by user */
+ M4OSA_FileWriterPointer* pOsaFileWritPtr;
+
+ M4OSA_Bool bClipOpened; /**< True if the input clip has been opened (from name; confirm) */
+} M4VSS3GPP_InternalExtractPictureContext;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4VSS3GPP_INTERNALTYPES_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4xVSS_API.h b/libvideoeditor/vss/inc/M4xVSS_API.h
new file mode 100755
index 0000000..5ce102f
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4xVSS_API.h
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4XVSS_API_H__
+#define __M4XVSS_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/**
+ ******************************************************************************
+ * @file M4xVSS_API.h
+ * @brief API of Video Studio 2.1
+ * @note
+ ******************************************************************************
+*/
+
+#define M4VSS_SUPPORT_EXTENDED_FEATURES
+
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_Extended_API.h"
+#include "M4DECODER_Common.h"
+/* Errors codes */
+
+/**
+ * End of analyzing => the user can call M4xVSS_PreviewStart or M4xVSS_SaveStart */
+#define M4VSS3GPP_WAR_ANALYZING_DONE M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0001)
+
+/**
+ * End of preview generating => the user can launch vps to see preview. Once preview is over,
+ the user must call M4xVSS_PreviewStop() to be able to save edited file, or to call another
+ M4xVSS_SendCommand() */
+#define M4VSS3GPP_WAR_PREVIEW_READY M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0002)
+
+/**
+ * End of saved file generation => the user must call M4xVSS_SaveStop() */
+#define M4VSS3GPP_WAR_SAVING_DONE M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0003)
+
+/**
+ * Transcoding is necessary to go further -> if the user does not want to continue,
+ he must call M4xVSS_sendCommand() */
+#define M4VSS3GPP_WAR_TRANSCODING_NECESSARY M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0004)
+
+/**
+ * In case of MMS, the output file size won't be reached */
+#define M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0005)
+
+/**
+ * JPG input file dimensions are too high */
+#define M4VSS3GPP_ERR_JPG_TOO_BIG M4OSA_ERR_CREATE( M4_ERR, M4VS, 0x0001)
+
+/**
+ * UTF Conversion, warning on the size of the temporary converted buffer*/
+#define M4xVSSWAR_BUFFER_OUT_TOO_SMALL M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0006)
+
+/**
+ * Error returned when there is no more storage space (NO_MORE_SPACE) */
+#define M4xVSSERR_NO_MORE_SPACE M4OSA_ERR_CREATE( M4_ERR, M4VS, 0x0007)
+
+/**
+ ******************************************************************************
+ * enum M4xVSS_VideoEffectType
+ * @brief This enumeration defines the video effect types of the xVSS
+ ******************************************************************************
+*/
+typedef enum
+{
+ /* Values continue just after the VSS3GPP external-effect range (256) */
+ M4xVSS_kVideoEffectType_BlackAndWhite = M4VSS3GPP_kVideoEffectType_External+1, /* 257 */
+ M4xVSS_kVideoEffectType_Pink, /* 258 */
+ M4xVSS_kVideoEffectType_Green, /* 259 */
+ M4xVSS_kVideoEffectType_Sepia, /* 260 */
+ M4xVSS_kVideoEffectType_Negative, /* 261 */
+ M4xVSS_kVideoEffectType_Framing, /* 262 */
+ M4xVSS_kVideoEffectType_Text, /* Text overlay */ /* 263 */
+ M4xVSS_kVideoEffectType_ZoomIn, /* 264 */
+ M4xVSS_kVideoEffectType_ZoomOut, /* 265 */
+ M4xVSS_kVideoEffectType_Fifties, /* 266 */
+ M4xVSS_kVideoEffectType_ColorRGB16, /* 267 */
+ M4xVSS_kVideoEffectType_Gradient /* 268 */
+} M4xVSS_VideoEffectType;
+
+/**
+ ******************************************************************************
+ * enum M4xVSS_VideoTransitionType
+ * @brief This enumeration defines the video effect that can be applied during a transition.
+ ******************************************************************************
+*/
+typedef enum
+{
+ /* Values continue from the VSS3GPP external-transition value (256) */
+ M4xVSS_kVideoTransitionType_External = M4VSS3GPP_kVideoTransitionType_External, /* 256 */
+ M4xVSS_kVideoTransitionType_AlphaMagic, /* 257 */
+ M4xVSS_kVideoTransitionType_SlideTransition, /* 258 */
+ M4xVSS_kVideoTransitionType_FadeBlack /* 259 */
+
+} M4xVSS_VideoTransitionType;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_PreviewSettings
+ * @brief This structure gathers all the information needed by the VPS for preview
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Void *p3gpPreviewFile; /**< 3GPP preview file (presumed a path; confirm) */
+ M4OSA_Void *pPCMFile; /**< PCM audio file for preview (presumed a path; confirm) */
+ M4VIDEOEDITING_AudioSamplingFrequency outPCM_ASF; /**< Sampling frequency of the output PCM */
+ M4OSA_Bool bAudioMono; /**< True if the preview audio is mono */
+ M4VSS3GPP_EffectSettings *Effects; /**< Effects to apply during preview */
+ M4OSA_UInt8 nbEffects; /**< Number of entries in Effects */
+
+} M4xVSS_PreviewSettings;
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_toUTF8Fct
+ * @brief This prototype defines the function implemented by the integrator
+ * to convert a string encoded in any format to an UTF8 string.
+ * @note
+ *
+ * @param pBufferIn IN Buffer containing the string to convert to UTF8
+ * @param pBufferOut IN Buffer containing the UTF8 converted string
+ * @param bufferOutSize IN/OUT IN: Size of the given output buffer
+ * OUT: Size of the converted buffer
+ *
+ ******************************************************************************
+*/
+typedef M4OSA_ERR (*M4xVSS_toUTF8Fct)
+(
+ M4OSA_Void *pBufferIn, /**< (IN) String to convert to UTF8 */
+ M4OSA_UInt8 *pBufferOut, /**< (OUT) UTF8 converted string */
+ M4OSA_UInt32 *bufferOutSize /**< (IN/OUT) In: size of output buffer;
+ Out: size of converted string */
+);
+
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_fromUTF8Fct
+ * @brief This prototype defines the function implemented by the integrator
+ * to convert an UTF8 string to a string encoded in any format.
+ * @note
+ *
+ * @param pBufferIn IN Buffer containing the UTF8 string to convert
+ * to the desired format.
+ * @param pBufferOut IN Buffer containing the converted string
+ * @param bufferOutSize IN/OUT IN: Size of the given output buffer
+ * OUT: Size of the converted buffer
+ *
+ ******************************************************************************
+*/
+typedef M4OSA_ERR (*M4xVSS_fromUTF8Fct)
+(
+ M4OSA_UInt8 *pBufferIn, /**< (IN) UTF8 string to convert */
+ M4OSA_Void *pBufferOut, /**< (OUT) Converted string in the desired format */
+ M4OSA_UInt32 *bufferOutSize /**< (IN/OUT) In: size of output buffer;
+ Out: size of converted string */
+);
+
+
+
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_InitParams
+ * @brief This structure defines parameters for xVSS.
+ * @note
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_FileReadPointer* pFileReadPtr; /**< OSAL file read functions, to be provided by user */
+ M4OSA_FileWriterPointer* pFileWritePtr; /**< OSAL file write functions, to be provided by user */
+ M4OSA_Void* pTempPath; /**< Path for xVSS temporary files (presumed; confirm) */
+ /* Function pointer to an external text conversion function (any format -> UTF8) */
+ M4xVSS_toUTF8Fct pConvToUTF8Fct;
+ /* Function pointer to an external text conversion function (UTF8 -> any format) */
+ M4xVSS_fromUTF8Fct pConvFromUTF8Fct;
+
+
+
+} M4xVSS_InitParams;
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_Init
+ * @brief This function initializes the xVSS
+ * @note Initializes the xVSS edit operation (allocates an execution context).
+ *
+ * @param pContext (OUT) Pointer on the xVSS edit context to allocate
+ * @param params (IN) Parameters mandatory for xVSS
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_Init(M4OSA_Context* pContext, M4xVSS_InitParams* params);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_ReduceTranscode
+ * @brief This function changes the given editing structure in order to
+ * minimize the transcoding time.
+ * @note The xVSS analyses this structure, and if needed, changes the
+ * output parameters (Video codec, video size, audio codec,
+ * audio nb of channels) to minimize the transcoding time.
+ *
+ * @param pContext (OUT) Pointer on the xVSS edit context to allocate
+ * @param pSettings (IN) Edition settings (allocated by the user)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_ReduceTranscode(M4OSA_Context pContext, M4VSS3GPP_EditSettings* pSettings);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_SendCommand
+ * @brief This function gives to the xVSS an editing structure
+ * @note The xVSS analyses this structure, and prepare edition
+ * This function must be called after M4xVSS_Init, after
+ * M4xVSS_CloseCommand, or after M4xVSS_PreviewStop.
+ * After this function, the user must call M4xVSS_Step until
+ * it returns another error than M4NO_ERROR.
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param pSettings (IN) Edition settings (allocated by the user)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_SendCommand(M4OSA_Context pContext, M4VSS3GPP_EditSettings* pSettings);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_PreviewStart
+ * @brief This function prepares the preview
+ * @note The xVSS create 3GP preview file and fill pPreviewSettings with
+ * preview parameters.
+ * This function must be called once M4xVSS_Step has returned
+ * M4VSS3GPP_WAR_ANALYZING_DONE
+ * After this function, the user must call M4xVSS_Step until
+ * it returns another error than M4NO_ERROR.
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param pPreviewSettings (IN) Preview settings (allocated by the user)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_PreviewStart(M4OSA_Context pContext, M4xVSS_PreviewSettings* pPreviewSettings);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_PreviewStop
+ * @brief This function deallocates preview resources and changes the xVSS
+ * internal state to allow saving or resend an editing command
+ * @note This function must be called once M4xVSS_Step has returned
+ * M4VSS3GPP_WAR_PREVIEW_READY
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_PreviewStop(M4OSA_Context pContext);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_SaveStart
+ * @brief This function prepares the saving operation
+ * @note The xVSS create 3GP edited final file
+ * This function must be called once M4xVSS_Step has returned
+ * M4VSS3GPP_WAR_ANALYZING_DONE
+ * After this function, the user must call M4xVSS_Step until
+ * it returns another error than M4NO_ERROR.
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param pFilePath (IN) If the user wants to provide a different
+ * output filename, else can be NULL (allocated by the user)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_SaveStart(M4OSA_Context pContext, M4OSA_Void* pFilePath,
+ M4OSA_UInt32 filePathSize);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_SaveStop
+ * @brief This function deallocates saving resources and changes the xVSS
+ * internal state.
+ * @note This function must be called once M4xVSS_Step has returned
+ * M4VSS3GPP_WAR_SAVING_DONE
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_SaveStop(M4OSA_Context pContext);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_Step
+ * @brief This function executes different tasks, depending on the xVSS
+ * internal state.
+ * @note This function:
+ * - analyses editing structure if called after M4xVSS_SendCommand
+ * - generates preview file if called after M4xVSS_PreviewStart
+ * - generates final edited file if called after M4xVSS_SaveStart
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param pProgress (OUT) Progress indication from 0 to 100
+ * @return M4NO_ERROR: No error, the user must call M4xVSS_Step again
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ * @return M4VSS3GPP_WAR_PREVIEW_READY: Preview file is generated
+ * @return M4VSS3GPP_WAR_SAVING_DONE: Final edited file is generated
+ * @return M4VSS3GPP_WAR_ANALYZING_DONE: Analyse is done
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_Step(M4OSA_Context pContext, M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_CloseCommand
+ * @brief This function deletes the current editing profile, deallocates
+ * resources and changes the xVSS internal state.
+ * @note After this function, the user can call a new M4xVSS_SendCommand
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_CloseCommand(M4OSA_Context pContext);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_CleanUp
+ * @brief This function deletes all xVSS resources
+ * @note This function must be called after M4xVSS_CloseCommand.
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_CleanUp(M4OSA_Context pContext);
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_GetVersion(M4_VersionInfo *pVersion)
+ * @brief This function gets the version of the Video Studio 2.1
+ *
+ * @param pVersion (IN) Pointer on the version info struct
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_GetVersion(M4_VersionInfo *pVersion);
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function apply a color effect on an input YUV420 planar frame
+ * @note The prototype of this effect function is exposed because it needs to
+ * be called by the VPS during the preview
+ * @param pFunctionContext(IN) Contains which color to apply (not very clean ...)
+ * @param PlaneIn (IN) Input YUV420 planar
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_externalVideoEffectColor
+(
+ M4OSA_Void *pFunctionContext,
+ M4VIFI_ImagePlane *pInputPlanes,
+ M4VIFI_ImagePlane *pOutputPlanes,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiEffectKind
+);
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function add a fixed or animated image on an input YUV420 planar frame
+ * @note The prototype of this effect function is exposed because it needs to
+ * be called by the VPS during the preview
+ * @param pFunctionContext(IN) Contains which color to apply (not very clean ...)
+ * @param PlaneIn (IN) Input YUV420 planar
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming
+(
+ M4OSA_Void *pFunctionContext,
+ M4VIFI_ImagePlane *pInputPlanes,
+ M4VIFI_ImagePlane *pOutputPlanes,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiEffectKind
+);
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function make a video look as if it was taken in the fifties
+ * @note
+ * @param pUserData (IN) Context
+ * @param pPlaneIn (IN) Input YUV420 planar
+ * @param pPlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ * @return M4ERR_PARAMETER: pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties
+(
+ M4OSA_Void *pUserData,
+ M4VIFI_ImagePlane *pInputPlanes,
+ M4VIFI_ImagePlane *pPlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiEffectKind
+);
+
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectZoom(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function applies a zoom in or zoom out effect on an input YUV420 planar frame
+ * @note The prototype of this effect function is exposed because it needs to be
+ * called by the VPS during the preview
+ * @param pFunctionContext(IN) Contains which zoom to apply (In/Out)
+ * @param PlaneIn (IN) Input YUV420 planar
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom
+(
+ M4OSA_Void *pFunctionContext,
+ M4VIFI_ImagePlane *pInputPlanes,
+ M4VIFI_ImagePlane *pOutputPlanes,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiEffectKind
+);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_CreateClipSettings()
+ * @brief Allows filling a clip settings structure with default values
+ *
+ * @note WARNING: pClipSettings->Effects[ ] will be allocated in this function.
+ * pClipSettings->pFile will be allocated in this function.
+ *
+ * @param pClipSettings (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param pFile (IN) Clip file name
+ * @param filePathSize (IN) Size of the clip path (needed for the UTF16 conversion)
+ * @param nbEffects (IN) Nb of effect settings to allocate
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_CreateClipSettings(M4VSS3GPP_ClipSettings *pClipSettings, M4OSA_Void* pFile,
+ M4OSA_UInt32 filePathSize, M4OSA_UInt8 nbEffects);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_DuplicateClipSettings()
+ * @brief Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param pClipSettingsDest (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param pClipSettingsOrig (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param bCopyEffects (IN) Flag to know if we have to duplicate effects
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_DuplicateClipSettings(M4VSS3GPP_ClipSettings *pClipSettingsDest,
+ M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+ M4OSA_Bool bCopyEffects);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_FreeClipSettings()
+ * @brief Free the pointers allocated in the ClipSetting structure (pFile, Effects).
+ *
+ * @param pClipSettings (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_FreeClipSettings(M4VSS3GPP_ClipSettings *pClipSettings);
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext)
+ * @brief This function returns the MCS context within the xVSS internal context
+ * @note This function must be called only after VSS state has moved to analyzing state
+ * or beyond
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param mcsContext (OUT) Pointer to pointer of mcs context to return
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext);
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext,
+ * M4OSA_Context* mcsContext)
+ * @brief This function returns the VSS3GPP context within the xVSS internal context
+ * @note This function must be called only after VSS state has moved to Generating
+ * preview or beyond
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param vss3gppContext (OUT) Pointer to pointer of vss3gpp context to return
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext, M4OSA_Context* vss3gppContext);
+
+// Get supported video decoders and capabilities.
+M4OSA_ERR M4xVSS_getVideoDecoderCapabilities(M4DECODER_VideoDecoders **decoders);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __M4XVSS_API_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4xVSS_Internal.h b/libvideoeditor/vss/inc/M4xVSS_Internal.h
new file mode 100755
index 0000000..5296572
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4xVSS_Internal.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4XVSS_INTERNAL_H__
+#define __M4XVSS_INTERNAL_H__
+
+/**
+ ******************************************************************************
+ * @file M4xVSS_Internal.h
+ * @brief Internal of Video Authoring.
+ * @note
+ ******************************************************************************
+*/
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4MCS_API.h"
+#include "M4MCS_ErrorCodes.h"
+
+#include "M4PTO3GPP_API.h"
+#include "M4PTO3GPP_ErrorCodes.h"
+
+#include "M4AIR_API.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define M4_xVSS_MAJOR 1
+#define M4_xVSS_MINOR 5
+#define M4_xVSS_REVISION 5
+
+/* The following defines describe the max dimensions of an input JPG */
+#define M4XVSS_MX_JPG_NB_OF_PIXELS 3926016
+
+/*Size of the UTF temporary conversion buffer kept in the VA internal context and
+allocated at initialization*/
+#define UTF_CONVERSION_BUFFER_SIZE 2048
+
+/* Max path length size */
+#define M4XVSS_MAX_PATH_LEN 256
+
+/** Determine absolute value of a. */
+#define M4xVSS_ABS(a) ( ( (a) < (0) ) ? (-(a)) : (a) )
+
+/** Y,U,V values in case of black borders rendering */
+#define Y_PLANE_BORDER_VALUE 0x00
+#define U_PLANE_BORDER_VALUE 0x80
+#define V_PLANE_BORDER_VALUE 0x80
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_EffectsAlphaBlending
+ * @brief Internal effects alpha blending parameters
+ * @note This structure contains all internal information to create an alpha
+ * blending for the effects text and framing
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt8 m_fadeInTime; /*Duration, in percentage of effect
+ duration, of the FadeIn phase*/
+ M4OSA_UInt8 m_fadeOutTime; /*Duration, in percentage of effect
+ duration, of the FadeOut phase*/
+ M4OSA_UInt8 m_end; /*End percentage of Alpha blending*/
+ M4OSA_UInt8 m_middle; /*Middle percentage of Alpha blending*/
+ M4OSA_UInt8 m_start; /*Start percentage of Alpha blending*/
+
+} M4xVSS_internalEffectsAlphaBlending;
+
+/**
+ ******************************************************************************
+ * THIS STRUCTURE MUST NOT BE MODIFIED
+ * struct M4xVSS_FramingStruct
+ * @brief It is used internally by xVSS for framing effect, and by VPS for previewing
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4VIFI_ImagePlane *FramingRgb; /**< decoded BGR565 plane */
+ M4VIFI_ImagePlane *FramingYuv; /**< converted YUV420 planar plane */
+ M4OSA_Int32 duration; /**< Duration of the frame */
+ M4OSA_Int32 previousClipTime; /**< Previous clip time, used by framing
+ filter for SAVING */
+ M4OSA_Int32 previewOffsetClipTime; /**< Previous clip time, used by framing
+ filter for PREVIEW */
+ M4OSA_Int32 previewClipTime; /**< Current clip time, used by framing
+ filter for PREVIEW */
+ M4OSA_Void* pCurrent; /**< Current M4xVSS_FramingStruct used by
+ framing filter */
+ M4OSA_Void* pNext; /**< Next M4xVSS_FramingStruct, if no more,
+ point on current M4xVSS_FramingStruct */
+ M4OSA_UInt32 topleft_x; /**< The top-left X coordinate in the output
+ picture of the first decoded pixel */
+ M4OSA_UInt32 topleft_y; /**< The top-left Y coordinate in the output
+ picture of the first decoded pixel */
+ M4xVSS_internalEffectsAlphaBlending* alphaBlendingStruct; /* Alpha blending Struct */
+/*To support ARGB8888: width and height of the ARGB8888 file used in framing
+ as video effect */
+ M4OSA_UInt32 width; /*width of the ARGB8888 clip.
+ Used only if video effect is framing */
+ M4OSA_UInt32 height; /*height of the ARGB8888 clip.
+ Used only if video effect is framing */
+
+} M4xVSS_FramingStruct;
+
+#ifdef DECODE_GIF_ON_SAVING
+/**
+ ******************************************************************************
+ * THIS STRUCTURE MUST NOT BE MODIFIED
+ * struct M4xVSS_FramingContext
+ * @brief It is used internally by xVSS for framing effect, when the flag
+ DECODE_GIF_ON_SAVING is activated
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4xVSS_FramingStruct* aFramingCtx; /**<Framing struct for the decoding
+ of the current frame of the gif*/
+ M4xVSS_FramingStruct* aFramingCtx_last; /**<Framing struct for the decoding of
+ the previous frame of the gif*/
+ M4OSA_FileReadPointer* pFileReadPtr; /**< Pointer on OSAL file read functions */
+ M4OSA_FileWriterPointer* pFileWritePtr; /**< Pointer on OSAL file write functions */
+ M4OSA_Void* pSPSContext; /**<SPS context for the GIF decoding*/
+ //M4SPS_Stream inputStream; /**<GIF input stream buffer pointer*/
+ M4OSA_Void* pEffectFilePath; /**<file path of the gif*/
+ M4VIDEOEDITING_VideoFrameSize outputVideoSize; /**< Output video size RC */
+ //M4SPS_DisposalMode disposal; /**<previous frame GIF disposal*/
+ M4OSA_UInt16 b_animated; /**<Is the GIF animated?*/
+ M4OSA_Bool bEffectResize; /**<Is the gif resized?*/
+ M4OSA_UInt32 topleft_x; /**< The top-left X coordinate in the
+ output picture of the first
+ decoded pixel */
+ M4OSA_UInt32 topleft_y; /**< The top-left Y coordinate in the
+ output picture of the first
+ decoded pixel */
+ M4OSA_UInt32 width; /**<GIF width, fill during the
+ initialization with the SPS*/
+ M4OSA_UInt32 height; /**<GIF height, fill during the
+ initialization with the SPS*/
+ M4OSA_UInt32 effectDuration; /**<Effect duration*/
+ M4OSA_Int32 effectStartTime; /**<Effect start time*/
+ M4OSA_UInt32 clipTime; /**<current output clip time for the
+ current frame*/
+ M4OSA_UInt32 last_clipTime; /**<previous output clip time for the
+ previous frame*/
+ M4OSA_UInt32 lastStepDuration; /**<Time interval between the previous
+ frame and the current frame*/
+ M4OSA_Bool b_IsFileGif; /**<Is the framing using a gif file*/
+ M4OSA_UInt32 last_width; /**<Last frame width*/
+ M4OSA_UInt32 last_height; /**<Last frame height*/
+ M4OSA_UInt32 last_topleft_x; /**<Last frame x topleft*/
+ M4OSA_UInt32 last_topleft_y; /**<Last frame y topleft*/
+ M4OSA_UInt32 current_gif_time; /**< Current time of the GIF in output
+ file time */
+ M4OSA_Float frameDurationRatio; /**< Frame duration ratio */
+ M4xVSS_internalEffectsAlphaBlending* alphaBlendingStruct;/*Alpha blending structure*/
+#ifdef DEBUG_GIF
+ M4OSA_UInt8 uiDebug_fileCounter;/**<for debug purpose,
+ count the frame of the gif*/
+#endif /*DEBUG_GIF*/
+}M4xVSS_FramingContext;
+#endif /*DECODE_GIF_ON_SAVING*/
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_Pto3GPP_params
+ * @brief Internal xVSS parameter for Pto3GPP module
+ * @note This structure is filled by M4xVSS_sendCommand function,
+ * @note and is used during M4xVSS_Step function to initialize Pto3GPP module
+ * @note All the JPG files to transform to 3GP are chained
+ ******************************************************************************
+*/
+typedef struct {
+ M4OSA_Char* pFileIn;
+ M4OSA_Char* pFileOut;
+ M4OSA_Char* pFileTemp; /**< temporary file used for
+ metadata writing, NULL if cstmem
+ writer not used */
+ M4OSA_UInt32 duration;
+ M4VIDEOEDITING_FileType InputFileType;
+ M4OSA_Bool isCreated; /**< This boolean is used to know if
+ the output file is already
+ created or not */
+ M4OSA_Bool isPanZoom; /**< RC: Boolean used to know if the
+ pan and zoom mode is enabled */
+ M4OSA_UInt16 PanZoomXa; /**< RC */
+ M4OSA_UInt16 PanZoomTopleftXa; /**< RC */
+ M4OSA_UInt16 PanZoomTopleftYa; /**< RC */
+ M4OSA_UInt16 PanZoomXb; /**< RC */
+ M4OSA_UInt16 PanZoomTopleftXb; /**< RC */
+ M4OSA_UInt16 PanZoomTopleftYb; /**< RC */
+ M4xVSS_MediaRendering MediaRendering; /**< FB: to render or not picture
+ aspect ratio */
+ M4VIDEOEDITING_VideoFramerate framerate; /**< RC */
+ M4OSA_Void* pNext; /**< Address of next M4xVSS_Pto3GPP_params*
+ element */
+ /*To support ARGB8888:width and height */
+ M4OSA_UInt32 width;
+ M4OSA_UInt32 height;
+
+} M4xVSS_Pto3GPP_params;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_fiftiesStruct
+ * @brief It is used internally by xVSS for fifties effect
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_UInt32 fiftiesEffectDuration; /**< Duration of the same effect in a video */
+ M4OSA_Int32 previousClipTime; /**< Previous clip time, used by framing filter
+ for SAVING */
+ M4OSA_UInt32 shiftRandomValue; /**< Vertical shift of the image */
+ M4OSA_UInt32 stripeRandomValue; /**< Horizontal position of the stripe */
+
+} M4xVSS_FiftiesStruct;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_ColorRGB16
+ * @brief It is used internally by xVSS for RGB16 color effect
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4xVSS_VideoEffectType colorEffectType; /*Color type of effect*/
+ M4OSA_UInt16 rgb16ColorData; /*RGB16 color only for the RGB16 color effect*/
+} M4xVSS_ColorStruct;
+
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_PictureCallbackCtxt
+ * @brief The Callback Context parameters for Pto3GPP
+ ******************************************************************************
+*/
+typedef struct
+{
+ M4OSA_Char* m_FileIn;
+ M4OSA_UInt32 m_NbImage;
+ M4OSA_UInt32 m_ImageCounter;
+ M4OSA_Double m_timeDuration;
+ M4OSA_FileReadPointer* m_pFileReadPtr;
+ M4VIFI_ImagePlane* m_pDecodedPlane; /* Used for Pan and Zoom only */
+ M4xVSS_Pto3GPP_params* m_pPto3GPPparams;
+ M4OSA_Context m_air_context;
+ M4xVSS_MediaRendering m_mediaRendering;
+
+} M4xVSS_PictureCallbackCtxt;
+
+/**
+ ******************************************************************************
+ * enum M4xVSS_State
+ * @brief Internal State of the xVSS
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4xVSS_kStateInitialized = 0,
+ M4xVSS_kStateAnalyzing,
+ M4xVSS_kStateOpened,
+ //M4xVSS_kStateGeneratingPreview,
+ //M4xVSS_kStatePreview,
+ M4xVSS_kStateSaving,
+ M4xVSS_kStateSaved
+
+} M4xVSS_State;
+
+/**
+ ******************************************************************************
+ * enum M4xVSS_editMicroState
+ * @brief Internal Micro state of the xVSS for previewing/saving states
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4xVSS_kMicroStateEditing = 0,
+ M4xVSS_kMicroStateAudioMixing
+
+} M4xVSS_editMicroState;
+
+/**
+ ******************************************************************************
+ * enum M4xVSS_editMicroState
+ * @brief Internal Micro state of the xVSS for analyzing states
+ ******************************************************************************
+*/
+typedef enum
+{
+ M4xVSS_kMicroStateAnalysePto3GPP = 0,
+ M4xVSS_kMicroStateConvertPto3GPP,
+ M4xVSS_kMicroStateAnalyzeMCS,
+ M4xVSS_kMicroStateTranscodeMCS
+
+} M4xVSS_analyseMicroState;
+
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_MCS_params
+ * @brief Internal xVSS parameter for MCS module
+ * @note This structure is filled by M4xVSS_sendCommand function,
+ * @note and is used during M4xVSS_Step function to initialize MCS module
+ * @note All the input files to transcode are chained
+ ******************************************************************************
+*/
+typedef struct {
+ M4OSA_Void* pFileIn;
+ M4OSA_Void* pFileOut;
+ /**< temporary file used for metadata writing, NULL if cstmem writer not used */
+ M4OSA_Void* pFileTemp;
+ M4VIDEOEDITING_FileType InputFileType;
+ M4VIDEOEDITING_FileType OutputFileType;
+ M4VIDEOEDITING_VideoFormat OutputVideoFormat;
+ M4VIDEOEDITING_VideoFrameSize OutputVideoFrameSize;
+ M4VIDEOEDITING_VideoFramerate OutputVideoFrameRate;
+ M4VIDEOEDITING_AudioFormat OutputAudioFormat;
+ M4VIDEOEDITING_AudioSamplingFrequency OutputAudioSamplingFrequency;
+ M4OSA_Bool bAudioMono;
+ M4VIDEOEDITING_Bitrate OutputVideoBitrate;
+ M4VIDEOEDITING_Bitrate OutputAudioBitrate;
+ M4OSA_Bool isBGM;
+ /**< This boolean is used to know if the output file is already created or not */
+ M4OSA_Bool isCreated;
+ /**< Address of next M4xVSS_MCS_params* element */
+ M4OSA_Void* pNext;
+
+ /*FB: transcoding per parts*/
+ M4OSA_UInt32 BeginCutTime; /**< Beginning cut time in input file */
+ M4OSA_UInt32 EndCutTime; /**< End cut time in input file */
+ M4OSA_UInt32 OutputVideoTimescale; /*Output timescale*/
+
+ M4MCS_MediaRendering MediaRendering; /**< FB: to crop, resize, or render
+ black borders*/
+ M4OSA_UInt32 videoclipnumber;
+ M4OSA_UInt32 outputVideoProfile;
+ M4OSA_UInt32 outputVideoLevel;
+} M4xVSS_MCS_params;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_internal_AlphaMagicSettings
+ * @brief This structure defines the alpha magic transition settings
+ ******************************************************************************
+*/
+typedef struct {
+ M4VIFI_ImagePlane *pPlane;
+ M4OSA_Int32 blendingthreshold; /**< Blending Range */
+ M4OSA_Bool isreverse; /**< direct effect or reverse */
+
+} M4xVSS_internal_AlphaMagicSettings;
+
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_internal_SlideTransitionSettings
+ * @brief This structure defines the internal slide transition settings
+ * @note This type happens to match the external transition settings
+ * structure (i.e. the one which is given by the application), but are
+ * conceptually different types, so that if (or rather when) some day
+ * translation needs to occur when loading the settings from the app,
+ * this separate type will already be ready.
+ ******************************************************************************
+*/
+
+typedef M4xVSS_SlideTransitionSettings M4xVSS_internal_SlideTransitionSettings;
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_internalJpegChunkMode
+ * @brief This structure defines the parameters of the chunk callback to decode
+ * a JPEG by chunk mode.
+ ******************************************************************************
+*/
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_UTFConversionContext
+ * @brief Internal UTF conversion context
+ * @note This structure contains the UTF conversion information
+ * needed by the xVSS to manage the different formats (UTF8/16/ASCII)
+ ******************************************************************************
+*/
+typedef struct
+{
+ /*Function pointer on an external text conversion function */
+ M4xVSS_toUTF8Fct pConvToUTF8Fct;
+ /*Function pointer on an external text conversion function */
+ M4xVSS_fromUTF8Fct pConvFromUTF8Fct;
+ /*Temporary buffer that contains the result of each conversion*/
+ M4OSA_Void* pTempOutConversionBuffer;
+ /*Size of the previous buffer, the size is predetermined*/
+ M4OSA_UInt32 m_TempOutConversionSize;
+} M4xVSS_UTFConversionContext;
+
+
+
+/**
+ ******************************************************************************
+ * struct M4xVSS_Context
+ * @brief Internal context of the xVSS
+ * @note This structure contains all internal information needed by the xVSS
+ ******************************************************************************
+*/
+typedef struct {
+ /**< Pointer on OSAL file read functions */
+ M4OSA_FileReadPointer* pFileReadPtr;
+ /**< Pointer on OSAL file write functions */
+ M4OSA_FileWriterPointer* pFileWritePtr;
+ /**< Local copy of video editor settings */
+ M4VSS3GPP_EditSettings* pSettings;
+ /**< Current Settings of video editor to use in step functions for preview/save */
+ M4VSS3GPP_EditSettings* pCurrentEditSettings;
+ /**< Current context of video editor to use in step functions for preview/save */
+ M4VSS3GPP_EditContext pCurrentEditContext;
+ /**< This is to know if a previous M4xVSS_sendCommand has already been called */
+ M4OSA_UInt8 previousClipNumber;
+ /**< Audio mixing settings, needed to free it in M4xVSS_internalCloseAudioMixedFile function*/
+ M4VSS3GPP_AudioMixingSettings* pAudioMixSettings;
+ /**< Audio mixing context */
+ M4VSS3GPP_AudioMixingContext pAudioMixContext;
+ /**< File path for PCM output file: used for preview, given to user */
+ M4OSA_Char* pcmPreviewFile;
+ /**< Duplication of output file pointer, to be able to use audio mixing */
+ M4OSA_Char* pOutputFile;
+ /**< Duplication of temporary file pointer, to be able to use audio mixing */
+ M4OSA_Char* pTemporaryFile;
+ /**< Micro state for Saving/Previewing state */
+ M4xVSS_editMicroState editingStep;
+ /**< Micro state for Analyzing state */
+ M4xVSS_analyseMicroState analyseStep;
+ /**< Nb of step for analysis or save/preview. Used to compute progression
+ of analysis or save/preview */
+ M4OSA_UInt8 nbStepTotal;
+ /**< Current step number for analysis or save/preview */
+ M4OSA_UInt8 currentStep;
+ /**< To be able to free pEffects during preview close */
+ M4xVSS_PreviewSettings* pPreviewSettings;
+ /**< Temporary file path: all temporary files are created here */
+ M4OSA_Char* pTempPath;
+ /**< Current state of xVSS */
+ M4xVSS_State m_state;
+ /**< List of still pictures input to convert to 3GP with parameters */
+ M4xVSS_Pto3GPP_params* pPTo3GPPparamsList;
+ /**< Current element of the above chained list being processed by the Pto3GPP */
+ M4xVSS_Pto3GPP_params* pPTo3GPPcurrentParams;
+ /**< Current Pto3GPP context, needed to call Pto3GPP_step function in M4xVSS_step function */
+ M4PTO3GPP_Context pM4PTO3GPP_Ctxt;
+ /**< Pointer on the callback function of the Pto3GPP module */
+ M4xVSS_PictureCallbackCtxt* pCallBackCtxt;
+ /**< List of files to transcode with parameters */
+ M4xVSS_MCS_params* pMCSparamsList;
+ /**< Current element of the above chained list being processed by the MCS */
+ M4xVSS_MCS_params* pMCScurrentParams;
+ /**< Current MCS context, needed to call MCS_step function in M4xVSS_step function*/
+ M4MCS_Context pMCS_Ctxt;
+ /**< Index to have unique temporary filename */
+ M4OSA_UInt32 tempFileIndex;
+ /**< In case of MMS use case, targeted bitrate to reach output file size */
+ M4OSA_UInt32 targetedBitrate;
+ /**< If the sendCommand fct is called twice or more, the first computed timescale
+ recorded here must be reused */
+ M4OSA_UInt32 targetedTimescale;
+
+ /*UTF Conversion support*/
+ M4xVSS_UTFConversionContext UTFConversionContext; /*UTF conversion context structure*/
+
+} M4xVSS_Context;
+
+/**
+ * Internal function prototypes */
+
+M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext,
+ M4OSA_UInt32 *rotationDegree);
+
+M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalDecodeJPG(M4OSA_Void* pFileIn, M4OSA_FileReadPointer* pFileReadPtr,
+ M4VIFI_ImagePlane** pImagePlanes);
+
+M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
+ M4OSA_FileReadPointer* pFileReadPtr,
+ M4VIFI_ImagePlane** pImagePlanes,
+ M4OSA_UInt32 width,M4OSA_UInt32 height);
+M4OSA_ERR M4xVSS_internalDecodeAndResizeJPG(M4OSA_Void* pFileIn,
+ M4OSA_FileReadPointer* pFileReadPtr,
+ M4VIFI_ImagePlane* pImagePlanes);
+M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
+ M4OSA_FileReadPointer* pFileReadPtr,
+ M4VIFI_ImagePlane* pImagePlanes,
+ M4OSA_UInt32 width,M4OSA_UInt32 height);
+
+M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx);
+
+#ifdef DECODE_GIF_ON_SAVING
+M4OSA_ERR M4xVSS_internalDecodeGIF(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalDecodeGIF_Initialization(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalDecodeGIF_Cleaning(M4OSA_Context pContext);
+
+#else
+M4OSA_ERR M4xVSS_internalDecodeGIF(M4OSA_Context pContext, M4VSS3GPP_EffectSettings* pEffect,
+ M4xVSS_FramingStruct* framingCtx);
+#endif /*DECODE_GIF_ON_SAVING*/
+
+M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
+ M4VSS3GPP_EffectSettings* pEffect,
+ M4xVSS_FramingStruct* framingCtx,
+ M4VIDEOEDITING_VideoFrameSize \
+ OutputVideoResolution);
+
+M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_freeSettings(M4VSS3GPP_EditSettings* pSettings);
+
+M4OSA_ERR M4xVSS_freeCommand(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext, M4OSA_Char* pFile,
+ M4VIDEOEDITING_ClipProperties *pFileProperties);
+
+M4OSA_ERR M4xVSS_AlphaMagic( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+ M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiTransitionKind);
+
+M4OSA_ERR M4xVSS_AlphaMagicBlending( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+ M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiTransitionKind);
+
+M4OSA_ERR M4xVSS_SlideTransition( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+ M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiTransitionKind);
+
+M4OSA_ERR M4xVSS_FadeBlackTransition(M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+ M4VIFI_ImagePlane PlaneIn2[3],M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiTransitionKind);
+
+M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
+ M4VSS3GPP_EditSettings* pSettings,
+ M4OSA_UInt32* pTargetedTimeScale);
+
+M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+ M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize);
+
+
+M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+ M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+
+#endif /* __M4XVSS_INTERNAL_H__ */
+
diff --git a/libvideoeditor/vss/mcs/Android.mk b/libvideoeditor/vss/mcs/Android.mk
new file mode 100755
index 0000000..5053e7d
--- /dev/null
+++ b/libvideoeditor/vss/mcs/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_API.h b/libvideoeditor/vss/mcs/inc/M4MCS_API.h
new file mode 100755
index 0000000..a8987e2
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_API.h
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4MCS_API.h
+ * @brief Media Conversion Service public API.
+ * @note MCS allows transcoding a 3gp/mp4 file into a new 3gp/mp4 file changing the
+ * video and audio encoding settings.
+ * It is a straightforward and fully synchronous API.
+ ******************************************************************************
+ */
+
+#ifndef __M4MCS_API_H__
+#define __M4MCS_API_H__
+
+/**
+ * OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ * OSAL types for file access */
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+
+/**
+ * Definition of M4_VersionInfo */
+#include "M4TOOL_VersionInfo.h"
+
+/**
+ * Common definitions of video editing components */
+#include "M4_VideoEditingCommon.h"
+
+/**
+ * To enable external audio codecs registering*/
+#include "M4AD_Common.h"
+#include "M4ENCODER_AudioCommon.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Public type of the MCS context */
+typedef M4OSA_Void* M4MCS_Context;
+
+
+/**
+ ******************************************************************************
+ * enum M4MCS_MediaRendering
+ * @brief This enum defines different media rendering
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MCS_kResizing = 0, /**< The media is resized, the aspect ratio can be
+ different from the original one.
+ All of the media is rendered */
+ M4MCS_kCropping, /**< The media is cropped, the aspect ratio is the
+ same as the original one.
+ The media is not rendered entirely */
+ M4MCS_kBlackBorders /**< Black borders are rendered in order to keep the
+ original aspect ratio. All the media is rendered */
+} M4MCS_MediaRendering;
+
+
+/**
+ ******************************************************************************
+ * struct M4MCS_ExternalProgress
+ * @brief This structure contains information provided to the external Effect functions
+ * @note The uiProgress value should be enough for most cases
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4OSA_UInt32 uiProgress; /**< Progress of the Effect from 0 to 1000 (one thousand) */
+ M4OSA_UInt32 uiClipTime; /**< Current time, in milliseconds,
+ in the current clip time-line */
+ M4OSA_UInt32 uiOutputTime; /**< Current time, in milliseconds,
+ in the output clip time-line */
+
+} M4MCS_ExternalProgress;
+
+
+/**
+ ******************************************************************************
+ * enum M4MCS_AudioEffectType
+ * @brief This enumeration defines the audio effect types of the MCS
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MCS_kAudioEffectType_None = 0,
+ M4MCS_kAudioEffectType_FadeIn = 8, /**< Intended for begin effect */
+ M4MCS_kAudioEffectType_FadeOut = 16, /**< Intended for end effect */
+ M4MCS_kAudioEffectType_External = 256
+
+} M4MCS_AudioEffectType;
+
+
+/**
+ ******************************************************************************
+ * prototype M4MCS_editAudioEffectFct
+ * @brief Audio effect functions implemented by the integrator
+ * must match this prototype.
+ * @note The function is provided with the original PCM data buffer and its size.
+ * Audio effect have to be applied on it.
+ * The progress of the effect is given, on a scale from 0 to 1000.
+ * When the effect function is called, all the buffers are valid and
+ * owned by the MCS.
+ *
+ * @param pFunctionContext (IN) The function context, previously set by the integrator
+ * @param pPCMdata (IN/OUT) valid PCM data buffer
+ * @param uiPCMsize (IN/OUT) PCM data buffer corresponding size
+ * @param pProgress (IN) Set of information about the audio effect progress.
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+typedef M4OSA_ERR (*M4MCS_editAudioEffectFct)
+(
+ M4OSA_Void *pFunctionContext,
+ M4OSA_Int16 *pPCMdata,
+ M4OSA_UInt32 uiPCMsize,
+ M4MCS_ExternalProgress *pProgress
+);
+
+
+/**
+ ******************************************************************************
+ * struct M4MCS_EffectSettings
+ * @brief This structure defines an audio effect for the edition.
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4OSA_UInt32 uiStartTime; /**< In ms */
+ M4OSA_UInt32 uiDuration; /**< In ms */
+ M4MCS_editAudioEffectFct ExtAudioEffectFct; /**< External effect function */
+ M4OSA_Void *pExtAudioEffectFctCtxt; /**< Context given to the external
+ effect function */
+ M4MCS_AudioEffectType AudioEffectType; /**< None, FadeIn, FadeOut */
+
+} M4MCS_EffectSettings;
+
+
+/**
+ ******************************************************************************
+ * struct M4MCS_OutputParams
+ * @brief MCS Output parameters
+ * @note Following parameters are used for still picture inputs :
+ * - OutputFileType (must be set to M4VIDEOEDITING_kFileType_JPG)
+ * - bDiscardExif must be set to M4OSA_TRUE or M4OSA_FALSE
+ * - bAdjustOrientation must be set to M4OSA_TRUE or M4OSA_FALSE
+ * - (MediaRendering is not handled : output image resolution is always
+ set according to BestFit criteria)
+ * bDiscardExif and bAdjustOrientation are still picture only parameters
+ ******************************************************************************
+ */
+typedef struct
+{
+ /**< Format of the output file */
+ M4VIDEOEDITING_FileType OutputFileType;
+ /**< Output video compression format, see enum */
+ M4VIDEOEDITING_VideoFormat OutputVideoFormat;
+ /**< Output frame size : QQVGA, QCIF or SQCIF */
+ M4VIDEOEDITING_VideoFrameSize OutputVideoFrameSize;
+ /**< Targeted Output framerate, see enum */
+ M4VIDEOEDITING_VideoFramerate OutputVideoFrameRate;
+ /**< Format of the audio in the stream */
+ M4VIDEOEDITING_AudioFormat OutputAudioFormat;
+ /**< Sampling frequency of the audio in the stream */
+ M4VIDEOEDITING_AudioSamplingFrequency OutputAudioSamplingFrequency;
+ /**< Set to M4OSA_TRUE if the output audio is mono */
+ M4OSA_Bool bAudioMono;
+ /**< Output PCM file if not NULL */
+ M4OSA_Char *pOutputPCMfile;
+ /**< To crop, resize, or render black borders*/
+ M4MCS_MediaRendering MediaRendering;
+ /**< List of effects */
+ M4MCS_EffectSettings *pEffects;
+ /**< Number of effects in the above list */
+ M4OSA_UInt8 nbEffects;
+
+ /*--- STILL PICTURE ---*/
+ /**< TRUE: Even if the input file contains an EXIF section,
+ the output file won't contain any EXIF section.*/
+ M4OSA_Bool bDiscardExif ;
+
+ /**< =TRUE : picture must be rotated if Exif tags hold a rotation info
+ (and rotation info is set to 0)*/
+ M4OSA_Bool bAdjustOrientation ;
+ /*--- STILL PICTURE ---*/
+ M4OSA_Int32 outputVideoProfile;
+ M4OSA_Int32 outputVideoLevel;
+} M4MCS_OutputParams;
+
+/*--- STILL PICTURE ---*/
+/**
+ ******************************************************************************
+ * enum M4MCS_SPOutputResolution
+ * @brief Still picture specific : MCS output targeted file resolution
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MCS_kResSameAsInput = 0x00, /*width x height*/
+ M4MCS_kResQVGA = 0x01, /*320x240*/
+ M4MCS_kResVGA = 0x02, /*640x480*/
+ M4MCS_kResWQVGA = 0x03, /*400x240*/
+ M4MCS_kResWVGA = 0x04, /*800x480*/
+ M4MCS_kResXGA = 0x05, /*1024x768*/
+ M4MCS_kResCustom = 0xFF /*Size is set via StillPictureCustomWidth/Height*/
+} M4MCS_SPOutputResolution ;
+
+
+/**
+ ******************************************************************************
+ * enum M4MCS_SPStrategy
+ * @brief Still picture specific : MCS strategy to configure the encoding parameters
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MCS_kFileSizeOnlyFixed = 0x00, /*StillPictureResolution and
+ QualityFactor are ignored*/
+ M4MCS_kFileSizeAndResFixed = 0x01, /*QualityFactor is ignored*/
+ M4MCS_kQualityAndResFixed = 0x02 /*OutputFileSize is ignored*/
+} M4MCS_SPStrategy ;
+
+
+/**
+ ******************************************************************************
+ * enum M4MCS_SPCrop
+ * @brief Still picture specific : indicate whether cropping should be done
+ before changing the resolution
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MCS_kNoCrop = 0x00, /*No Cropping is performed*/
+ M4MCS_kCropBeforeResize = 0x01 /*Input image is cropped (before changing resolution)*/
+} M4MCS_SPCrop ;
+
+
+/**
+ ******************************************************************************
+ * struct M4MCS_EncodingParams
+ * @brief MCS file size, bitrate and cut parameters
+ * @note Following parameters are used for still picture inputs :
+ * - OutputFileSize
+ * - StillPictureResolution
+ * - QualityFactor
+ * - StillPictureStrategy
+ * - StillPictureCustomWidth/Height (if StillPictureResolution==M4MCS_kResCustom)
+ * Still picture only parameters : StillPictureResolution, QualityFactor,
+ * StillPictureStrategy and StillPictureCustomWidth/Height
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4VIDEOEDITING_Bitrate OutputVideoBitrate; /**< Targeted video bitrate */
+ M4VIDEOEDITING_Bitrate OutputAudioBitrate; /**< Targeted audio bitrate */
+ M4OSA_UInt32 BeginCutTime; /**< Beginning cut time in input file */
+ M4OSA_UInt32 EndCutTime; /**< End cut time in input file */
+ M4OSA_UInt32 OutputFileSize; /**< Expected resulting file size */
+ M4OSA_UInt32 OutputVideoTimescale; /**< Optional parameter used to fix a
+ timescale during transcoding */
+
+ /*--- STILL PICTURE ---*/
+ M4OSA_Int32 QualityFactor ; /**< =-1 (undefined) or 0(lowest)..
+ 50(best) : This parameter is the
+ quality indication for the JPEG output
+ file (if =-1 the MCS will set quality
+ automatically)*/
+ M4MCS_SPStrategy StillPictureStrategy ; /**< Defines which input parameters
+ will be taken into account by MCS*/
+ M4MCS_SPOutputResolution StillPictureResolution;/**< Desired output resolution for
+ a still picture file */
+ /**< (only if Resolution==M4MCS_kResCustom) : Custom output image width */
+ M4OSA_UInt32 StillPictureCustomWidth;
+ /**< (only if Resolution==M4MCS_kResCustom) : Custom output image height */
+ M4OSA_UInt32 StillPictureCustomHeight;
+ /**< Indicate whether Crop should be performed */
+ M4MCS_SPCrop StillPictureCrop;
+ /**< (only if cropping) X coordinate of topleft corner of the crop window */
+ M4OSA_UInt32 StillPictureCrop_X;
+ /**< (only if cropping) Y coordinate of topleft corner of the crop window */
+ M4OSA_UInt32 StillPictureCrop_Y;
+ /**< (only if cropping) Width of the crop window (in pixels) */
+ M4OSA_UInt32 StillPictureCrop_W;
+ /**< (only if cropping) Height of the crop window (in pixels) */
+ M4OSA_UInt32 StillPictureCrop_H;
+ /*--- STILL PICTURE ---*/
+} M4MCS_EncodingParams;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
+ * @brief Get the MCS version.
+ * @note Can be called anytime. Do not need any context.
+ * @param pVersionInfo (OUT) Pointer to a version info structure
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_init(M4MCS_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
+ M4OSA_FileWriterPointer* pFileWritePtrFct);
+ * @brief Initializes the MCS (allocates an execution context).
+ * @note
+ * @param pContext (OUT) Pointer on the MCS context to allocate
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @param pFileWritePtrFct (IN) Pointer to OSAL file writer functions
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (If Debug Level >= 2)
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_init(M4MCS_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
+ M4OSA_FileWriterPointer* pFileWritePtrFct);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn, M4OSA_Void* pFileOut,
+ M4OSA_UInt32 uiMaxMetadataSize);
+ * @brief Set the MCS input and output files.
+ * @note It opens the input file, but the output file is not created yet.
+ * In case of still picture, four InputFileType values are possible
+ * (M4VIDEOEDITING_kFileType_JPG/BMP/GIF/PNG).
+ * If one of them is set, the OutputFileType SHALL be set to M4VIDEOEDITING_kFileType_JPG
+ * @param pContext (IN) MCS context
+ * @param pFileIn (IN) Input file to transcode (The type of this parameter
+ * (URL, pipe...) depends on the OSAL implementation).
+ * @param mediaType (IN) Container type (.3gp,.amr, ...) of input file.
+ * @param pFileOut (IN) Output file to create (The type of this parameter
+ * (URL, pipe...) depends on the OSAL implementation).
+ * @param pTempFile (IN) Temporary file for the constant memory writer to store
+ * metadata ("moov.bin").
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4ERR_ALLOC: There is no more available memory
+ * @return M4ERR_FILE_NOT_FOUND: The input file has not been found
+ * @return M4MCS_ERR_INVALID_INPUT_FILE: The input file is not a valid file, or is corrupted
+ * @return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM: The input file contains no
+ * supported audio or video stream
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+ M4VIDEOEDITING_FileType InputFileType,
+ M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
+ * @brief Perform one step of transcoding.
+ * @note
+ * @param pContext (IN) MCS context
+ * @param pProgress (OUT) Progress percentage (0 to 100) of the transcoding
+ * @note pProgress must be a valid address.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: One of the parameters is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_WAR_TRANSCODING_DONE: Transcoding is over, user should now call M4MCS_close()
+ * @return M4MCS_ERR_AUDIO_CONVERSION_FAILED: The audio conversion (AAC to AMR-NB, MP3) failed
+ * @return M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY: The input file contains an AAC audio track
+ * with an invalid sampling frequency
+ * (should never happen)
+ * @return M4MCS_WAR_PICTURE_AUTO_RESIZE: Picture will be automatically resized to fit
+ * into requirements
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
+ * @brief Pause the transcoding i.e. release the (external hardware) video decoder.
+ * @note This function is not needed if no hardware accelerators are used.
+ * In that case, pausing the MCS is simply achieved by temporarily suspending
+ * the M4MCS_step function calls.
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
+ * @brief Resume the transcoding after a pause (see M4MCS_pause).
+ * @note This function is not needed if no hardware accelerators are used.
+ * In that case, resuming the MCS is simply achieved by calling
+ * the M4MCS_step function.
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
+ * @brief Finish the MCS transcoding.
+ * @note The output 3GPP file is ready to be played after this call
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
+ * @brief Free all resources used by the MCS.
+ * @note The context is no more valid after this call
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
+ * @brief Finish the MCS transcoding and free all resources used by the MCS
+ * whatever the state is.
+ * @note The context is no more valid after this call
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
+ * M4VIDEOEDITING_ClipProperties* pFileProperties);
+ * @brief Retrieves the properties of the audio and video streams from the input file.
+ * @param pContext (IN) MCS context
+ * @param pProperties (OUT) Pointer on an allocated M4VIDEOEDITING_ClipProperties
+ structure which is filled with the input stream properties.
+ * @note The structure pProperties must be allocated and further de-allocated
+ by the application. The function must be called in the opened state.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
+ M4VIDEOEDITING_ClipProperties *pFileProperties);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
+ * @brief Set the MCS video output parameters.
+ * @note Must be called after M4MCS_open. Must be called before M4MCS_step.
+ * @param pContext (IN) MCS context
+ * @param pParams (IN/OUT) Transcoding parameters
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263 : Output video frame size parameter is
+ * incompatible with H263 encoding
+ * @return M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263 : Output video frame rate parameter is
+ * incompatible with H263 encoding
+ * @return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT : Undefined output video format parameter
+ * @return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE : Undefined output video frame size
+ * @return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE : Undefined output video frame rate
+ * @return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT : Undefined output audio format parameter
+ * @return M4MCS_ERR_DURATION_IS_NULL : Specified output parameters define a null duration stream
+ * (no audio and video)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
+ * @brief Set the values of the encoding parameters
+ * @note Must be called before M4MCS_checkParamsAndStart().
+ * @param pContext (IN) MCS context
+ * @param pRates (IN) Transcoding parameters
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
+ * @return M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac,
+ * 12.2 for amr, 8 for mp3)
+ * @return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equals
+ * @return M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than
+ * the input clip duration
+ * @return M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
+ * @return M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output
+ * file at given bitrates
+ * @return M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 800 kbps)
+ * @return M4MCS_ERR_VIDEOBITRATE_TOO_LOW: Video bitrate too low
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
+ * @brief Get the extended values of the encoding parameters
+ * @note Could be called after M4MCS_setEncodingParams.
+ * @param pContext (IN) MCS context
+ * @param pRates (OUT) Transcoding parameters
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Encoding settings would produce a
+ * null duration clip = encoding is impossible
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext)
+ * @brief
+ * @note
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
+ * @return M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac,
+ * 12.2 for amr, 8 for mp3)
+ * @return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equals
+ * @return M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than
+ * the input clip duration
+ * @return M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
+ * @return M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output
+ * file at given bitrates
+ * @return M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 800 kbps)
+ * @return M4MCS_ERR_VIDEOBITRATE_TOO_LOW: Video bitrate too low
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4MCS_API_H__ */
+
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h b/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h
new file mode 100755
index 0000000..c042dbb
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *************************************************************************
+ * @file M4MCS_ErrorCodes.h
+ * @brief MCS error codes definitions (Media Conversion Service)
+ * @note
+ *************************************************************************
+ **/
+
+#ifndef __M4MCS_ErrorCodes_H__
+#define __M4MCS_ErrorCodes_H__
+
+/**
+ * OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ * OSAL core ID definitions */
+#include "M4OSA_CoreID.h"
+
+
+/************************************************************************/
+/* Warning codes */
+/************************************************************************/
+
+/* End of processing, user should now call M4MCS_close() */
+#define M4MCS_WAR_TRANSCODING_DONE M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x1)
+/* Mediatype is not supported by the MCS */
+#define M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x2)
+/* Indicate that picture will be automatically resized to fit into the required
+ parameters (file size) */
+#define M4MCS_WAR_PICTURE_AUTO_RESIZE M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x3)
+
+/************************************************************************/
+/* Error codes */
+/************************************************************************/
+
+
+/* ----- OPEN ERRORS ----- */
+
+/* The input file contains no supported stream (may be a corrupted file) */
+#define M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x01)
+/* The input file is invalid/corrupted */
+#define M4MCS_ERR_INVALID_INPUT_FILE M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x02)
+/* The input video frame size parameter is undefined */
+#define M4MCS_ERR_INVALID_INPUT_VIDEO_FRAME_SIZE M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x03)
+/* The input video frame size is non multiple of 16 */
+#define M4MCS_ERR_INPUT_VIDEO_SIZE_NON_X16 M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x04)
+
+
+/* ----- SET OUTPUT PARAMS ERRORS ----- */
+
+/* The output video format parameter is undefined */
+#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x10)
+/* The output video frame size parameter is undefined */
+#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x11)
+/* The output video frame rate parameter is undefined */
+#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x12)
+/* The output audio format parameter is undefined */
+#define M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x13)
+/* The output video frame size parameter is incompatible with H263 encoding */
+#define M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263 M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x14)
+/* The output video frame rate parameter is incompatible with H263 encoding
+ (It can't happen in current version of MCS!) */
+#define M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263 M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x15)
+/* A null clip duration has been computed, which is invalid (should never happen!) */
+#define M4MCS_ERR_DURATION_IS_NULL M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x16)
+/* The .mp4 container cannot handle h263 codec */
+#define M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x17)
+
+
+/* ----- PREPARE DECODERS ERRORS ----- */
+
+/* H263 Profile (other than 0) is not supported */
+#define M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x20)
+/* The input file contains an AAC audio track with an invalid sampling frequency
+ (should never happen) */
+#define M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x21)
+/* The audio conversion (AAC to AMR-NB, or MP3) failed */
+#define M4MCS_ERR_AUDIO_CONVERSION_FAILED M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x22)
+
+
+/* ----- SET ENCODING PARAMS ERRORS ----- */
+
+/* Begin cut time is larger than the input clip duration */
+#define M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x30)
+/* Begin cut and End cut are equals */
+#define M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x31)
+/* End cut time is smaller than begin cut time */
+#define M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x32)
+/* Not enough space to store whole output file at given bitrates */
+#define M4MCS_ERR_MAXFILESIZE_TOO_SMALL M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x33)
+/* Video bitrate is too low (avoid ugly video) */
+#define M4MCS_ERR_VIDEOBITRATE_TOO_LOW M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x34)
+/* Audio bitrate is too low (16 kbps min for aac, 12.2 for amr, 8 for mp3) */
+#define M4MCS_ERR_AUDIOBITRATE_TOO_LOW M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x35)
+/* Video bitrate too high (we limit to 800 kbps) */
+#define M4MCS_ERR_VIDEOBITRATE_TOO_HIGH M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x36)
+/* Audio bitrate too high (we limit to 96 kbps) */
+#define M4MCS_ERR_AUDIOBITRATE_TOO_HIGH M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x37)
+
+/* ----- OTHERS ERRORS ----- */
+#define M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x50)
+#define M4MCS_ERR_NOMORE_SPACE M4OSA_ERR_CREATE(M4_ERR, M4MCS, 0x51)
+#define M4MCS_ERR_FILE_DRM_PROTECTED M4OSA_ERR_CREATE(M4_ERR, M4MCS, 0x52)
+#endif /* __M4MCS_ErrorCodes_H__ */
+
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h
new file mode 100755
index 0000000..efaf1e6
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *************************************************************************
+ * @file M4MCS_InternalConfig.h
+ * @brief MCS internal constant values settings
+ * @note This header file is not public
+ *************************************************************************
+ **/
+
+#ifndef __M4MCS_INTERNALCONFIG_H__
+#define __M4MCS_INTERNALCONFIG_H__
+
+
+/**
+ * Definition of max AU size */
+#define M4MCS_AUDIO_MAX_CHUNK_SIZE 7168 /**< add mp3 encoder and writer,
+ max bitrate is now 320kbps instead of 128kbps
+ so this value has to be increased accordingly
+ = ((sizeof(M4OSA_UInt8)*max_channel_number)+3
+ to take a margin(after tests, 2 was not enough
+ ))*MAX_PCM_GRANULARITY_SAMPLES*/
+ /**< Before: 4000*//**< Magical */
+
+/**
+ * Video max AU and fragment size */
+#define M4MCS_VIDEO_MIN_COMPRESSION_RATIO 0.8 /**< Magical. Used to define the max AU size */
+#define M4MCS_VIDEO_CHUNK_AU_SIZE_RATIO 1.2 /**< Magical. Used to define the max chunk size */
+
+/**
+ * Various Magicals */
+#define M4MCS_WRITER_AUDIO_STREAM_ID 1
+#define M4MCS_WRITER_VIDEO_STREAM_ID 2
+
+/**
+ * Granularity for audio encoder */
+ /**< minimum number of samples to pass in AMR encoding case */
+#define M4MCS_PCM_AMR_GRANULARITY_SAMPLES 160
+/**< minimum number of samples to pass in AAC encoding case */
+#define M4MCS_PCM_AAC_GRANULARITY_SAMPLES 1024
+/**< minimum number of samples to pass in MP3 encoding case */
+#define M4MCS_PCM_MP3_GRANULARITY_SAMPLES 576
+
+#define M4MCS_AUDIO_MAX_AU_SIZE 1024 /**< add mp3 encoder and writer
+ This value is not used anymore, now the max AU
+ size is computed dynamically according to the
+ number of channels,the max PCM granularity sample
+ and a margin.*/
+ /**< Before: 1024*//**< Magical */
+/**
+ * Writer file and moov size estimation */
+#define M4MCS_MOOV_OVER_FILESIZE_RATIO 1.04 /**< magical moov size is less than 4%
+ of file size in average */
+
+/**
+ * If 3gp file does not contain an STSS table (no rap frames),
+ jump backward to a specified limit */
+#define M4MCS_NO_STSS_JUMP_POINT 40000 /**< 40 s */
+
+#endif /* __M4MCS_INTERNALCONFIG_H__ */
+
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h
new file mode 100755
index 0000000..21c679e
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file M4MCS_InternalFunctions.h
+ * @brief This file contains all functions declarations internal
+ * to the MCS.
+ *************************************************************************
+ */
+
+#ifndef __M4MCS_INTERNALFUNCTIONS_H__
+#define __M4MCS_INTERNALFUNCTIONS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "M4VPP_API.h"
+#include "M4ENCODER_common.h"
+
+/**
+ **************************************************************************
+ * M4OSA_ERR M4MCS_intApplyVPP( M4VPP_Context pContext,
+ * M4VIFI_ImagePlane* pPlaneIn,
+ * M4VIFI_ImagePlane* pPlaneOut)
+ * @brief Do the video rendering and the resize (if needed)
+ * @note It is called by the video encoder
+ * @param pContext (IN) VPP context, which actually is the MCS
+ * internal context in our case
+ * @param pPlaneIn (IN) Contains the image
+ * @param pPlaneOut (IN/OUT) Pointer to an array of 3 planes that will
+ * contain the output YUV420 image
+ * @return M4NO_ERROR: No error
+ * @return ERR_MCS_VIDEO_DECODE_ERROR: the video decoding failed
+ * @return ERR_MCS_RESIZE_ERROR: the resizing failed
+ * @return Any error returned by an underlying module
+ **************************************************************************
+ */
+M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ M4VIFI_ImagePlane* pPlaneOut);
+
+/**
+ **************************************************************************
+ * M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext);
+ * @brief This function registers the reader, decoders, writers and encoders
+ * in the MCS.
+ * @note
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext is NULL
+ **************************************************************************
+ */
+M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext);
+
+/**
+ **************************************************************************
+ * @brief Clear encoders, decoders, reader and writers interfaces tables
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: The context is null
+ **************************************************************************
+ */
+M4OSA_ERR M4MCS_clearInterfaceTables(M4MCS_Context pContext);
+
+/**
+ **************************************************************************
+ * M4OSA_ERR M4MCS_registerWriter(M4MCS_Context pContext,
+ *                      M4WRITER_OutputFileType MediaType,
+ * M4WRITER_GlobalInterface *pWtrGlobalInterface,
+ * M4WRITER_DataInterface *pWtrDataInterface)
+ * @brief This function will register a specific file format writer.
+ * @note According to the Mediatype, this function will store in the internal
+ * context the writer context.
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext,pWtrGlobalInterface or pWtrDataInterface
+ * is M4OSA_NULL (debug only), or invalid MediaType
+ **************************************************************************
+ */
+M4OSA_ERR M4MCS_registerWriter(
+ M4MCS_Context pContext,
+ M4WRITER_OutputFileType MediaType,
+ M4WRITER_GlobalInterface* pWtrGlobalInterface,
+ M4WRITER_DataInterface* pWtrDataInterface);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerVideoEncoder( M4MCS_Context pContext,
+ *                          M4ENCODER_Format mediaType,
+ * M4ENCODER_GlobalInterface *pEncGlobalInterface)
+ * @brief This function will register a specific video encoder.
+ * @note According to the Mediatype, this function will store in the internal
+ * context the encoder context.
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext or pEncGlobalInterface is
+ * M4OSA_NULL (debug only), or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerVideoEncoder(
+ M4MCS_Context pContext,
+ M4ENCODER_Format MediaType,
+ M4ENCODER_GlobalInterface *pEncGlobalInterface);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerAudioEncoder( M4MCS_Context pContext,
+ * M4ENCODER_AudioFormat mediaType,
+ * M4ENCODER_AudioGlobalInterface *pEncGlobalInterface)
+ * @brief This function will register a specific audio encoder.
+ * @note According to the Mediatype, this function will store in the internal
+ * context the encoder context.
+ * @param pContext: (IN) Execution context.
+ * @param mediaType: (IN) The media type.
+ * @param pEncGlobalInterface: (OUT) The encoder interface functions.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext or pEncGlobalInterface is
+ * M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerAudioEncoder(
+ M4MCS_Context pContext,
+ M4ENCODER_AudioFormat MediaType,
+ M4ENCODER_AudioGlobalInterface *pEncGlobalInterface);
+
+/**
+ **************************************************************************
+ * @brief Register reader.
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ **************************************************************************
+ */
+M4OSA_ERR M4MCS_registerReader( M4MCS_Context pContext,
+ M4READER_MediaType mediaType,
+ M4READER_GlobalInterface *pRdrGlobalInterface,
+ M4READER_DataInterface *pRdrDataInterface);
+
+/**
+ **************************************************************************
+ * @brief Register video decoder
+ * @param pContext (IN/OUT) MCS context.
+ * @param decoderType (IN) Decoder type
+ * @param pDecoderInterface (IN) Decoder interface.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only),or the
+ * decoder type is invalid
+ **************************************************************************
+ */
+M4OSA_ERR M4MCS_registerVideoDecoder( M4MCS_Context pContext,
+ M4DECODER_VideoType decoderType,
+ M4DECODER_VideoInterface *pDecoderInterface);
+
+/**
+ ************************************************************************
+ * @brief Register audio decoder
+ * @note This function is used internally by the MCS to register Core audio decoders.
+ * @param context (IN/OUT) MCS context.
+ * @param decoderType (IN) Audio decoder type
+ * @param pDecoderInterface (IN) Audio decoder interface.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null, or the decoder type is invalid(in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_registerAudioDecoder(M4MCS_Context pContext, M4AD_Type decoderType,
+ M4AD_Interface *pDecoderInterface);
+
+/**
+ ************************************************************************
+ * @brief Unregister writer
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_unRegisterAllWriters(M4MCS_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief Unregister the encoders
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_unRegisterAllEncoders(M4MCS_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief Unregister reader
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_unRegisterAllReaders(M4MCS_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief Unregister the decoders
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_unRegisterAllDecoders(M4MCS_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief Set current writer
+ * @param pContext (IN/OUT) MCS context.
+ * @param mediaType (IN) Media type.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentWriter( M4MCS_Context pContext,
+ M4VIDEOEDITING_FileType mediaType);
+
+/**
+ ************************************************************************
+ * @brief Set a video encoder
+ * @param pContext (IN/OUT) MCS context.
+ * @param MediaType (IN) Encoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentVideoEncoder( M4MCS_Context pContext,
+ M4VIDEOEDITING_VideoFormat mediaType);
+
+/**
+ ************************************************************************
+ * @brief Set an audio encoder
+ * @param context (IN/OUT) MCS context.
+ * @param MediaType (IN) Encoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentAudioEncoder( M4MCS_Context pContext,
+ M4VIDEOEDITING_AudioFormat mediaType);
+
+/**
+ ************************************************************************
+ * @brief Set current reader
+ * @param pContext (IN/OUT) MCS context.
+ * @param mediaType (IN) Media type.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentReader( M4MCS_Context pContext,
+ M4VIDEOEDITING_FileType mediaType);
+
+/**
+ ************************************************************************
+ * @brief Set a video decoder
+ * @param pContext (IN/OUT) MCS context.
+ * @param decoderType (IN) Decoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentVideoDecoder( M4MCS_Context pContext,
+ M4_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * @brief Set an audio decoder
+ * @param context (IN/OUT) MCS context.
+ * @param decoderType (IN) Decoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentAudioDecoder(M4MCS_Context pContext, M4_StreamType mediaType);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pC)
+ * @brief Check if an effect has to be applied currently
+ * @note It is called by the stepEncoding function
+ * @param pC (IN) MCS internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn()
+ * @brief Apply audio effect FadeIn to pPCMdata
+ * @param pC (IN/OUT) Internal edit context
+ * @param pPCMdata (IN/OUT) Input and Output PCM audio data
+ * @param uiPCMsize (IN) Size of pPCMdata
+ * @param pProgress (IN) Effect progress
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn( M4OSA_Void *pFunctionContext,
+ M4OSA_Int16 *pPCMdata,
+ M4OSA_UInt32 uiPCMsize,
+ M4MCS_ExternalProgress *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut()
+ * @brief Apply audio effect FadeOut to pPCMdata
+ * @param pC (IN/OUT) Internal edit context
+ * @param pPCMdata (IN/OUT) Input and Output PCM audio data
+ * @param uiPCMsize (IN) Size of pPCMdata
+ * @param pProgress (IN) Effect progress
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut( M4OSA_Void *pFunctionContext,
+ M4OSA_Int16 *pPCMdata,
+ M4OSA_UInt32 uiPCMsize,
+ M4MCS_ExternalProgress *pProgress);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4MCS_INTERNALFUNCTIONS_H__ */
+
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h
new file mode 100755
index 0000000..5e4f236
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h
@@ -0,0 +1,606 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file M4MCS_InternalTypes.h
+ * @brief MCS internal types and structures definitions
+ * @note This header file is not public
+ *************************************************************************
+ **/
+
+#ifndef __M4MCS_INTERNALTYPES_H__
+#define __M4MCS_INTERNALTYPES_H__
+
+/**
+ * MCS public API and types */
+#include "M4MCS_API.h"
+#include "M4MCS_ErrorCodes.h"
+
+#include "NXPSW_CompilerSwitches.h"
+
+/** Determine absolute value of a. */
+#define M4MCS_ABS(a) ( ( (a) < (0) ) ? (-(a)) : (a) )
+
+
+#define Y_PLANE_BORDER_VALUE 0x00
+#define U_PLANE_BORDER_VALUE 0x80
+#define V_PLANE_BORDER_VALUE 0x80
+
+
+/**
+ * Internally used modules */
+#include "M4READER_3gpCom.h" /**< Read 3GPP file */
+#include "M4DECODER_Common.h" /**< Decode video */
+#include "M4VIFI_FiltersAPI.h" /**< Video resize */
+#include "M4AD_Common.h" /**< Decoder audio */
+#include "SSRC.h" /**< SSRC */
+#include "From2iToMono_16.h" /**< Stereo to Mono */
+#include "MonoTo2I_16.h" /**< Mono to Stereo */
+#include "M4ENCODER_AudioCommon.h" /**< Encode audio */
+#include "M4WRITER_common.h" /**< Writer common interface */
+#include "M4ENCODER_common.h"
+
+/**
+ * Instead of including AAC core properties, it is better to redefine the needed type
+ * AAC_DEC_STREAM_PROPS
+ * In case of external AAC decoder, it will be necessary to put this type as public
+ */
+
+/**
+ ******************************************************************************
+ * struct AAC_DEC_STREAM_PROPS
+ * @brief AAC Stream properties
+ * @note aNumChan and aSampFreq are used for parsing even if the user parameters
+ * are different. User parameters will be input for the output behaviour
+ * of the decoder whereas for parsing bitstream properties are used.
+ ******************************************************************************
+ */
+typedef struct {
+ M4OSA_Int32 aAudioObjectType; /**< Audio object type of the stream - in fact
+ the type found in the Access Unit parsed */
+ M4OSA_Int32 aNumChan; /**< number of channels (=1(mono) or =2(stereo))
+ as indicated by input bitstream*/
+ M4OSA_Int32 aSampFreq; /**< sampling frequency in Hz */
+ M4OSA_Int32 aExtensionSampFreq; /**< extended sampling frequency in Hz, = 0 is
+ no extended frequency */
+ M4OSA_Int32 aSBRPresent; /**< presence=1/absence=0 of SBR */
+ M4OSA_Int32 aPSPresent; /**< presence=1/absence=0 of PS */
+ M4OSA_Int32 aMaxPCMSamplesPerCh; /**< max number of PCM samples per channel */
+} AAC_DEC_STREAM_PROPS;
+
+/**
+ ******************************************************************************
+ * @brief Codecs registration same as in VPS and VES, so less mapping
+ * is required toward MCS api types
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4WRITER_GlobalInterface* pGlobalFcts; /**< open, close, setoption,etc... functions */
+ M4WRITER_DataInterface* pDataFcts; /**< data manipulation functions */
+} M4MCS_WriterInterface;
+
+/**
+ ******************************************************************************
+ * enum M4MCS_States
+ * @brief Main state machine of the MCS.
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MCS_kState_CREATED, /**< M4MCS_init has been called */
+ M4MCS_kState_OPENED, /**< M4MCS_open has been called */
+ M4MCS_kState_SET, /**< All mandatory parameters have been set */
+    M4MCS_kState_READY,          /**< All optional parameters have been set */
+ M4MCS_kState_BEGINVIDEOJUMP, /**< Must jump to the Iframe before the begin cut */
+ M4MCS_kState_BEGINVIDEODECODE, /**< Must decode up to the begin cut */
+ M4MCS_kState_PROCESSING, /**< Step can be called */
+ M4MCS_kState_PAUSED, /**< Paused, Resume can be called */
+ M4MCS_kState_FINISHED, /**< Transcoding is finished */
+ M4MCS_kState_CLOSED /**< Output file has been created */
+} M4MCS_States;
+
+/**
+ ******************************************************************************
+ * enum M4MCS_StreamState
+ * @brief State of a media stream encoding (audio or video).
+ ******************************************************************************
+ */
+typedef enum
+{
+ M4MCS_kStreamState_NOSTREAM = 0, /**< No stream present */
+ M4MCS_kStreamState_STARTED = 1, /**< The stream encoding is in progress */
+ M4MCS_kStreamState_FINISHED = 2 /**< The stream has finished encoding */
+} M4MCS_StreamState;
+
+
+/**
+ ******************************************************************************
+ * enum anonymous enum
+ * @brief enum to keep track of the encoder state
+ ******************************************************************************
+ */
+enum
+{
+ M4MCS_kNoEncoder,
+ M4MCS_kEncoderClosed,
+ M4MCS_kEncoderStopped,
+ M4MCS_kEncoderRunning
+};
+
+/**
+ ******************************************************************************
+ * structure NSWAVC_bitStream_t_MCS
+ * @brief Bitstream writing state: bit/byte position, output buffer and the
+ *        last bytes written (private). Note: this is not the MCS context.
+ ******************************************************************************
+ */
+typedef struct
+{
+ M4OSA_UInt32 bitPos;
+ /* bit count of number of bits used so far */
+
+ M4OSA_UInt8 *streamBuffer;
+ /* Bitstream Buffer */
+
+ M4OSA_UInt32 byteCnt;
+ /* Number of Bytes written in Bitstream buffer*/
+
+ M4OSA_UInt32 currBuff;
+ /* Current buffer holds, 4bytes of bitstream*/
+
+ M4OSA_UInt8 prevByte;
+ /* Previous byte written in the buffer */
+
+ M4OSA_UInt8 prevPrevByte;
+ /* Previous to previous byte written in the buffer */
+
+}NSWAVC_bitStream_t_MCS;
+
+#define _MAXnum_slice_groups 8
+#define _MAXnum_ref_frames_in_pic_order_cnt_cycle 256
+
+typedef struct
+{
+ M4OSA_UInt32 level_idc_index;
+ M4OSA_UInt32 MaxFrameNum;
+ M4OSA_UInt32 expectedDeltaPerPicOrderCntCycle;
+ M4OSA_Int32 MaxPicOrderCntLsb;
+ M4OSA_Int32 max_dec_frame_buffering;
+
+ /* (pic_order_cnt_type == 1) */
+ M4OSA_Int32 offset_for_non_ref_pic;
+ M4OSA_Int32 offset_for_top_to_bottom_field;
+ M4OSA_Int32 frame_crop_left_offset;
+ M4OSA_Int32 frame_crop_right_offset;
+ M4OSA_Int32 frame_crop_top_offset;
+ M4OSA_Int32 frame_crop_bottom_offset;
+ M4OSA_Int32 offset_for_ref_frame[_MAXnum_ref_frames_in_pic_order_cnt_cycle];
+
+ M4OSA_UInt16 PicWidthInMbs;
+ M4OSA_UInt16 FrameHeightInMbs;
+ M4OSA_UInt16 pic_width_in_mbs_minus1;
+ M4OSA_UInt16 pic_height_in_map_units_minus1;
+
+#ifdef _CAP_FMO_
+ M4OSA_UInt16 NumSliceGroupMapUnits;
+ M4OSA_UInt16 MaxPicSizeInMbs;
+#endif /*_CAP_FMO_*/
+
+ M4OSA_UInt8 profile_idc;
+ M4OSA_UInt8 reserved_zero_4bits;
+ M4OSA_UInt8 level_idc;
+ M4OSA_UInt8 seq_parameter_set_id;
+ M4OSA_UInt8 log2_max_frame_num_minus4;
+ M4OSA_UInt8 pic_order_cnt_type;
+ /* if(pic_order_cnt_type == 0) */
+ M4OSA_UInt8 log2_max_pic_order_cnt_lsb_minus4;
+
+ M4OSA_UInt8 num_ref_frames_in_pic_order_cnt_cycle;
+ /* for( i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++ ) */
+ M4OSA_UInt8 num_ref_frames;
+
+ M4OSA_Bool constraint_set0_flag;
+ M4OSA_Bool constraint_set1_flag;
+ M4OSA_Bool constraint_set2_flag;
+ M4OSA_Bool constraint_set3_flag;
+ M4OSA_Bool delta_pic_order_always_zero_flag;
+ M4OSA_Bool gaps_in_frame_num_value_allowed_flag;
+ M4OSA_Bool frame_mbs_only_flag;
+ M4OSA_Bool mb_adaptive_frame_field_flag;
+ M4OSA_Bool direct_8x8_inference_flag;
+ M4OSA_Bool frame_cropping_flag;
+ M4OSA_Bool vui_parameters_present_flag;
+ M4OSA_Bool Active;
+
+ /* vui_seq_parameters_t vui_seq_parameters; */
+} ComSequenceParameterSet_t_MCS;
+
+typedef struct
+{
+ M4OSA_Int16 pic_init_qp_minus26;
+ M4OSA_Int16 pic_init_qs_minus26;
+ M4OSA_Int16 chroma_qp_index_offset;
+
+//#ifdef _CAP_FMO_
+ /* if( slice_group_map_type = = 0 ) */
+ M4OSA_UInt16 run_length_minus1[_MAXnum_slice_groups];
+ /* else if( slice_group_map_type = = 2 ) */
+ M4OSA_UInt16 top_left[_MAXnum_slice_groups];
+ M4OSA_UInt16 bottom_right[_MAXnum_slice_groups];
+ /* else if( slice_group_map_type = = 6 ) */
+ M4OSA_UInt16 pic_size_in_map_units_minus1;
+ M4OSA_UInt16 slice_group_change_rate_minus1;
+
+ M4OSA_UInt16 FirstMbInSliceGroup[_MAXnum_slice_groups];
+ M4OSA_UInt16 LastMbInSliceGroup[_MAXnum_slice_groups];
+
+
+ M4OSA_UInt8 *slice_group_id;
+ M4OSA_UInt8 *MapUnitToSliceGroupMap;
+ M4OSA_UInt8 *MbToSliceGroupMap;
+ M4OSA_UInt16 NumSliceGroupMapUnits;
+
+ M4OSA_UInt8 slice_group_map_type;
+ /* else if( slice_group_map_type = = 3 || 4 || 5 */
+ M4OSA_Bool slice_group_change_direction_flag;
+ M4OSA_Bool map_initialized;
+// #endif /*_CAP_FMO_*/
+
+ M4OSA_UInt8 pic_parameter_set_id;
+ M4OSA_UInt8 seq_parameter_set_id;
+ M4OSA_UInt8 num_ref_idx_l0_active_minus1;
+ M4OSA_UInt8 num_ref_idx_l1_active_minus1;
+ M4OSA_UInt8 weighted_bipred_idc;
+ M4OSA_UInt8 num_slice_groups_minus1;
+
+ M4OSA_Bool entropy_coding_mode_flag;
+ /* if( pic_order_cnt_type < 2 ) in the sequence parameter set */
+ M4OSA_Bool pic_order_present_flag;
+ M4OSA_Bool weighted_pred_flag;
+ M4OSA_Bool deblocking_filter_control_present_flag;
+ M4OSA_Bool constrained_intra_pred_flag;
+ M4OSA_Bool redundant_pic_cnt_present_flag;
+ M4OSA_Bool Active;
+
+ ComSequenceParameterSet_t_MCS *p_active_sps;
+} ComPictureParameterSet_t_MCS;
+
+typedef struct
+{
+ M4OSA_UInt32 bitPos; /*!< bit position in buffer */
+ M4OSA_UInt32 totalBits; /*!< bit position in file (total bits read so far) */
+
+ M4OSA_UInt32 lastTotalBits; /*!< bit position in file of the last VOP */
+ M4OSA_UInt32 numBitsInBuffer; /*!< number of bits in buffer */
+ M4OSA_UInt32 readableBytesInBuffer; /*!< number of bytes that can be read in decoder buffer*/
+ M4OSA_UInt32 maxBufferSize; /*!< max buffer size in bit units */
+ M4OSA_UInt8 *Buffer; /*!< char buffer at reading from file */
+ M4OSA_Int32 i8BitCnt;
+ M4OSA_UInt32 ui32TempBuff;
+ M4OSA_Int8*pui8BfrPtr;
+ M4OSA_UInt32 ui32LastTwoBytes; /*!< stores the last read two bytes */
+} ComBitStreamMCS_t;
+
+
+typedef struct
+{
+
+ M4OSA_Int32 prev_frame_num;
+ M4OSA_Int32 cur_frame_num;
+ M4OSA_Int32 prev_new_frame_num;
+ M4OSA_Int32 log2_max_frame_num_minus4;
+ M4OSA_Int32 is_done;
+ M4OSA_Int32 is_first;
+ M4OSA_Int32 frame_count;
+ M4OSA_Int32 frame_mod_count;
+ M4OSA_Int32 POC_lsb;
+ M4OSA_Int32 POC_lsb_mod;
+
+
+ M4OSA_UInt32 m_Num_Bytes_NALUnitLength;
+
+ M4OSA_UInt8* m_pDecoderSpecificInfo; /**< Pointer on specific information required
+ to create a decoder */
+ M4OSA_UInt32 m_decoderSpecificInfoSize;/**< Size of the specific information pointer above*/
+
+ M4OSA_UInt8* m_pEncoderSPS;
+ M4OSA_UInt32 m_encoderSPSSize;
+
+ M4OSA_UInt8* m_pEncoderPPS;
+ M4OSA_UInt32 m_encoderPPSSize;
+
+ M4OSA_UInt8* m_pFinalDSI;
+ M4OSA_UInt32 m_pFinalDSISize;
+
+ M4OSA_UInt32 m_encoder_SPS_Cnt;
+ ComSequenceParameterSet_t_MCS *p_clip_sps;
+ M4OSA_UInt32 m_encoder_PPS_Cnt;
+ ComPictureParameterSet_t_MCS *p_clip_pps;
+
+ ComSequenceParameterSet_t_MCS *p_encoder_sps;
+ ComPictureParameterSet_t_MCS *p_encoder_pps;
+
+
+ ComSequenceParameterSet_t_MCS encoder_sps;
+ ComPictureParameterSet_t_MCS encoder_pps;
+ ComSequenceParameterSet_t_MCS clip_sps;
+
+ /* Encoder SPS parameters */
+ M4OSA_UInt32 enc_seq_parameter_set_id;
+ M4OSA_UInt32 enc_log2_max_frame_num_minus4;
+ M4OSA_UInt32 enc_pic_order_cnt_type;
+ M4OSA_UInt32 enc_log2_max_pic_order_cnt_lsb_minus4; /* applicable when POC type = 0 */
+ M4OSA_UInt32 enc_delta_pic_order_always_zero_flag;
+ M4OSA_Int32 enc_offset_for_non_ref_pic;
+ M4OSA_Int32 enc_offset_for_top_to_bottom_field;
+ M4OSA_UInt32 enc_num_ref_frames_in_pic_order_cnt_cycle; /* range 0 to 255 */
+ /* array of size num_ref_frames_in_pic_order_cnt_cycle */
+ M4OSA_Int32 enc_offset_for_ref_frame[256];
+ M4OSA_UInt32 enc_num_ref_frames;
+ M4OSA_UInt32 enc_gaps_in_frame_num_value_allowed_flag;
+
+
+ /* Input clip SPS parameters */
+ M4OSA_UInt32 clip_seq_parameter_set_id;
+ M4OSA_UInt32 clip_log2_max_frame_num_minus4;
+ M4OSA_UInt32 clip_pic_order_cnt_type;
+ M4OSA_UInt32 clip_log2_max_pic_order_cnt_lsb_minus4; /* applicable when POC type = 0 */
+ M4OSA_UInt32 clip_delta_pic_order_always_zero_flag;
+ M4OSA_Int32 clip_offset_for_non_ref_pic;
+ M4OSA_Int32 clip_offset_for_top_to_bottom_field;
+ M4OSA_UInt32 clip_num_ref_frames_in_pic_order_cnt_cycle; /* range 0 to 255 */
+ /* array of size num_ref_frames_in_pic_order_cnt_cycle */
+ M4OSA_Int32 clip_offset_for_ref_frame[256];
+ M4OSA_UInt32 clip_num_ref_frames;
+ M4OSA_UInt32 clip_gaps_in_frame_num_value_allowed_flag;
+
+ M4OSA_UInt32 final_PPS_ID;
+ M4OSA_UInt32 final_SPS_ID;
+ NSWAVC_bitStream_t_MCS encbs;
+
+} NSWAVC_MCS_t;
+
+
+
+/**
+ ******************************************************************************
+ * structure M4MCS_InternalContext
+ * @brief This structure defines the MCS context (private)
+ * @note This structure is used for all MCS calls to store the context
+ ******************************************************************************
+ */
+typedef struct
+{
+ /**
+ * MCS State and settings stuff */
+ M4MCS_States State; /**< MCS internal state */
+ M4MCS_StreamState VideoState;/**< State of the video encoding */
+ M4MCS_StreamState AudioState;/**< State of the audio encoding */
+ M4OSA_Bool noaudio;/**< Flag to know if we have to deal with audio transcoding */
+ M4OSA_Bool novideo;/**< Flag to know if we have to deal with video transcoding */
+
+ M4VIDEOEDITING_ClipProperties InputFileProperties;/**< Input audio/video stream properties */
+ M4OSA_Void* pInputFile; /**< Remember input file pointer between fast
+ open and normal open */
+ M4VIDEOEDITING_FileType InputFileType; /**< Remember input file type between fast
+ open and normal open */
+ M4OSA_Bool bFileOpenedInFastMode; /**< Flag to know if a particular reader
+ supports fast open */
+ M4OSA_UInt32 uiMaxMetadataSize; /**< Limitation on the max acceptable moov
+ size of a 3gpp file */
+
+ M4ENCODER_Format EncodingVideoFormat; /**< Output video format, set by the user */
+ M4ENCODER_FrameWidth EncodingWidth; /**< Output video width, set by the user */
+ M4ENCODER_FrameHeight EncodingHeight; /**< Output video height, set by the user */
+ M4ENCODER_FrameRate EncodingVideoFramerate; /**< Output video framerate, set by the user*/
+
+ M4OSA_UInt32 uiBeginCutTime; /**< Begin cut time, in milliseconds */
+ M4OSA_UInt32 uiEndCutTime; /**< End cut time, in milliseconds */
+ M4OSA_UInt32 uiMaxFileSize; /**< Maximum output file size, in bytes */
+ M4OSA_UInt32 uiAudioBitrate; /**< Targeted audio bitrate in bps */
+ M4OSA_UInt32 uiVideoBitrate; /**< Targeted video bitrate in bps */
+
+ M4OSA_UInt8 uiProgress; /**< Progress information saved at each step to be able to
+ return it in case of pause */
+
+ /**
+ * Reader stuff */
+ M4OSA_Context pReaderContext; /**< Context of the reader module */
+ M4_VideoStreamHandler* pReaderVideoStream; /**< Description of the read video stream */
+ M4_AudioStreamHandler* pReaderAudioStream; /**< Description of the read audio stream */
+ M4OSA_Bool bUnsupportedVideoFound; /**< True if an unsupported video stream
+ type has been found */
+ M4OSA_Bool bUnsupportedAudioFound; /**< True if an unsupported audio stream
+ type has been found */
+ M4_AccessUnit ReaderVideoAU; /**< Read video access unit */
+ M4_AccessUnit ReaderVideoAU1; /**< Read video access unit */
+ M4_AccessUnit ReaderVideoAU2; /**< Read video access unit */
+ M4_AccessUnit ReaderAudioAU; /**< Read audio access unit */
+ M4_AccessUnit ReaderAudioAU1; /**< Read audio access unit */
+ M4_AccessUnit ReaderAudioAU2; /**< Read audio access unit */
+ M4OSA_MemAddr8 m_pDataAddress1; /**< Temporary buffer for Access Unit */
+ M4OSA_MemAddr8 m_pDataAddress2; /**< Temporary buffer for Access Unit */
+ M4OSA_MemAddr8 m_pDataVideoAddress1; /**< Temporary buffer for Access Unit */
+ M4OSA_MemAddr8 m_pDataVideoAddress2; /**< Temporary buffer for Access Unit */
+ M4OSA_UInt32 m_audioAUDuration; /**< Audio AU duration */
+ M4OSA_Int32 iAudioCtsOffset; /**< Audio AU CTS offset due to begin cut */
+
+ /**
+ * Video decoder stuff */
+ M4OSA_Context pViDecCtxt; /**< Video decoder context */
+ M4OSA_Double dViDecStartingCts; /**< Video CTS at which the decode/encode will start
+ (used for begin cut and pause/resume) */
+ M4OSA_Double dViDecCurrentCts; /**< Video CTS to decode */
+ M4OSA_Int32 iVideoBeginDecIncr; /**< CTS step for the begin cut decode (doesn't
+ need floating point precision) */
+ M4OSA_Double dCtsIncrement; /**< Cts increment from one video frame to another*/
+ M4OSA_Bool isRenderDup; /**< To handle duplicate frame rendering in case of
+ external decoding */
+ M4VIFI_ImagePlane* lastDecodedPlane; /**< Last decoded plane */
+
+ /**
+ * Video encoder stuff */
+ M4OSA_Context pViEncCtxt; /**< Video encoder context */
+ M4VIFI_ImagePlane* pPreResizeFrame; /**< The decoded image before resize
+ (allocated if resize needed only)*/
+ M4OSA_UInt32 uiEncVideoBitrate; /**< Actual video bitrate for the video encoder */
+ M4OSA_UInt32 outputVideoTimescale;
+ M4OSA_UInt32 encoderState;
+
+ /**
+ * Audio decoder stuff */
+ M4OSA_Context pAudioDecCtxt; /**< Audio (AAC) decoder context */
+ M4AD_Buffer AudioDecBufferIn; /**< Input structure for the audio decoder */
+ M4AD_Buffer AudioDecBufferOut; /**< Output structure for the audio decoder */
+ M4OSA_MemAddr8 pPosInDecBufferOut; /**< Position into the decoder buffer */
+ AAC_DEC_STREAM_PROPS AacProperties; /**< Structure for new api to get AAC properties */
+
+ /**
+ * Sample Rate Converter (SSRC) stuff */
+ SSRC_Instance_t SsrcInstance; /**< Context of the Ssrc */
+ SSRC_Scratch_t* SsrcScratch; /**< Working memory of the Ssrc */
+ short iSsrcNbSamplIn; /**< Number of sample the Ssrc needs as input */
+ short iSsrcNbSamplOut; /**< Number of sample the Ssrc outputs */
+ M4OSA_MemAddr8 pSsrcBufferIn; /**< Input of the SSRC */
+ M4OSA_MemAddr8 pSsrcBufferOut; /**< Output of the SSRC */
+ M4OSA_MemAddr8 pPosInSsrcBufferIn; /**< Position into the SSRC in buffer */
+ M4OSA_MemAddr8 pPosInSsrcBufferOut;/**< Position into the SSRC out buffer */
+
+ M4OSA_Context pLVAudioResampler;
+
+
+ /**
+ * audio encoder stuff */
+ M4OSA_Context pAudioEncCtxt; /**< Context of the audio encoder */
+ M4ENCODER_AudioDecSpecificInfo pAudioEncDSI; /**< Decoder specific info built by the encoder*/
+ M4ENCODER_AudioParams AudioEncParams;/**< Config of the audio encoder */
+ M4OSA_MemAddr8 pAudioEncoderBuffer; /**< Input of the encoder */
+ M4OSA_MemAddr8 pPosInAudioEncoderBuffer; /**< Position into the encoder buffer */
+ M4OSA_UInt32 audioEncoderGranularity; /**< Minimum number of pcm samples needed
+ to feed audio encoder */
+
+ /**
+ * Writer stuff */
+ M4OSA_Context pWriterContext; /**< Context of the writer module */
+ M4OSA_Void* pOutputFile; /**< Output file to be created */
+ M4OSA_Void* pTemporaryFile; /**< Temporary file to be created to store
+ metadata ("moov.bin") */
+ M4SYS_StreamDescription WriterVideoStream; /**< Description of the written video stream */
+ M4SYS_StreamDescription WriterAudioStream; /**< Description of the written audio stream */
+ M4WRITER_StreamVideoInfos WriterVideoStreamInfo;/**< Video properties of the written video
+ stream */
+ M4SYS_AccessUnit WriterVideoAU; /**< Written video access unit */
+ M4SYS_AccessUnit WriterAudioAU; /**< Written audio access unit */
+ M4OSA_UInt32 uiVideoAUCount; /**< Number of video AU written in output
+ file */
+ M4OSA_UInt32 uiVideoMaxAuSize; /**< Max access unit size for the output
+ video stream */
+ M4OSA_UInt32 uiVideoMaxChunckSize; /**< Max chunk size for the output video
+ stream */
+ M4OSA_UInt32 uiAudioAUCount; /**< Number of audio AU written in output file */
+ M4OSA_UInt32 uiAudioMaxAuSize; /**< Max access unit size for the output
+ audio stream */
+ M4OSA_UInt32 uiAudioCts; /**< Audio AU cts (when audio is transcoded) */
+ M4OSA_Bool b_isRawWriter; /**< Boolean to know if the raw writer is
+ registered or not */
+ M4OSA_Context pOutputPCMfile; /**< Output PCM file if not NULL */
+
+ /**
+ * Filesystem functions */
+ M4OSA_FileReadPointer* pOsaFileReadPtr; /**< OSAL file read functions,
+ to be provided by user */
+ M4OSA_FileWriterPointer* pOsaFileWritPtr; /**< OSAL file write functions,
+ to be provided by user */
+
+ /**
+ * Media and Codec registration */
+ /**< Table of M4VES_WriterInterface structures for available Writers list */
+ M4MCS_WriterInterface WriterInterface[M4WRITER_kType_NB];
+ /**< open, close, setoption,etc... functions of the used writer*/
+ M4WRITER_GlobalInterface* pWriterGlobalFcts;
+ /**< data manipulation functions of the used writer */
+ M4WRITER_DataInterface* pWriterDataFcts;
+ /**< Table of M4ENCODER_GlobalInterface structures for available encoders list */
+ M4ENCODER_GlobalInterface* pVideoEncoderInterface[M4ENCODER_kVideo_NB];
+ /**< Functions of the used encoder */
+ M4ENCODER_GlobalInterface* pVideoEncoderGlobalFcts;
+
+ M4OSA_Void* pVideoEncoderExternalAPITable[M4ENCODER_kVideo_NB];
+ M4OSA_Void* pCurrentVideoEncoderExternalAPI;
+ M4OSA_Void* pVideoEncoderUserDataTable[M4ENCODER_kVideo_NB];
+ M4OSA_Void* pCurrentVideoEncoderUserData;
+
+ /**< Table of M4ENCODER_AudioGlobalInterface structures for available encoders list */
+ M4ENCODER_AudioGlobalInterface* pAudioEncoderInterface[M4ENCODER_kAudio_NB];
+ /**< Table of internal/external flags for available encoders list */
+ M4OSA_Bool pAudioEncoderFlag[M4ENCODER_kAudio_NB];
+ /**< Functions of the used encoder */
+ M4ENCODER_AudioGlobalInterface* pAudioEncoderGlobalFcts;
+ M4OSA_Void* pAudioEncoderUserDataTable[M4ENCODER_kAudio_NB];
+ M4OSA_Void* pCurrentAudioEncoderUserData;
+
+ M4READER_GlobalInterface* m_pReaderGlobalItTable[M4READER_kMediaType_NB];
+ M4READER_DataInterface* m_pReaderDataItTable[M4READER_kMediaType_NB];
+ M4READER_GlobalInterface* m_pReader;
+ M4READER_DataInterface* m_pReaderDataIt;
+ M4OSA_UInt8 m_uiNbRegisteredReaders;
+
+ M4DECODER_VideoInterface* m_pVideoDecoder;
+ M4DECODER_VideoInterface* m_pVideoDecoderItTable[M4DECODER_kVideoType_NB];
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+ M4OSA_Void* m_pCurrentVideoDecoderUserData;
+ M4OSA_Void* m_pVideoDecoderUserDataTable[M4DECODER_kVideoType_NB];
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+ M4OSA_UInt8 m_uiNbRegisteredVideoDec;
+
+ M4AD_Interface* m_pAudioDecoder;
+ M4AD_Interface* m_pAudioDecoderItTable[M4AD_kType_NB];
+ M4OSA_Bool m_pAudioDecoderFlagTable[M4AD_kType_NB]; /**< store indices of external
+ decoders */
+ M4OSA_Void* m_pAudioDecoderUserDataTable[M4AD_kType_NB];
+ M4OSA_Void* m_pCurrentAudioDecoderUserData;
+
+ M4MCS_MediaRendering MediaRendering; /**< FB: to crop, resize, or render black borders*/
+ M4OSA_Context m_air_context;
+ M4OSA_Bool bExtOMXAudDecoder; /* External OMX Audio decoder */
+
+ /**< FlB 2009.03.04: Audio effects*/
+ M4MCS_EffectSettings *pEffects; /**< List of effects */
+ M4OSA_UInt8 nbEffects; /**< Number of effects in the above list */
+ M4OSA_Int8 pActiveEffectNumber; /**< Effect ID to be applied, if -1,
+ no effect has to be applied currently*/
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+ M4OSA_Bool m_bIsStillPicture; /**< =TRUE if input file is a still picture
+ (JPEG, PNG, BMP, GIF)*/
+ M4MCS_Context m_pStillPictureContext; /**< Context of the still picture part of MCS*/
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+ NSWAVC_MCS_t *m_pInstance;
+ M4OSA_UInt8 *H264MCSTempBuffer;
+ M4OSA_UInt32 H264MCSTempBufferSize;
+ M4OSA_UInt32 H264MCSTempBufferDataSize;
+ M4OSA_Bool bH264Trim;
+ /* Flag when to get lastdecodedframeCTS */
+ M4OSA_Bool bLastDecodedFrameCTS;
+ M4OSA_Int32 encodingVideoProfile;
+ M4OSA_Int32 encodingVideoLevel;
+
+} M4MCS_InternalContext;
+
+
+#endif /* __M4MCS_INTERNALTYPES_H__ */
+
diff --git a/libvideoeditor/vss/mcs/src/Android.mk b/libvideoeditor/vss/mcs/src/Android.mk
new file mode 100755
index 0000000..37986b9
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/Android.mk
@@ -0,0 +1,58 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvideoeditor_mcs
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_mcs
+
+LOCAL_SRC_FILES:= \
+ M4MCS_API.c \
+ M4MCS_AudioEffects.c \
+ M4MCS_Codecs.c \
+ M4MCS_MediaAndCodecSubscription.c \
+ M4MCS_VideoPreProcessing.c
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_STATIC_LIBRARIES := \
+ libvideoeditor_osal
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/libvideoeditor/osal/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
+ $(TOP)/frameworks/native/include/media/openmax
+
+LOCAL_SHARED_LIBRARIES += libdl
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+ -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar \
+ -DM4MCS_WITH_FAST_OPEN
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_API.c b/libvideoeditor/vss/mcs/src/M4MCS_API.c
new file mode 100755
index 0000000..c056ef0
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_API.c
@@ -0,0 +1,10949 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *************************************************************************
+ * @file M4MCS_API.c
+ * @brief MCS implementation (Video Compressor Service)
+ * @note This file implements the API and the processing of the MCS
+ *************************************************************************
+ **/
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h" /**< OSAL debug management */
+
+/* PCM samples */
+#include "VideoEditorResampler.h"
+/**
+ * Decoder interface */
+#include "M4DECODER_Common.h"
+
+/* Encoder interface*/
+#include "M4ENCODER_common.h"
+
+/* Enable for DEBUG logging */
+//#define MCS_DUMP_PCM_TO_FILE
+#ifdef MCS_DUMP_PCM_TO_FILE
+#include <stdio.h>
+FILE *file_au_reader = NULL;
+FILE *file_pcm_decoder = NULL;
+FILE *file_pcm_encoder = NULL;
+#endif
+
+/* Core headers */
+#include "M4MCS_API.h"
+#include "M4MCS_ErrorCodes.h"
+#include "M4MCS_InternalTypes.h"
+#include "M4MCS_InternalConfig.h"
+#include "M4MCS_InternalFunctions.h"
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+#include "M4MCS_StillPicture.h"
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+/* Common headers (for aac) */
+#include "M4_Common.h"
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#include "M4AIR_API.h"
+#include "OMX_Video.h"
+
+/* Version */
+#define M4MCS_VERSION_MAJOR 3
+#define M4MCS_VERSION_MINOR 4
+#define M4MCS_VERSION_REVISION 3
+
+/**
+ ********************************************************************
+ * Static local functions
+ ********************************************************************
+ */
+
+static M4OSA_ERR M4MCS_intStepSet( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareVideoDecoder(
+ M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareVideoEncoder(
+ M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareAudioProcessing(
+ M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareWriter( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareAudioBeginCut(
+ M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intStepEncoding(
+ M4MCS_InternalContext *pC,
+ M4OSA_UInt8 *pTranscodedTime );
+static M4OSA_ERR M4MCS_intStepBeginVideoJump(
+ M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intStepBeginVideoDecode(
+ M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intAudioNullEncoding( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intAudioTranscoding( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intVideoNullEncoding( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intVideoTranscoding( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intGetInputClipProperties(
+ M4MCS_InternalContext *pContext );
+static M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB(
+ M4OSA_MemAddr8 pAudioFrame );
+static M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC(
+ M4OSA_MemAddr8 pAudioFrame );
+static M4OSA_ERR M4MCS_intCheckMaxFileSize( M4MCS_Context pContext );
+static M4VIDEOEDITING_Bitrate M4MCS_intGetNearestBitrate(
+ M4OSA_Int32 freebitrate,
+ M4OSA_Int8 mode );
+static M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders(
+ M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intReallocTemporaryAU(
+ M4OSA_MemAddr8 *addr,
+ M4OSA_UInt32 newSize );
+static M4OSA_ERR M4MCS_intCheckAndGetCodecProperties(
+ M4MCS_InternalContext *pC);
+
+static M4OSA_ERR M4MCS_intLimitBitratePerCodecProfileLevel(
+ M4ENCODER_AdvancedParams* EncParams);
+static M4OSA_Int32 M4MCS_intLimitBitrateForH263Enc(M4OSA_Int32 profile,
+ M4OSA_Int32 level, M4OSA_Int32 bitrate);
+static M4OSA_Int32 M4MCS_intLimitBitrateForMpeg4Enc(M4OSA_Int32 profile,
+ M4OSA_Int32 level, M4OSA_Int32 bitrate);
+static M4OSA_Int32 M4MCS_intLimitBitrateForH264Enc(M4OSA_Int32 profile,
+ M4OSA_Int32 level, M4OSA_Int32 bitrate);
+
+/**
+ **********************************************************************
+ * External function used only by VideoEditor and that does not appear
+ * in the API
+ **********************************************************************
+ */
+
+M4OSA_ERR M4MCS_open_normalMode( M4MCS_Context pContext,
+ M4OSA_Void *pFileIn,
+ M4VIDEOEDITING_FileType InputFileType,
+ M4OSA_Void *pFileOut,
+ M4OSA_Void *pTempFile );
+
+/* All errors are fatal in the MCS */
+#define M4ERR_CHECK_RETURN(err) if(M4NO_ERROR!=err) return err;
+
+/* A define used with SSRC 1.04 and above to avoid taking blocks smaller
+ * that the minimal block size
+ */
+#define M4MCS_SSRC_MINBLOCKSIZE 100
+
+static M4OSA_UChar Tab_MCS[8] =
+{
+ 17, 5, 3, 3, 1, 1, 1, 1
+};
+
+M4OSA_ERR H264MCS_Getinstance( NSWAVC_MCS_t ** instance )
+{
+ NSWAVC_MCS_t *p_bs = M4OSA_NULL;
+ M4OSA_ERR err = M4NO_ERROR;
+ p_bs = (NSWAVC_MCS_t *)M4OSA_32bitAlignedMalloc(sizeof(NSWAVC_MCS_t), M4MCS,
+ (M4OSA_Char *)"NSWAVC_MCS_t");
+
+ if( M4OSA_NULL == p_bs )
+ {
+ M4OSA_TRACE1_0("H264MCS_Getinstance: allocation error");
+ return M4ERR_ALLOC;
+ }
+
+ p_bs->prev_frame_num = 0;
+ p_bs->cur_frame_num = 0;
+ p_bs->log2_max_frame_num_minus4 = 0;
+ p_bs->prev_new_frame_num = 0;
+ p_bs->is_done = 0;
+ p_bs->is_first = 1;
+
+ p_bs->m_pDecoderSpecificInfo = M4OSA_NULL;
+ p_bs->m_decoderSpecificInfoSize = 0;
+
+ p_bs->m_pEncoderSPS = M4OSA_NULL;
+ p_bs->m_encoderSPSSize = 0;
+
+ p_bs->m_pEncoderPPS = M4OSA_NULL;
+ p_bs->m_encoderPPSSize = 0;
+
+ p_bs->m_pFinalDSI = M4OSA_NULL;
+ p_bs->m_pFinalDSISize = 0;
+
+ p_bs->p_clip_sps = M4OSA_NULL;
+ p_bs->m_encoder_SPS_Cnt = 0;
+
+ p_bs->p_clip_pps = M4OSA_NULL;
+ p_bs->m_encoder_PPS_Cnt = 0;
+
+ p_bs->p_encoder_sps = M4OSA_NULL;
+ p_bs->p_encoder_pps = M4OSA_NULL;
+
+ p_bs->encoder_pps.slice_group_id = M4OSA_NULL;
+
+ *instance = (NSWAVC_MCS_t *)p_bs;
+ return err;
+}
+
+M4OSA_UInt32 H264MCS_getBits( ComBitStreamMCS_t *p_bs, M4OSA_UInt32 numBits )
+{
+ M4OSA_UInt32 ui32RetBits;
+ M4OSA_UInt8 *pbs;
+ M4OSA_Int32 bcnt;
+ p_bs->i8BitCnt -= numBits;
+ bcnt = p_bs->i8BitCnt;
+
+ /* Measure the quantity of bits to be read in ui32TempBuff */
+ ui32RetBits = p_bs->ui32TempBuff >> (32 - numBits);
+
+ /* Read numBits in ui32TempBuff */
+ p_bs->ui32TempBuff <<= numBits;
+ p_bs->bitPos += numBits;
+
+ if( bcnt > 24 )
+ {
+ return (ui32RetBits);
+ }
+ else
+ { /* at least one byte can be buffered in ui32TempBuff */
+ pbs = (M4OSA_UInt8 *)p_bs->pui8BfrPtr;
+
+ if( bcnt < (int)(p_bs->numBitsInBuffer - p_bs->bitPos) )
+ { /* not enough remaining bits in ui32TempBuff: need to be filled */
+ do
+ {
+ /* On the fly detection of EPB byte */
+ if( ( *(pbs) == 0x03)
+ && (!(( pbs[-1])
+ | (pbs[-2])))) //(p_bs->ui32LastTwoBytes & 0x0000FFFF) == 0)
+ {
+ /* EPB byte found: skip it and update bitPos accordingly */
+ (pbs)++;
+ p_bs->bitPos += 8;
+ }
+
+ p_bs->ui32TempBuff |= *(pbs)++ << (24 - bcnt);
+ bcnt += 8;
+ } while ( bcnt <= 24 );
+
+ p_bs->pui8BfrPtr = (M4OSA_Int8 *)pbs;
+ p_bs->i8BitCnt = bcnt;
+ return (ui32RetBits);
+ }
+ }
+
+ if( p_bs->bitPos <= p_bs->numBitsInBuffer )
+ {
+ return (ui32RetBits);
+ }
+ else
+ {
+ return (0);
+ }
+}
+
+M4OSA_Void H264MCS_flushBits( ComBitStreamMCS_t *p_bs, M4OSA_UInt32 numBits )
+{
+ M4OSA_UInt8 *pbs;
+ M4OSA_UInt32 bcnt;
+ p_bs->i8BitCnt -= numBits;
+ bcnt = p_bs->i8BitCnt;
+
+ p_bs->ui32TempBuff <<= numBits;
+ p_bs->bitPos += numBits;
+
+ if( bcnt > 24 )
+ {
+ return;
+ }
+ else
+ { /* at least one byte can be buffered in ui32TempBuff */
+ pbs = (M4OSA_UInt8 *)p_bs->pui8BfrPtr;
+
+ if( bcnt < (p_bs->numBitsInBuffer - p_bs->bitPos) )
+ { /* Not enough remaining bits in ui32TempBuff: need to be filled */
+ do
+ {
+ /* On the fly detection of EPB byte */
+ if( ( *(pbs) == 0x03) && (!(( pbs[-1]) | (pbs[-2]))) )
+ { /* JC: EPB byte found: skip it and update bitPos accordingly */
+ (pbs)++;
+ p_bs->bitPos += 8;
+ }
+ p_bs->ui32TempBuff |= *(pbs)++ << (24 - bcnt);
+ bcnt += 8;
+ } while ( bcnt <= 24 );
+
+ p_bs->pui8BfrPtr = (M4OSA_Int8 *)pbs;
+ p_bs->i8BitCnt = bcnt;
+ }
+ }
+
+ return;
+}
+
+M4OSA_UInt32 H264MCS_DecVLCReadExpGolombCode( ComBitStreamMCS_t *p_bs )
+{
+ M4OSA_UInt32 code, l0 = 0, l1;
+ /* Reading 32 Bits from local cache buffer of Bitstream structure*/
+ code = p_bs->ui32TempBuff;
+
+ /* Checking in first 3 bits*/
+ if( code >> 29 )
+ {
+ l0 = Tab_MCS[(code >> 29)];
+ code = code >> (32 - l0);
+ H264MCS_flushBits(p_bs, l0);
+ }
+ else
+ {
+ if( code )
+ {
+ code <<= 3;
+
+ for ( l0 = 3; code < 0x80000000; code <<= 1, l0++ );
+
+ if( l0 < 16 ) /*all useful bits are inside the 32 bits read */
+ {
+ code = code >> (31 - l0);
+ H264MCS_flushBits(p_bs, 2 * l0 + 1);
+ }
+ else
+ { /* Read the useful bits in 2 parts */
+ l1 = ( l0 << 1) - 31;
+ code >>= l0;
+ H264MCS_flushBits(p_bs, 32);
+ code = ( code << l1) | H264MCS_getBits(p_bs, l1);
+ }
+ }
+ else
+ {
+ H264MCS_flushBits(p_bs, 32);
+
+ if( H264MCS_getBits(p_bs, 1) )
+ {
+ /* if number of leading 0's is 32, the only code allowed is 1 followed
+ by 32 0's */
+
+ /*reading 32 more bits from bitstream buffer*/
+ code = H264MCS_getBits(p_bs, 32);
+
+ if( code == 0 )
+ {
+ return (code - 1);
+ }
+ }
+ /*if number of leading 0's is >32, then symbol is >32 bits,
+ which is an error */
+ //p_bs->state = _BS_ERR;
+ //p_bs->flags |= _BF_SYM_ERR;
+ return (0);
+ }
+ }
+
+ if( 1 ) //(p_bs->state == _BS_OK)
+ {
+ return (code - 1);
+ }
+ else
+ {
+ return (0);
+ }
+ }
+
+M4OSA_Int32 H264MCS_DecVLCReadSignedExpGolombCode( ComBitStreamMCS_t *p_bs )
+{
+ M4OSA_Int32 codeNo, ret;
+
+ /* read the unsigned code number */
+ codeNo = H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+ /* map to the signed value, if value is odd then it's positive,
+ if even then it's negative, formula is (-1)^(k+1)*CEIL(k/2) */
+
+ ret = (codeNo & 0x01) ? (( codeNo + 1) >> 1) : (( -codeNo) >> 1);
+
+ return ret;
+}
+
+M4OSA_Void DecBitStreamReset_MCS( ComBitStreamMCS_t *p_bs,
+ M4OSA_UInt32 bytes_read )
+{
+ p_bs->bitPos = 0;
+
+ p_bs->lastTotalBits = 0;
+ p_bs->numBitsInBuffer = bytes_read << 3;
+ p_bs->readableBytesInBuffer = bytes_read;
+ //p_bs->state = M4NO_ERROR;//_BS_OK;
+ //p_bs->flags = 0;
+
+ p_bs->ui32TempBuff = 0;
+ p_bs->i8BitCnt = 0;
+ p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
+ p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
+ H264MCS_getBits(p_bs, 0);
+}
+
+M4OSA_ERR NSWAVCMCS_initBitstream( NSWAVC_bitStream_t_MCS *bS )
+{
+ bS->bitPos = 0;
+ bS->byteCnt = 0;
+ bS->currBuff = 0;
+ bS->prevByte = 0xff;
+ bS->prevPrevByte = 0xff;
+
+ return M4NO_ERROR;
+}
+
+M4OSA_ERR NSWAVCMCS_putBits( NSWAVC_bitStream_t_MCS *bS, M4OSA_UInt32 value,
+ M4OSA_UInt8 length )
+{
+ M4OSA_UInt32 maskedValue = 0, temp = 0;
+ M4OSA_UInt8 byteOne;
+
+ M4OSA_UInt32 len1 = (length == 32) ? 31 : length;
+
+ if( !(length) )
+ {
+ /* Length = 0, return OK*/
+ return M4NO_ERROR;
+ }
+
+ maskedValue = (M4OSA_UInt32)(value &(( 1 << len1) - 1));
+
+ if( 32 > (length + bS->bitPos) )
+ {
+ bS->bitPos += length;
+ bS->currBuff |= maskedValue << (32 - bS->bitPos);
+ }
+ else
+ {
+ temp = (( bS->bitPos + length) - 32);
+
+ bS->currBuff |= (maskedValue >> (temp));
+
+ byteOne =
+ bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
+
+ if( (( bS->prevPrevByte
+ == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+ {
+ bS->byteCnt -= 1;
+ bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+ bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+ }
+ else
+ {
+ bS->prevPrevByte = bS->prevByte;
+ bS->prevByte = byteOne;
+ }
+ byteOne = bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
+
+ if( (( bS->prevPrevByte
+ == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+ {
+ bS->byteCnt -= 1;
+ bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+ bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+ }
+ else
+ {
+ bS->prevPrevByte = bS->prevByte;
+ bS->prevByte = byteOne;
+ }
+ byteOne = bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
+
+ if( (( bS->prevPrevByte
+ == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+ {
+ bS->byteCnt -= 1;
+ bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+ bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+ }
+ else
+ {
+ bS->prevPrevByte = bS->prevByte;
+ bS->prevByte = byteOne;
+ }
+ byteOne = bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)((bS->currBuff) &0xff);
+
+ if( (( bS->prevPrevByte
+ == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+ {
+ bS->byteCnt -= 1;
+ bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+ bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+ }
+ else
+ {
+ bS->prevPrevByte = bS->prevByte;
+ bS->prevByte = byteOne;
+ }
+
+ bS->currBuff = 0;
+
+ bS->currBuff |= ( maskedValue &(( 1 << temp) - 1)) << (32 - temp);
+
+ bS->bitPos = temp;
+ }
+
+ return M4NO_ERROR;
+}
+
+M4OSA_ERR NSWAVCMCS_putBit( NSWAVC_bitStream_t_MCS *bS, M4OSA_UInt32 value )
+{
+ M4OSA_UInt32 maskedValue = 0, temp = 0;
+ M4OSA_UInt8 byteOne;
+
+ maskedValue = (value ? 1 : 0);
+
+ if( 32 > (1 + bS->bitPos) )
+ {
+ bS->bitPos += 1;
+ bS->currBuff |= maskedValue << (32 - bS->bitPos);
+ }
+ else
+ {
+ temp = 0;
+
+ bS->currBuff |= (maskedValue);
+
+ /* writing it to memory*/
+ byteOne =
+ bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)(bS->currBuff >> 24);
+
+ if( (( bS->prevPrevByte
+ == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+ {
+ bS->byteCnt -= 1;
+ bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+ bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+ }
+ else
+ {
+ bS->prevPrevByte = bS->prevByte;
+ bS->prevByte = byteOne;
+ }
+ byteOne = bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
+
+ if( (( bS->prevPrevByte
+ == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+ {
+ bS->byteCnt -= 1;
+ bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+ bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+ }
+ else
+ {
+ bS->prevPrevByte = bS->prevByte;
+ bS->prevByte = byteOne;
+ }
+ byteOne = bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
+
+ if( (( bS->prevPrevByte
+ == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+ {
+ bS->byteCnt -= 1;
+ bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+ bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+ }
+ else
+ {
+ bS->prevPrevByte = bS->prevByte;
+ bS->prevByte = byteOne;
+ }
+ byteOne = bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)((bS->currBuff) &0xff);
+
+ if( (( bS->prevPrevByte
+ == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+ {
+ bS->byteCnt -= 1;
+ bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+ bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+ }
+ else
+ {
+ bS->prevPrevByte = bS->prevByte;
+ bS->prevByte = byteOne;
+ }
+ bS->currBuff = 0;
+ bS->bitPos = 0;
+ }
+
+ return M4NO_ERROR;
+}
+
+M4OSA_Int32 NSWAVCMCS_putRbspTbits( NSWAVC_bitStream_t_MCS *bS )
+{
+ M4OSA_UInt8 trailBits = 0;
+ M4OSA_UInt8 byteCnt = 0;
+
+ trailBits = (M4OSA_UInt8)(bS->bitPos % 8);
+
+ /* Already in the byte aligned position,
+ RBSP trailing bits will be 1000 0000 */
+ if( 0 == trailBits )
+ {
+ trailBits = (1 << 7);
+ NSWAVCMCS_putBits(bS, trailBits, 8);
+ }
+ else
+ {
+ trailBits = (8 - trailBits);
+ NSWAVCMCS_putBit(bS, 1);
+ trailBits--;
+
+ if( trailBits )
+ { /* put trailBits times zeros */
+ NSWAVCMCS_putBits(bS, 0, trailBits);
+ }
+ }
+
+ /* For writing the currBuff in streamBuff 4-byte alignment is required */
+ byteCnt = (M4OSA_UInt8)(( bS->bitPos + 4) / 8);
+
+ switch( byteCnt )
+ {
+ case 1:
+ bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
+ break;
+
+ case 2:
+ bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
+ bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
+ break;
+
+ case 3:
+ bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
+ bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
+ bS->streamBuffer[bS->byteCnt++] =
+ (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
+
+ break;
+
+ default:
+ /* It will not come here */
+ break;
+ }
+
+ // bS->bitPos =0;
+ // bS->currBuff = 0;
+
+ return M4NO_ERROR;
+}
+
+M4OSA_ERR NSWAVCMCS_uExpVLC( NSWAVC_bitStream_t_MCS *bS, M4OSA_Int32 codeNum )
+{
+
+ M4OSA_Int32 loop, temp;
+ M4OSA_Int32 data = 0;
+ M4OSA_UInt8 codeLen = 0;
+
+ /* The codeNum cannot be less than zero for this ue(v) */
+ if( codeNum < 0 )
+ {
+ return 0;
+ }
+
+ /* Implementation for Encoding of the Table 9-1 in the Standard */
+ temp = codeNum + 1;
+
+ for ( loop = 0; temp != 0; loop++ )
+ {
+ temp /= 2;
+ }
+
+ codeLen = (( loop * 2) - 1);
+
+ data = codeNum + 1;
+
+ NSWAVCMCS_putBits(bS, data, codeLen);
+
+ return M4NO_ERROR;
+}
+
+M4OSA_ERR NSWAVCMCS_sExpVLC( NSWAVC_bitStream_t_MCS *bS, M4OSA_Int32 codeNum )
+{
+
+ M4OSA_Int32 loop, temp1, temp2;
+ M4OSA_Int32 data = 0;
+ M4OSA_UInt8 codeLen = 0, isPositive = 0;
+ M4OSA_UInt32 abscodeNum;
+
+ if( codeNum > 0 )
+ {
+ isPositive = 1;
+ }
+
+ if( codeNum > 0 )
+ {
+ abscodeNum = codeNum;
+ }
+ else
+ {
+ abscodeNum = -codeNum;
+ }
+
+ temp1 = ( ( ( abscodeNum) << 1) - isPositive) + 1;
+ temp2 = temp1;
+
+ for ( loop = 0; loop < 16 && temp2 != 0; loop++ )
+ {
+ temp2 /= 2;
+ }
+
+ codeLen = ( loop * 2) - 1;
+
+ data = temp1;
+
+ NSWAVCMCS_putBits(bS, data, codeLen);
+
+ return M4NO_ERROR;
+}
+
+M4OSA_ERR H264MCS_ProcessEncodedNALU( M4OSA_Void *ainstance,
+ M4OSA_UInt8 *inbuff,
+ M4OSA_Int32 inbuf_size,
+ M4OSA_UInt8 *outbuff,
+ M4OSA_Int32 *outbuf_size )
+{
+ ComBitStreamMCS_t *p_bs, bs;
+ NSWAVC_MCS_t *instance;
+ M4OSA_UInt8 nalu_info;
+ M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
+ M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id, frame_num;
+ M4OSA_Int32 seq_parameter_set_id;
+ M4OSA_UInt8 temp1, temp2, temp3, temp4;
+ M4OSA_Int32 temp_frame_num;
+ M4OSA_Int32 bitstoDiacard, bytes;
+ M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
+ M4OSA_Int32 new_bytes, init_bit_pos;
+ M4OSA_UInt32 nal_size;
+ M4OSA_UInt32 cnt;
+ M4OSA_UInt32 outbuffpos = 0;
+ M4OSA_UInt32 nal_size_low16, nal_size_high16;
+ M4OSA_UInt32 frame_size = 0;
+ M4OSA_UInt32 temp = 0;
+
+ // StageFright encoder does not provide the size in the first 4 bytes of the AU, add it
+ M4OSA_Int8 *pTmpBuff1 = M4OSA_NULL;
+ M4OSA_Int8 *pTmpBuff2 = M4OSA_NULL;
+
+ p_bs = &bs;
+ instance = (NSWAVC_MCS_t *)ainstance;
+
+ M4OSA_TRACE1_2(
+ "In H264MCS_ProcessEncodedNALU with FrameSize = %d inBuf_Size=%d",
+ frame_size, inbuf_size);
+
+ // StageFright codecs may add a start code, make sure it is not present
+
+ if( !memcmp((void *)inbuff,
+ "\x00\x00\x00\x01", 4) )
+ {
+ M4OSA_TRACE1_3(
+ "H264MCS_ProcessNALU ERROR : NALU start code has not been removed %d "
+ "0x%X 0x%X", inbuf_size, ((M4OSA_UInt32 *)inbuff)[0],
+ ((M4OSA_UInt32 *)inbuff)[1]);
+
+ return M4ERR_PARAMETER;
+ }
+
+ // StageFright encoder does not provide the size in the first 4 bytes of the AU, add it
+ pTmpBuff1 = (M4OSA_Int8 *)M4OSA_32bitAlignedMalloc(inbuf_size + 4, M4MCS,
+ (M4OSA_Char *)"tmpNALU");
+ memcpy((void *)(pTmpBuff1 + 4), (void *)inbuff,
+ inbuf_size);
+ pTmpBuff1[3] = ( (M4OSA_UInt32)inbuf_size) & 0x000000FF;
+ pTmpBuff1[2] = ( (M4OSA_UInt32)inbuf_size >> 8) & 0x000000FF;
+ pTmpBuff1[1] = ( (M4OSA_UInt32)inbuf_size >> 16) & 0x000000FF;
+ pTmpBuff1[0] = ( (M4OSA_UInt32)inbuf_size >> 24) & 0x000000FF;
+ pTmpBuff2 = (M4OSA_Int8 *)inbuff;
+ inbuff = (M4OSA_UInt8 *)pTmpBuff1;
+ inbuf_size += 4;
+
+ // Make sure the available size was set
+ if( inbuf_size >= *outbuf_size )
+ {
+ M4OSA_TRACE1_1(
+ "!!! H264MCS_ProcessNALU ERROR : specified available size is incorrect %d ",
+ *outbuf_size);
+ return M4ERR_PARAMETER;
+ }
+
+
+
+ while( (M4OSA_Int32)frame_size < inbuf_size )
+ {
+ mask_bits = 0xFFFFFFFF;
+ p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
+
+ // Use unsigned value to fix errors due to bit sign extension, this fix should be generic
+
+ nal_size_high16 = ( ( (M4OSA_UInt8 *)p_bs->Buffer)[0] << 8)
+ + ((M4OSA_UInt8 *)p_bs->Buffer)[1];
+ nal_size_low16 = ( ( (M4OSA_UInt8 *)p_bs->Buffer)[2] << 8)
+ + ((M4OSA_UInt8 *)p_bs->Buffer)[3];
+
+ nalu_info = (unsigned char)p_bs->Buffer[4];
+
+ outbuff[outbuffpos] = p_bs->Buffer[4];
+
+ p_bs->Buffer = p_bs->Buffer + 5;
+
+ p_bs->bitPos = 0;
+ p_bs->lastTotalBits = 0;
+ p_bs->numBitsInBuffer = ( inbuf_size - frame_size - 5) << 3;
+ p_bs->readableBytesInBuffer = inbuf_size - frame_size - 5;
+
+ p_bs->ui32TempBuff = 0;
+ p_bs->i8BitCnt = 0;
+ p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
+ p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
+
+ H264MCS_getBits(p_bs, 0);
+
+ nal_size = ( nal_size_high16 << 16) + nal_size_low16;
+
+ frame_size += nal_size + 4;
+
+ forbidden_bit = ( nalu_info >> 7) & 1;
+ nal_ref_idc = ( nalu_info >> 5) & 3;
+ nal_unit_type = (nalu_info) &0x1f;
+
+ NSWAVCMCS_initBitstream(&instance->encbs);
+
+ instance->encbs.streamBuffer = outbuff + outbuffpos + 1;
+
+ if( nal_unit_type == 8 )
+ {
+ M4OSA_TRACE1_0("Error : PPS");
+ return 0;
+ }
+
+ if( nal_unit_type == 7 )
+ {
+ /*SPS Packet */
+ M4OSA_TRACE1_0("Error : SPS");
+ return 0;
+ }
+
+ if( (nal_unit_type == 5) )
+ {
+ instance->frame_count = 0;
+ instance->POC_lsb = 0;
+ }
+
+ if( ( nal_unit_type == 1) || (nal_unit_type == 5) )
+ {
+ first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
+ slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
+ pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+ /* First MB in slice */
+ NSWAVCMCS_uExpVLC(&instance->encbs, first_mb_in_slice);
+
+ /* Slice Type */
+ NSWAVCMCS_uExpVLC(&instance->encbs, slice_type);
+
+ /* Picture Parameter set Id */
+ pic_parameter_set_id = instance->encoder_pps.pic_parameter_set_id;
+ NSWAVCMCS_uExpVLC(&instance->encbs, pic_parameter_set_id);
+
+ temp = H264MCS_getBits(p_bs,
+ instance->encoder_sps.log2_max_frame_num_minus4 + 4);
+ NSWAVCMCS_putBits(&instance->encbs, instance->frame_count,
+ instance->clip_sps.log2_max_frame_num_minus4 + 4);
+
+ // In Baseline Profile: frame_mbs_only_flag should be ON
+ if( nal_unit_type == 5 )
+ {
+ temp = H264MCS_DecVLCReadExpGolombCode(p_bs);
+ NSWAVCMCS_uExpVLC(&instance->encbs, temp);
+ }
+
+ if( instance->encoder_sps.pic_order_cnt_type == 0 )
+ {
+ temp = H264MCS_getBits(p_bs,
+ instance->encoder_sps.log2_max_pic_order_cnt_lsb_minus4
+ + 4);
+
+ // in baseline profile field_pic_flag should be off.
+ if( instance->encoder_pps.pic_order_present_flag )
+ {
+ temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+ }
+ }
+
+ if( ( instance->encoder_sps.pic_order_cnt_type == 1)
+ && (instance->encoder_sps.delta_pic_order_always_zero_flag) )
+ {
+ temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+ // in baseline profile field_pic_flag should be off.
+ if( instance->encoder_pps.pic_order_present_flag )
+ {
+ temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+ }
+ }
+
+ if( instance->clip_sps.pic_order_cnt_type == 0 )
+ {
+ NSWAVCMCS_putBits(&instance->encbs, instance->POC_lsb,
+ instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
+
+ // in baseline profile field_pic_flag should be off.
+ if( instance->encoder_pps.pic_order_present_flag )
+ {
+ NSWAVCMCS_sExpVLC(&instance->encbs, 0);
+ }
+ }
+
+ if( ( instance->clip_sps.pic_order_cnt_type == 1)
+ && (instance->clip_sps.delta_pic_order_always_zero_flag) )
+ {
+ NSWAVCMCS_sExpVLC(&instance->encbs, 0);
+
+ // in baseline profile field_pic_flag should be off.
+ if( instance->encoder_pps.pic_order_present_flag )
+ {
+ NSWAVCMCS_sExpVLC(&instance->encbs, 0);
+ }
+ }
+
+ cnt = p_bs->bitPos & 0x7;
+
+ if( cnt )
+ {
+ cnt = 8 - cnt;
+ temp = H264MCS_getBits(p_bs, cnt);
+ NSWAVCMCS_putBits(&instance->encbs, temp, cnt);
+ }
+
+ cnt = p_bs->bitPos >> 3;
+
+ while( cnt < (nal_size - 2) )
+ {
+ temp = H264MCS_getBits(p_bs, 8);
+ NSWAVCMCS_putBits(&instance->encbs, temp, 8);
+ cnt = p_bs->bitPos >> 3;
+ }
+
+ temp = H264MCS_getBits(p_bs, 8);
+
+ if( temp != 0 )
+ {
+ cnt = 0;
+
+ while( ( temp & 0x1) == 0 )
+ {
+ cnt++;
+ temp = temp >> 1;
+ }
+ cnt++;
+ temp = temp >> 1;
+
+ if( 8 - cnt )
+ {
+ NSWAVCMCS_putBits(&instance->encbs, temp, (8 - cnt));
+ }
+
+ NSWAVCMCS_putRbspTbits(&instance->encbs);
+ }
+ else
+ {
+
+ M4OSA_TRACE1_1(
+ "H264MCS_ProcessEncodedNALU : 13 temp = 0 trailing bits = %d",
+ instance->encbs.bitPos % 8);
+
+ if( instance->encbs.bitPos % 8 )
+ {
+ NSWAVCMCS_putBits(&instance->encbs, 0,
+ (8 - instance->encbs.bitPos % 8));
+ }
+ }
+
+ temp = instance->encbs.byteCnt;
+ temp = temp + 1;
+
+ outbuffpos = outbuffpos + temp;
+ }
+ }
+
+ *outbuf_size = outbuffpos;
+
+ instance->POC_lsb = instance->POC_lsb + 1;
+
+ if( instance->POC_lsb == instance->POC_lsb_mod )
+ {
+ instance->POC_lsb = 0;
+ }
+ instance->frame_count = instance->frame_count + 1;
+
+ if( instance->frame_count == instance->frame_mod_count )
+ {
+ instance->frame_count = 0;
+ }
+
+ // StageFright encoder does not provide the size in the first 4 bytes of the AU, add it
+
+ free(pTmpBuff1);
+ pTmpBuff1 = M4OSA_NULL;
+ inbuff = (M4OSA_UInt8 *)pTmpBuff2;
+
+ return M4NO_ERROR;
+}
+
+/**
+ * Decode an H.264 Sequence Parameter Set (SPS) from the bitstream reader
+ * p_bs into *sps (syntax per ITU-T H.264 section 7.3.2.1).
+ *
+ * @param p_bs (IN)  bitstream reader positioned just after the NAL header byte
+ * @param sps  (OUT) decoded SPS fields, plus derived values (MaxFrameNum,
+ *                   MaxPicOrderCntLsb, PicWidthInMbs, FrameHeightInMbs)
+ * @return M4NO_ERROR always — no parse-error detection is performed here.
+ *
+ * NOTE(review): the locals temp_max_dpb_size, nb_ignore_bits, error,
+ * profile_idc, level_idc, seq_parameter_set_id and the constraint_set*_flag
+ * locals are declared but never used; kept for byte-compatibility.
+ */
+M4OSA_Int32 DecSPSMCS( ComBitStreamMCS_t *p_bs,
+    ComSequenceParameterSet_t_MCS *sps )
+{
+    M4OSA_UInt32 i;
+    M4OSA_Int32 temp_max_dpb_size;
+    M4OSA_Int32 nb_ignore_bits;
+    M4OSA_Int32 error;
+    M4OSA_UInt8 profile_idc, level_idc, reserved_zero_4bits,
+        seq_parameter_set_id;
+    M4OSA_UInt8 constraint_set0_flag, constraint_set1_flag,
+        constraint_set2_flag, constraint_set3_flag;
+
+    /* Fixed-length header fields: profile, constraint flags, level. */
+    sps->profile_idc = (M4OSA_UInt8)H264MCS_getBits(p_bs, 8);
+    sps->constraint_set0_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    sps->constraint_set1_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    sps->constraint_set2_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    sps->constraint_set3_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    reserved_zero_4bits = (M4OSA_UInt8)H264MCS_getBits(p_bs, 4);
+    sps->level_idc = (M4OSA_UInt8)H264MCS_getBits(p_bs, 8);
+    sps->seq_parameter_set_id =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    sps->log2_max_frame_num_minus4 =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    /* Derived: MaxFrameNum = 2^(log2_max_frame_num_minus4 + 4). */
+    sps->MaxFrameNum = 1 << (sps->log2_max_frame_num_minus4 + 4);
+    sps->pic_order_cnt_type =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+    if (sps->pic_order_cnt_type == 0)
+    {
+        sps->log2_max_pic_order_cnt_lsb_minus4 =
+            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+        /* Derived: MaxPicOrderCntLsb = 2^(log2_max_pic_order_cnt_lsb_minus4+4). */
+        sps->MaxPicOrderCntLsb =
+            1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+    }
+    else if( sps->pic_order_cnt_type == 1 )
+    {
+        sps->delta_pic_order_always_zero_flag =
+            (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+        // This fix should be generic to remove codec dependency
+
+        sps->offset_for_non_ref_pic =
+            H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+        sps->offset_for_top_to_bottom_field =
+            H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+
+        /*num_ref_frames_in_pic_order_cnt_cycle must be in the range 0, 255*/
+
+        sps->num_ref_frames_in_pic_order_cnt_cycle =
+            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+        /* compute deltaPOC */
+        sps->expectedDeltaPerPicOrderCntCycle = 0;
+
+        for ( i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++ )
+        {
+            // This fix should be generic to remove codec dependency
+            sps->offset_for_ref_frame[i] =
+                H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+            sps->expectedDeltaPerPicOrderCntCycle +=
+                sps->offset_for_ref_frame[i];
+        }
+    }
+
+    /* num_ref_frames must be in the range 0,16 */
+    sps->num_ref_frames = (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    sps->gaps_in_frame_num_value_allowed_flag =
+        (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    sps->pic_width_in_mbs_minus1 =
+        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    sps->pic_height_in_map_units_minus1 =
+        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+    sps->frame_mbs_only_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    if (!sps->frame_mbs_only_flag)
+    {
+        sps->mb_adaptive_frame_field_flag =
+            (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    }
+    else
+    {
+        /* mb_adaptive_frame_field_flag is absent from the stream; spec default. */
+        sps->mb_adaptive_frame_field_flag = 0;
+    }
+
+    /* Derived picture dimensions in macroblock units. */
+    sps->PicWidthInMbs = sps->pic_width_in_mbs_minus1 + 1;
+    sps->FrameHeightInMbs = ( 2 - sps->frame_mbs_only_flag) * \
+        (sps->pic_height_in_map_units_minus1 + 1);
+#ifdef _CAP_FMO_
+
+    sps->NumSliceGroupMapUnits =
+        sps->PicWidthInMbs * (sps->pic_height_in_map_units_minus1 + 1);
+    sps->MaxPicSizeInMbs = sps->PicWidthInMbs * sps->FrameHeightInMbs;
+
+#endif /*_CAP_FMO_*/
+
+    sps->direct_8x8_inference_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    /* Field coding implies direct_8x8_inference_flag == 1 (H.264 constraint). */
+    if( sps->frame_mbs_only_flag == 0 )
+        sps->direct_8x8_inference_flag = 1;
+
+    sps->frame_cropping_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    if( sps->frame_cropping_flag )
+    {
+        sps->frame_crop_left_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        sps->frame_crop_right_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        sps->frame_crop_top_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        sps->frame_crop_bottom_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
+    }
+    else
+    {
+        sps->frame_crop_left_offset = 0;
+        sps->frame_crop_right_offset = 0;
+        sps->frame_crop_top_offset = 0;
+        sps->frame_crop_bottom_offset = 0;
+    }
+
+    sps->vui_parameters_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    if (sps->vui_parameters_present_flag) {
+        /* no error message as stream can be decoded without VUI messages */
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ * Decode an H.264 Picture Parameter Set (PPS) from the bitstream reader
+ * p_bs into *pps (syntax per ITU-T H.264 section 7.3.2.2).
+ *
+ * @param p_bs (IN)  bitstream reader positioned just after the NAL header byte
+ * @param pps  (OUT) decoded PPS fields
+ * @return M4NO_ERROR, or M4ERR_ALLOC when the FMO slice-group-id table
+ *         (only compiled under _CAP_FMO_) cannot be allocated.
+ *
+ * NOTE(review): the locals error and nb_ignore_bits are declared but unused.
+ */
+M4OSA_Int32 DecPPSMCS( ComBitStreamMCS_t *p_bs,
+    ComPictureParameterSet_t_MCS *pps )
+{
+    M4OSA_Int32 error;
+    M4OSA_UInt32 pic_parameter_set_id;
+
+#ifdef _CAP_FMO_
+    M4OSA_UInt32 i, length, v;
+#endif
+
+    M4OSA_Int32 nb_ignore_bits;
+
+    pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
+    pps->pic_parameter_set_id = (M4OSA_UInt8)pic_parameter_set_id;
+
+    pps->seq_parameter_set_id =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+    /* entropy_coding_mode_flag must be 0 or 1 */
+    pps->entropy_coding_mode_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    pps->pic_order_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    pps->num_slice_groups_minus1 =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+#ifdef _CAP_FMO_
+    /* FMO stuff begins here */
+
+    pps->map_initialized = FALSE;
+
+    if( pps->num_slice_groups_minus1 > 0 )
+    {
+        pps->slice_group_map_type =
+            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+        switch( pps->slice_group_map_type )
+        {
+            case 0:
+                for ( i = 0; i <= pps->num_slice_groups_minus1; i++ )
+                {
+                    pps->run_length_minus1[i] =
+                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+                }
+                break;
+
+            case 2:
+                for ( i = 0; i < pps->num_slice_groups_minus1; i++ )
+                {
+                    pps->top_left[i] =
+                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+                    pps->bottom_right[i] =
+                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+                }
+                break;
+
+            case 3:
+            case 4:
+            case 5:
+                pps->slice_group_change_direction_flag =
+                    (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+                pps->slice_group_change_rate_minus1 =
+                    (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+                break;
+
+            case 6:
+                pps->pic_size_in_map_units_minus1 =
+                    (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+                pps->slice_group_id = (H264UInt8
+                    *)M4H264Dec_malloc((pps->pic_size_in_map_units_minus1
+                    + 1), M4H264_COREID, (M4OSA_Char *)"PPS");
+
+                if (M4OSA_NULL == pps->slice_group_id)
+                {
+                    M4OSA_TRACE1_0("DecPPSMCS: allocation error");
+                    return M4ERR_ALLOC;
+                }
+
+                /* length = ceil(log2(num_slice_groups_minus1 + 1)) bits per id. */
+                for ( length = 0, v = pps->num_slice_groups_minus1 + 1; v != 0;
+                    v >>= 1, length++ );
+
+                for ( i = 0; i <= pps->pic_size_in_map_units_minus1; i++ )
+                {
+                    pps->slice_group_id[i] =
+                        (M4OSA_UInt8)getBits(p_vlc_engine->p_bs, length);
+                }
+                break;
+        }
+    }
+    else
+    {
+        pps->slice_group_map_type = 0;
+    }
+    /* End of FMO stuff */
+
+#else
+
+#endif /* _CAP_FMO_ */
+
+    /* num_ref_idx_l0_active_minus1 must be in the range 0, 31 */
+
+    pps->num_ref_idx_l0_active_minus1 =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    /* num_ref_idx_l1_active_minus1 must be in the range 0, 31 */
+    pps->num_ref_idx_l1_active_minus1 =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    pps->weighted_pred_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    /* weighted_bipred_idc must be in the range 0,2 */
+    /* NOTE(review): a 2-bit field (range 0..2) is cast to M4OSA_Bool here —
+     * verify M4OSA_Bool is wide enough to hold the value 2 without truncation. */
+    pps->weighted_bipred_idc = (M4OSA_Bool)H264MCS_getBits(p_bs, 2);
+
+    /* pic_init_qp_minus26 must be in the range -26,25 */
+    pps->pic_init_qp_minus26 =
+        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+    /* pic_init_qs_minus26 must be in the range -26,25 */
+    pps->pic_init_qs_minus26 =
+        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+    /* chroma_qp_index_offset must be in the range -12,+12 */
+    pps->chroma_qp_index_offset =
+        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+    pps->deblocking_filter_control_present_flag =
+        (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    pps->constrained_intra_pred_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    pps->redundant_pic_cnt_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    return M4NO_ERROR;
+}
+
+/**
+ * Merge the encoder's SPS/PPS with the input clip's decoder specific info
+ * (avcC-style DSI) and build the final DSI used for the transcoded output.
+ *
+ * Steps:
+ *  1. Parse the clip DSI to collect all SPS/PPS ids present.
+ *  2. Scan the first slice NALU of the cut frame in inbuff to find the
+ *     active PPS id (and through it the active SPS id).
+ *  3. Compare the encoder PPS bit-by-bit against each clip PPS that
+ *     references the active SPS; if an identical one exists, reuse it,
+ *     otherwise append the encoder PPS (with a free PPS id) to the DSI.
+ *  4. Decode the selected clip SPS and the encoder SPS/PPS into the
+ *     instance, and initialize frame_num / POC counters.
+ *
+ * @param instance   (IN/OUT) MCS H.264 rewriting context; on success
+ *                   m_pFinalDSI/m_pFinalDSISize, final_SPS_ID/final_PPS_ID,
+ *                   clip_sps/encoder_sps/encoder_pps and the counters are set.
+ * @param inbuff     (IN) first access unit of the cut, length-prefixed NALUs
+ *                   (prefix size from the DSI's lengthSizeMinusOne field)
+ * @param inbuf_size (IN) size of inbuff in bytes
+ * @return M4NO_ERROR, M4ERR_ALLOC on allocation failure, M4ERR_PARAMETER on
+ *         SPS decode failure, or 100 when the encoder uses more reference
+ *         frames than the clip (unsupported).
+ */
+M4OSA_ERR H264MCS_ProcessSPS_PPS( NSWAVC_MCS_t *instance, M4OSA_UInt8 *inbuff,
+    M4OSA_Int32 inbuf_size )
+{
+    ComBitStreamMCS_t *p_bs, bs;
+    ComBitStreamMCS_t *p_bs1, bs1;
+
+    M4OSA_UInt8 nalu_info = 0;
+    M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
+    M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id = 0,
+        frame_num;
+    M4OSA_Int32 seq_parameter_set_id;
+    M4OSA_UInt8 temp1, temp2, temp3, temp4;
+    M4OSA_Int32 temp_frame_num;
+    M4OSA_Int32 bitstoDiacard, bytes;
+    M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
+    M4OSA_Int32 new_bytes, init_bit_pos;
+    M4OSA_UInt32 nal_size = 0;
+    M4OSA_UInt32 cnt, cnt1;
+    M4OSA_UInt32 outbuffpos = 0;
+    M4OSA_UInt32 nal_size_low16, nal_size_high16;
+    M4OSA_UInt32 frame_size = 0;
+    M4OSA_UInt32 temp = 0;
+    M4OSA_UInt8 *lClipDSI;
+    M4OSA_UInt8 *lClipDSI_PPS_start;
+    M4OSA_UInt32 lClipDSI_PPS_offset = 0;
+
+    M4OSA_UInt8 *lPPS_Buffer = M4OSA_NULL;
+    M4OSA_UInt32 lPPS_Buffer_Size = 0;
+
+    M4OSA_UInt32 lSize, lSize1;
+    M4OSA_UInt32 lActiveSPSID_Clip;
+    M4OSA_UInt32 lClipPPSRemBits = 0;
+
+    M4OSA_UInt32 lEncoder_SPSID = 0;
+    M4OSA_UInt32 lEncoder_PPSID = 0;
+    M4OSA_UInt32 lEncoderPPSRemBits = 0;
+    M4OSA_UInt32 lFound = 0;
+    M4OSA_UInt32 size;
+
+    /* Id tables indexed by SPS id (0..31) / PPS id (0..255). */
+    M4OSA_UInt8 Clip_SPSID[32] = { 0 };
+    M4OSA_UInt8 Clip_UsedSPSID[32] = { 0 };
+    M4OSA_UInt8 Clip_PPSID[256] = { 0 };
+    M4OSA_UInt8 Clip_SPSID_in_PPS[256] = { 0 };
+    M4OSA_UInt8 Clip_UsedPPSID[256] = { 0 };
+    M4OSA_ERR err = M4NO_ERROR;
+
+    p_bs = &bs;
+    p_bs1 = &bs1;
+
+    /* Find the active SPS ID */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
+        "H264MCS_ProcessSPS_PPS: instance is M4OSA_NULL");
+
+    /* avcC byte 4 low 2 bits = lengthSizeMinusOne; byte 5 low 5 bits = SPS count. */
+    instance->m_Num_Bytes_NALUnitLength =
+        (instance->m_pDecoderSpecificInfo[4] & 0x03) + 1;
+
+    instance->m_encoder_SPS_Cnt = instance->m_pDecoderSpecificInfo[5] & 0x1F;
+
+    lClipDSI = instance->m_pDecoderSpecificInfo + 6;
+
+    lClipDSI_PPS_offset = 6;
+
+    /* Pass 1: walk the SPS entries (16-bit big-endian size + payload) and
+     * record each SPS id found in the clip DSI. */
+    for ( cnt = 0; cnt < instance->m_encoder_SPS_Cnt; cnt++ )
+    {
+        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
+        lClipDSI = lClipDSI + 2;
+
+        /* Skip the NAL header byte and the 3 fixed profile/level bytes
+         * before the Exp-Golomb seq_parameter_set_id. */
+        p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 4);
+        DecBitStreamReset_MCS(p_bs, lSize - 4);
+
+        Clip_SPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        Clip_UsedSPSID[Clip_SPSID[cnt]] = 1;
+
+        lClipDSI = lClipDSI + lSize;
+        lClipDSI_PPS_offset = lClipDSI_PPS_offset + 2 + lSize;
+    }
+
+    /* The byte after the SPS entries is the PPS count. */
+    instance->m_encoder_PPS_Cnt = lClipDSI[0];
+    lClipDSI = lClipDSI + 1;
+
+    lClipDSI_PPS_start = lClipDSI;
+
+    /* Pass 2: record each PPS id and the SPS id it references. */
+    for ( cnt = 0; cnt < instance->m_encoder_PPS_Cnt; cnt++ )
+    {
+        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
+        lClipDSI = lClipDSI + 2;
+
+        p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
+        DecBitStreamReset_MCS(p_bs, lSize - 1);
+
+        Clip_PPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        Clip_UsedPPSID[Clip_PPSID[cnt]] = 1;
+        Clip_SPSID_in_PPS[Clip_PPSID[cnt]] =
+            H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+        lClipDSI = lClipDSI + lSize;
+    }
+
+    /* Find the clip SPS ID used at the cut start frame */
+    while( ( (M4OSA_Int32)frame_size) < inbuf_size )
+    {
+        mask_bits = 0xFFFFFFFF;
+        p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
+
+        /* Read the NALU length prefix (1, 2 or 4 bytes, big-endian) and the
+         * NAL header byte that follows it. */
+        switch( instance->m_Num_Bytes_NALUnitLength )
+        {
+            case 1:
+                nal_size = (unsigned char)p_bs->Buffer[0];
+                nalu_info = (unsigned char)p_bs->Buffer[1];
+                p_bs->Buffer = p_bs->Buffer + 2;
+
+                break;
+
+            case 2:
+                nal_size_high16 = ( p_bs->Buffer[0] << 8) + p_bs->Buffer[1];
+                nal_size = nal_size_high16;
+                nalu_info = (unsigned char)p_bs->Buffer[2];
+                p_bs->Buffer = p_bs->Buffer + 3;
+
+                break;
+
+            case 4:
+                nal_size_high16 = ( p_bs->Buffer[0] << 8) + p_bs->Buffer[1];
+                nal_size_low16 = ( p_bs->Buffer[2] << 8) + p_bs->Buffer[3];
+                nal_size = ( nal_size_high16 << 16) + nal_size_low16;
+                nalu_info = (unsigned char)p_bs->Buffer[4];
+                p_bs->Buffer = p_bs->Buffer + 5;
+
+                break;
+        }
+
+        if (nal_size == 0) {
+            M4OSA_TRACE1_1("0 size nal unit at line %d", __LINE__);
+            frame_size += instance->m_Num_Bytes_NALUnitLength;
+            continue;
+        }
+
+        /* Reset the bit reader on the NALU payload (after the header byte). */
+        p_bs->bitPos = 0;
+        p_bs->lastTotalBits = 0;
+        p_bs->numBitsInBuffer =
+            ( inbuf_size - frame_size - instance->m_Num_Bytes_NALUnitLength - 1)
+            << 3;
+        p_bs->readableBytesInBuffer =
+            inbuf_size - frame_size - instance->m_Num_Bytes_NALUnitLength - 1;
+
+        p_bs->ui32TempBuff = 0;
+        p_bs->i8BitCnt = 0;
+        p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
+        p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
+
+        H264MCS_getBits(p_bs, 0);
+
+        frame_size += nal_size + instance->m_Num_Bytes_NALUnitLength;
+
+        forbidden_bit = ( nalu_info >> 7) & 1;
+        nal_ref_idc = ( nalu_info >> 5) & 3;
+        nal_unit_type = (nalu_info) &0x1f;
+
+        /* In-band SPS/PPS NALUs are not expected here. */
+        if( nal_unit_type == 8 )
+        {
+            M4OSA_TRACE1_0("H264MCS_ProcessSPS_PPS() Error: PPS");
+            return err;
+        }
+
+        if( nal_unit_type == 7 )
+        {
+            /*SPS Packet */
+            M4OSA_TRACE1_0("H264MCS_ProcessSPS_PPS() Error: SPS");
+            return err;
+        }
+
+        /* First non-IDR (1) or IDR (5) slice gives us the active PPS id. */
+        if( ( nal_unit_type == 1) || (nal_unit_type == 5) )
+        {
+            first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            break;
+        }
+    }
+
+    lActiveSPSID_Clip = Clip_SPSID_in_PPS[pic_parameter_set_id];
+
+    instance->final_SPS_ID = lActiveSPSID_Clip;
+    /* Do we need to add encoder PPS to clip PPS */
+
+    lClipDSI = lClipDSI_PPS_start;
+
+    /* Compare the encoder PPS against each clip PPS that references the
+     * active SPS; lFound is set when an exact bit-for-bit match exists. */
+    for ( cnt = 0; cnt < instance->m_encoder_PPS_Cnt; cnt++ )
+    {
+        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
+        lClipDSI = lClipDSI + 2;
+
+        if( lActiveSPSID_Clip == Clip_SPSID_in_PPS[Clip_PPSID[cnt]] )
+        {
+            lPPS_Buffer = lClipDSI + 1;
+            lPPS_Buffer_Size = lSize - 1;
+
+            p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
+            DecBitStreamReset_MCS(p_bs, lSize - 1);
+
+            /* NOTE(review): Clip_UsedPPSID is indexed with Clip_SPSID[cnt]
+             * here while elsewhere it is indexed with Clip_PPSID[cnt] —
+             * looks inconsistent; confirm intended index. */
+            Clip_PPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            Clip_UsedPPSID[Clip_SPSID[cnt]] = 1;
+            Clip_SPSID_in_PPS[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            /* Remaining payload bits after the two leading ids. */
+            lClipPPSRemBits = ( lSize - 1) << 3;
+            lClipPPSRemBits -= p_bs->bitPos;
+
+            /* Strip the rbsp_stop_one_bit and trailing zero bits from the
+             * last byte to get the exact significant bit count. */
+            temp = lClipDSI[lSize - 1];
+
+            cnt1 = 0;
+
+            while( ( temp & 0x1) == 0 )
+            {
+                cnt1++;
+                temp = temp >> 1;
+            }
+            cnt1++;
+            lClipPPSRemBits -= cnt1;
+
+            /* Same computation for the encoder PPS. */
+            lSize1 = instance->m_encoderPPSSize - 1;
+            p_bs1->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderPPS + 1);
+            DecBitStreamReset_MCS(p_bs1, lSize1);
+
+            lEncoder_PPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
+            lEncoder_SPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
+
+            lEncoderPPSRemBits = ( lSize1) << 3;
+            lEncoderPPSRemBits -= p_bs1->bitPos;
+
+            temp = instance->m_pEncoderPPS[lSize1];
+
+            cnt1 = 0;
+
+            while( ( temp & 0x1) == 0 )
+            {
+                cnt1++;
+                temp = temp >> 1;
+            }
+            cnt1++;
+            lEncoderPPSRemBits -= cnt1;
+
+            /* Only when both payloads have the same bit length compare them
+             * byte-by-byte (ids excluded). */
+            if( lEncoderPPSRemBits == lClipPPSRemBits )
+            {
+                while( lEncoderPPSRemBits > 8 )
+                {
+                    temp1 = H264MCS_getBits(p_bs, 8);
+                    temp2 = H264MCS_getBits(p_bs1, 8);
+                    lEncoderPPSRemBits = lEncoderPPSRemBits - 8;
+
+                    if( temp1 != temp2 )
+                    {
+                        break;
+                    }
+                }
+
+                if( lEncoderPPSRemBits < 8 )
+                {
+                    if( lEncoderPPSRemBits )
+                    {
+                        temp1 = H264MCS_getBits(p_bs, lEncoderPPSRemBits);
+                        temp2 = H264MCS_getBits(p_bs1, lEncoderPPSRemBits);
+
+                        if( temp1 == temp2 )
+                        {
+                            lFound = 1;
+                        }
+                    }
+                    else
+                    {
+                        lFound = 1;
+                    }
+                }
+                break;
+            }
+        }
+
+        lClipDSI = lClipDSI + lSize;
+    }
+
+    /* Form the final SPS and PPS data */
+
+    if( lFound == 1 )
+    {
+        /* No need to add PPS */
+        instance->final_PPS_ID = Clip_PPSID[cnt];
+
+        instance->m_pFinalDSI =
+            (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(instance->m_decoderSpecificInfoSize,
+            M4MCS, (M4OSA_Char *)"instance->m_pFinalDSI");
+
+        if( instance->m_pFinalDSI == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
+            return M4ERR_ALLOC;
+        }
+
+        instance->m_pFinalDSISize = instance->m_decoderSpecificInfoSize;
+        memcpy((void *)instance->m_pFinalDSI,
+            (void *)instance->m_pDecoderSpecificInfo,
+            instance->m_decoderSpecificInfoSize);
+    }
+    else
+    {
+        /* ADD PPS */
+        /* find the free PPS ID */
+
+        cnt = 0;
+
+        while( Clip_UsedPPSID[cnt] )
+        {
+            cnt++;
+        }
+        instance->final_PPS_ID = cnt;
+
+        /* +10 gives headroom for the 2-byte size field, the NAL header byte
+         * and the re-encoded id fields appended after the clip DSI. */
+        size = instance->m_decoderSpecificInfoSize + instance->m_encoderPPSSize
+            + 10;
+
+        instance->m_pFinalDSI = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(size, M4MCS,
+            (M4OSA_Char *)"instance->m_pFinalDSI");
+
+        if( instance->m_pFinalDSI == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
+            return M4ERR_ALLOC;
+        }
+
+        memcpy((void *)instance->m_pFinalDSI,
+            (void *)instance->m_pDecoderSpecificInfo,
+            instance->m_decoderSpecificInfoSize);
+
+        /* Bump the PPS count byte in the copied DSI. */
+        temp = instance->m_pFinalDSI[lClipDSI_PPS_offset];
+        temp = temp + 1;
+        instance->m_pFinalDSI[lClipDSI_PPS_offset] = temp;
+
+        //temp = instance->m_pEncoderPPS[0];
+        lSize1 = instance->m_encoderPPSSize - 1;
+        p_bs1->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderPPS + 1);
+        DecBitStreamReset_MCS(p_bs1, lSize1);
+
+        lEncoder_PPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
+        lEncoder_SPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
+
+        lEncoderPPSRemBits = ( lSize1) << 3;
+        lEncoderPPSRemBits -= p_bs1->bitPos;
+
+        temp = instance->m_pEncoderPPS[lSize1];
+
+        cnt1 = 0;
+
+        while( ( temp & 0x1) == 0 )
+        {
+            cnt1++;
+            temp = temp >> 1;
+        }
+        cnt1++;
+        lEncoderPPSRemBits -= cnt1;
+
+        /* Copy the encoder's NAL header byte, then re-encode the PPS with the
+         * chosen free PPS id and the active SPS id, followed by the rest of
+         * the encoder PPS payload. */
+        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 2] =
+            instance->m_pEncoderPPS[0];
+
+        NSWAVCMCS_initBitstream(&instance->encbs);
+        instance->encbs.streamBuffer =
+            &(instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 3]);
+        lPPS_Buffer = instance->encbs.streamBuffer;
+
+        NSWAVCMCS_uExpVLC(&instance->encbs, instance->final_PPS_ID);
+        NSWAVCMCS_uExpVLC(&instance->encbs, instance->final_SPS_ID);
+
+        while( lEncoderPPSRemBits > 8 )
+        {
+            temp = H264MCS_getBits(p_bs1, 8);
+            NSWAVCMCS_putBits(&instance->encbs, temp, 8);
+            lEncoderPPSRemBits = lEncoderPPSRemBits - 8;
+        }
+
+        if( lEncoderPPSRemBits )
+        {
+            temp = H264MCS_getBits(p_bs1, lEncoderPPSRemBits);
+            NSWAVCMCS_putBits(&instance->encbs, temp, lEncoderPPSRemBits);
+        }
+        NSWAVCMCS_putRbspTbits(&instance->encbs);
+
+        temp = instance->encbs.byteCnt;
+        lPPS_Buffer_Size = temp;
+        temp = temp + 1;
+
+        /* Write the 16-bit big-endian size (payload + NAL header byte) of the
+         * appended PPS entry and the final DSI size. */
+        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize] =
+            ( temp >> 8) & 0xFF;
+        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 1] =
+            (temp) &0xFF;
+        instance->m_pFinalDSISize =
+            instance->m_decoderSpecificInfoSize + 2 + temp;
+    }
+
+    /* Decode the clip SPS */
+
+    lClipDSI = instance->m_pDecoderSpecificInfo + 6;
+
+    lClipDSI_PPS_offset = 6;
+
+    for ( cnt = 0; cnt < instance->m_encoder_SPS_Cnt; cnt++ )
+    {
+        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
+        lClipDSI = lClipDSI + 2;
+
+        if( Clip_SPSID[cnt] == instance->final_SPS_ID )
+        {
+            p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
+            DecBitStreamReset_MCS(p_bs, lSize - 1);
+
+            err = DecSPSMCS(p_bs, &instance->clip_sps);
+            if(err != M4NO_ERROR) {
+                return M4ERR_PARAMETER;
+            }
+
+            //Clip_SPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            //Clip_UsedSPSID[Clip_SPSID[cnt]] = 1;
+            break;
+        }
+
+        lClipDSI = lClipDSI + lSize;
+    }
+
+    /* Decode encoder SPS */
+    p_bs->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderSPS + 1);
+    DecBitStreamReset_MCS(p_bs, instance->m_encoderSPSSize - 1);
+    err = DecSPSMCS(p_bs, &instance->encoder_sps);
+    if(err != M4NO_ERROR) {
+        return M4ERR_PARAMETER;
+    }
+
+    if( instance->encoder_sps.num_ref_frames
+        > instance->clip_sps.num_ref_frames )
+    {
+        return 100; //not supported
+    }
+
+    /* Decode the PPS that will actually be referenced in the output. */
+    p_bs->Buffer = (M4OSA_UInt8 *)lPPS_Buffer;
+    DecBitStreamReset_MCS(p_bs, lPPS_Buffer_Size);
+    DecPPSMCS(p_bs, &instance->encoder_pps);
+
+    /* Initialize frame_num / POC counters and their wrap-around moduli
+     * from the clip SPS. */
+    instance->frame_count = 0;
+    instance->frame_mod_count =
+        1 << (instance->clip_sps.log2_max_frame_num_minus4 + 4);
+
+    instance->POC_lsb = 0;
+    instance->POC_lsb_mod =
+        1 << (instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
+
+    return M4NO_ERROR;
+}
+
<parameter name="command">+/**
+ * Rewrite one access unit of byte-stream NALUs (start-code delimited) into
+ * length-prefixed NALUs, re-encoding non-IDR slice headers with the
+ * instance's frame_num / POC counters and pass-through copying all other
+ * NALU types. Processing stops permanently once an IDR slice (type 5) is
+ * seen (instance->is_done is set).
+ *
+ * @param ainstance   (IN/OUT) MCS H.264 rewriting context (counters updated)
+ * @param inbuff      (IN/OUT) access unit; the first 4 bytes are overwritten
+ *                    with a 00 00 00 01 start code so NALU scanning works
+ * @param inbuf_size  (IN) size of inbuff in bytes
+ * @param outbuff     (OUT) rewritten access unit, 4-byte length-prefixed NALUs
+ * @param outbuf_size (OUT) number of bytes written to outbuff
+ * @return M4NO_ERROR (also returned early when is_done or on SPS/PPS NALUs)
+ */
+M4OSA_ERR H264MCS_ProcessNALU( NSWAVC_MCS_t *ainstance, M4OSA_UInt8 *inbuff,
+    M4OSA_Int32 inbuf_size, M4OSA_UInt8 *outbuff,
+    M4OSA_Int32 *outbuf_size )
+{
+    ComBitStreamMCS_t *p_bs, bs;
+    NSWAVC_MCS_t *instance;
+    M4OSA_UInt8 nalu_info;
+    M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
+    M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id, frame_num;
+    M4OSA_Int32 seq_parameter_set_id;
+    M4OSA_UInt8 temp1, temp2, temp3, temp4;
+    M4OSA_Int32 temp_frame_num;
+    M4OSA_Int32 bitstoDiacard, bytes;
+    M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
+    M4OSA_Int32 new_bytes, init_bit_pos;
+    M4OSA_UInt32 nal_size;
+    M4OSA_UInt32 cnt;
+    M4OSA_UInt32 outbuffpos = 0;
+    //#ifndef DGR_FIX // + new
+    M4OSA_UInt32 nal_size_low16, nal_size_high16;
+    //#endif // + end new
+    M4OSA_UInt32 frame_size = 0;
+    M4OSA_UInt32 temp = 0;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8 *buff;
+
+    p_bs = &bs;
+    instance = (NSWAVC_MCS_t *)ainstance;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
+        "H264MCS_ProcessNALU: instance is M4OSA_NULL");
+
+    /* Once an IDR was reached in a previous call, nothing more to rewrite. */
+    if( instance->is_done )
+        return err;
+
+    /* Force a start code at the head so the scan below finds NALU bounds. */
+    inbuff[0] = 0x00;
+    inbuff[1] = 0x00;
+    inbuff[2] = 0x00;
+    inbuff[3] = 0x01;
+
+
+    while( (M4OSA_Int32)frame_size < inbuf_size )
+    {
+        mask_bits = 0xFFFFFFFF;
+        p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
+
+
+        /* Byte 4 (right after the 4-byte start code) is the NAL header. */
+        nalu_info = (unsigned char)p_bs->Buffer[4];
+
+        /* Copy start code + NAL header; the first 4 bytes are later
+         * overwritten with the NALU length for the rewritten-slice path. */
+        outbuff[outbuffpos] = p_bs->Buffer[0];
+        outbuff[outbuffpos + 1] = p_bs->Buffer[1];
+        outbuff[outbuffpos + 2] = p_bs->Buffer[2];
+        outbuff[outbuffpos + 3] = p_bs->Buffer[3];
+        outbuff[outbuffpos + 4] = p_bs->Buffer[4];
+
+        p_bs->Buffer = p_bs->Buffer + 5;
+
+        /* Reset the bit reader on the slice payload. */
+        p_bs->bitPos = 0;
+        p_bs->lastTotalBits = 0;
+        p_bs->numBitsInBuffer = ( inbuf_size - frame_size - 5) << 3;
+        p_bs->readableBytesInBuffer = inbuf_size - frame_size - 5;
+
+        p_bs->ui32TempBuff = 0;
+        p_bs->i8BitCnt = 0;
+        p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
+        p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
+
+        H264MCS_getBits(p_bs, 0);
+
+
+
+        /* Scan forward for the next 00 00 00 01 start code to determine the
+         * current NALU's size. */
+        nal_size = inbuf_size - frame_size - 4;
+        buff = inbuff + frame_size + 4;
+
+        while( nal_size > 4 )
+        {
+            if( ( buff[0] == 0x00) && (buff[1] == 0x00) && (buff[2] == 0x00)
+                && (buff[3] == 0x01) )
+            {
+                break;
+            }
+            buff = buff + 1;
+            nal_size = nal_size - 1;
+        }
+
+        if( nal_size <= 4 )
+        {
+            /* No further start code: this NALU runs to the end of the buffer. */
+            nal_size = 0;
+        }
+        nal_size = ( inbuf_size - frame_size - 4) - nal_size;
+
+        // M4OSA_TRACE1_3("H264MCS_ProcessNALU frame input buff size = %d current position
+        //= %d nal size = %d",
+        //    inbuf_size, frame_size, nal_size + 4);
+        frame_size += nal_size + 4;
+
+
+
+        forbidden_bit = ( nalu_info >> 7) & 1;
+        nal_ref_idc = ( nalu_info >> 5) & 3;
+        nal_unit_type = (nalu_info) &0x1f;
+
+        if( nal_unit_type == 5 )
+        {
+            /*IDR/PPS Packet - Do nothing*/
+            instance->is_done = 1;
+            return err;
+        }
+
+        NSWAVCMCS_initBitstream(&instance->encbs);
+        instance->encbs.streamBuffer = outbuff + outbuffpos + 5;
+
+        if( nal_unit_type == 8 )
+        {
+            M4OSA_TRACE1_0("H264MCS_ProcessNALU() Error: PPS");
+            return err;
+        }
+
+        if( nal_unit_type == 7 )
+        {
+            /*SPS Packet */
+            M4OSA_TRACE1_0("H264MCS_ProcessNALU() Error: SPS");
+            return 0;
+        }
+
+        /* NOTE(review): unreachable — nal_unit_type == 5 already returned above. */
+        if( (nal_unit_type == 5) )
+        {
+            instance->frame_count = 0;
+            instance->POC_lsb = 0;
+        }
+
+        if( (nal_unit_type == 1) )
+        {
+            /* Re-encode the slice header, substituting the locally tracked
+             * frame_num and POC lsb, then copy the remaining payload. */
+            first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            NSWAVCMCS_uExpVLC(&instance->encbs, first_mb_in_slice);
+
+            slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            NSWAVCMCS_uExpVLC(&instance->encbs, slice_type);
+
+            pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            NSWAVCMCS_uExpVLC(&instance->encbs, pic_parameter_set_id);
+
+            /* Replace the stream's frame_num with instance->frame_count. */
+            temp = H264MCS_getBits(p_bs,
+                instance->clip_sps.log2_max_frame_num_minus4 + 4);
+            NSWAVCMCS_putBits(&instance->encbs, instance->frame_count,
+                instance->clip_sps.log2_max_frame_num_minus4 + 4);
+
+            // In Baseline Profile: frame_mbs_only_flag should be ON
+
+            if( nal_unit_type == 5 )
+            {
+                temp = H264MCS_DecVLCReadExpGolombCode(p_bs);
+                NSWAVCMCS_uExpVLC(&instance->encbs, temp);
+            }
+
+            if( instance->clip_sps.pic_order_cnt_type == 0 )
+            {
+                /* Replace pic_order_cnt_lsb with instance->POC_lsb. */
+                temp = H264MCS_getBits(p_bs,
+                    instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4
+                    + 4);
+                NSWAVCMCS_putBits(&instance->encbs, instance->POC_lsb,
+                    instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
+            }
+
+            if( ( instance->clip_sps.pic_order_cnt_type == 1)
+                && (instance->clip_sps.delta_pic_order_always_zero_flag) )
+            {
+                temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+                NSWAVCMCS_sExpVLC(&instance->encbs, temp);
+            }
+
+            /* Byte-align the reader, then copy the rest of the payload. */
+            cnt = p_bs->bitPos & 0x7;
+
+            if( cnt )
+            {
+                cnt = 8 - cnt;
+                temp = H264MCS_getBits(p_bs, cnt);
+                NSWAVCMCS_putBits(&instance->encbs, temp, cnt);
+            }
+
+            cnt = p_bs->bitPos >> 3;
+
+            while( cnt < (nal_size - 2) )
+            {
+                temp = H264MCS_getBits(p_bs, 8);
+                NSWAVCMCS_putBits(&instance->encbs, temp, 8);
+                cnt = p_bs->bitPos >> 3;
+            }
+
+            /* Last byte: strip the rbsp trailing bits and re-emit them on a
+             * fresh byte boundary in the output. */
+            temp = H264MCS_getBits(p_bs, 8);
+
+            if( temp != 0 )
+            {
+                cnt = 0;
+
+                while( ( temp & 0x1) == 0 )
+                {
+                    cnt++;
+                    temp = temp >> 1;
+                }
+                cnt++;
+                temp = temp >> 1;
+
+                if( 8 - cnt )
+                {
+                    NSWAVCMCS_putBits(&instance->encbs, temp, (8 - cnt));
+                }
+
+                NSWAVCMCS_putRbspTbits(&instance->encbs);
+            }
+            else
+            {
+                if( instance->encbs.bitPos % 8 )
+                {
+                    NSWAVCMCS_putBits(&instance->encbs, 0,
+                        (8 - instance->encbs.bitPos % 8));
+                }
+            }
+
+            /* Rewritten NALU size = encoded bytes + NAL header byte; write it
+             * as a 4-byte big-endian length prefix. */
+            temp = instance->encbs.byteCnt;
+            temp = temp + 1;
+
+            outbuff[outbuffpos] = (M4OSA_UInt8)(( temp >> 24) & 0xFF);
+            outbuff[outbuffpos + 1] = (M4OSA_UInt8)(( temp >> 16) & 0xFF);
+            outbuff[outbuffpos + 2] = (M4OSA_UInt8)(( temp >> 8) & 0xFF);
+            outbuff[outbuffpos + 3] = (M4OSA_UInt8)((temp) &0xFF);
+            outbuffpos = outbuffpos + temp + 4;
+        }
+        else
+        {
+            /* Non-slice NALU: copy verbatim, replacing the start code with a
+             * 4-byte big-endian length prefix. */
+            p_bs->Buffer = p_bs->Buffer - 5;
+            memcpy((void *) &outbuff[outbuffpos],
+                (void *)p_bs->Buffer, nal_size + 4);
+
+            outbuff[outbuffpos] = (M4OSA_UInt8)((nal_size >> 24)& 0xFF);
+            outbuff[outbuffpos + 1] = (M4OSA_UInt8)((nal_size >> 16)& 0xFF);;
+            outbuff[outbuffpos + 2] = (M4OSA_UInt8)((nal_size >> 8)& 0xFF);;
+            outbuff[outbuffpos + 3] = (M4OSA_UInt8)((nal_size)& 0xFF);;
+
+            outbuffpos = outbuffpos + nal_size + 4;
+        }
+    }
+
+    *outbuf_size = outbuffpos;
+
+    /* Advance POC lsb and frame_num with wrap-around at their moduli. */
+    instance->POC_lsb = instance->POC_lsb + 1;
+
+    if( instance->POC_lsb == instance->POC_lsb_mod )
+    {
+        instance->POC_lsb = 0;
+    }
+    instance->frame_count = instance->frame_count + 1;
+
+    if( instance->frame_count == instance->frame_mod_count )
+    {
+        instance->frame_count = 0;
+    }
+    return M4NO_ERROR;
+}</parameter>
+
+/**
+ * Convert, in place, an H.264 access unit from byte-stream format
+ * (4-byte 00 00 00 01 start codes) to length-prefixed format: each start
+ * code is overwritten with the 4-byte big-endian size of the NALU payload
+ * that follows it. Assumes every start code is exactly 4 bytes.
+ *
+ * NOTE(review): "convet" in the function name is a typo for "convert";
+ * kept because the symbol is part of the public interface.
+ *
+ * @param inbuff     (IN/OUT) access unit, modified in place
+ * @param inbuf_size (IN) size of inbuff in bytes
+ * @return M4NO_ERROR always
+ */
+M4OSA_ERR M4MCS_convetFromByteStreamtoNALStream(  M4OSA_UInt8 *inbuff,
+    M4OSA_UInt32 inbuf_size )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 framesize = 0;
+    M4OSA_UInt32 nal_size =0;
+    M4OSA_UInt8 *buff;
+
+
+    while(framesize < inbuf_size)
+    {
+        /* Scan for the next start code to find this NALU's extent. */
+        nal_size = inbuf_size - framesize - 4;
+        buff =  inbuff + framesize + 4;
+
+        while(nal_size > 4){
+            if((buff[0] == 0x00) &&
+                (buff[1] == 0x00) &&
+                (buff[2] == 0x00) &&
+                (buff[3] == 0x01)){
+                break;
+            }
+            buff = buff + 1;
+            nal_size = nal_size -1;
+        }
+
+        if(nal_size <= 4){
+            /* No further start code: NALU runs to the end of the buffer. */
+            nal_size = 0;
+        }
+        nal_size = (inbuf_size - framesize - 4) - nal_size;
+
+        /* Overwrite the start code with the big-endian NALU length. */
+        inbuff[framesize + 0] = (M4OSA_UInt8)((nal_size >> 24)& 0xFF);
+        inbuff[framesize + 1] = (M4OSA_UInt8)((nal_size >> 16)& 0xFF);
+        inbuff[framesize + 2] = (M4OSA_UInt8)((nal_size >> 8)& 0xFF);
+        inbuff[framesize + 3] = (M4OSA_UInt8)((nal_size )& 0xFF);
+        framesize += nal_size + 4;
+
+        M4OSA_TRACE1_2("M4MCS_convetFromByteStreamtoNALStream framesize = %x nalsize = %x",
+            framesize, nal_size)
+    }
+
+    return err;
+}
+
+
+/**
+ * Release all memory owned by an MCS H.264 rewriting context, then the
+ * context itself.
+ *
+ * @param instance (IN) context to free; must not be used afterwards
+ * @return M4NO_ERROR (M4ERR_PARAMETER via M4OSA_DEBUG_IF2 in debug builds
+ *         when instance is NULL)
+ *
+ * NOTE(review): the trailing `if( M4OSA_NULL != instance )` is redundant —
+ * instance has already been dereferenced above, and debug builds return
+ * early on NULL. Setting the local `instance` to NULL has no effect on the
+ * caller's pointer.
+ */
+M4OSA_ERR H264MCS_Freeinstance( NSWAVC_MCS_t *instance )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
+        "H264MCS_Freeinstance: instance is M4OSA_NULL");
+
+    /* FMO slice-group map allocated by DecPPSMCS (when _CAP_FMO_ is on). */
+    if( M4OSA_NULL != instance->encoder_pps.slice_group_id )
+    {
+        free(instance->encoder_pps.slice_group_id);
+    }
+
+    if( M4OSA_NULL != instance->p_encoder_sps )
+    {
+        free(instance->p_encoder_sps);
+        instance->p_encoder_sps = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != instance->p_encoder_pps )
+    {
+        free(instance->p_encoder_pps);
+        instance->p_encoder_pps = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != instance->m_pFinalDSI )
+    {
+        free(instance->m_pFinalDSI);
+        instance->m_pFinalDSI = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != instance )
+    {
+        free(instance);
+        instance = M4OSA_NULL;
+    }
+
+    return err;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
+ * @brief Get the MCS version.
+ * @note Can be called anytime. Do not need any context.
+ * @param pVersionInfo (OUT) Pointer to a version info structure
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getVersion( M4_VersionInfo *pVersionInfo )
+{
+ M4OSA_TRACE3_1("M4MCS_getVersion called with pVersionInfo=0x%x",
+ pVersionInfo);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pVersionInfo), M4ERR_PARAMETER,
+ "M4MCS_getVersion: pVersionInfo is M4OSA_NULL");
+
+ pVersionInfo->m_major = M4MCS_VERSION_MAJOR;
+ pVersionInfo->m_minor = M4MCS_VERSION_MINOR;
+ pVersionInfo->m_revision = M4MCS_VERSION_REVISION;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4MCS_getVersion(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * @brief Initializes the MCS (allocates an execution context).
+ * @note
+ * @param pContext (OUT) Pointer on the MCS context to allocate
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @param pFileWritePtrFct (IN) Pointer to OSAL file writer functions
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (If Debug Level >= 2)
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4MCS_init( M4MCS_Context *pContext,
+ M4OSA_FileReadPointer *pFileReadPtrFct,
+ M4OSA_FileWriterPointer *pFileWritePtrFct )
+{
+ M4MCS_InternalContext *pC = M4OSA_NULL;
+ M4OSA_ERR err;
+
+ M4OSA_TRACE3_3(
+ "M4MCS_init called with pContext=0x%x, pFileReadPtrFct=0x%x, pFileWritePtrFct=0x%x",
+ pContext, pFileReadPtrFct, pFileWritePtrFct);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4MCS_init: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+ "M4MCS_init: pFileReadPtrFct is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
+ "M4MCS_init: pFileWritePtrFct is M4OSA_NULL");
+
+ /**
+ * Allocate the MCS context and return it to the user */
+ pC = (M4MCS_InternalContext *)M4OSA_32bitAlignedMalloc(sizeof(M4MCS_InternalContext),
+ M4MCS, (M4OSA_Char *)"M4MCS_InternalContext");
+ *pContext = pC;
+
+ if( M4OSA_NULL == pC )
+ {
+ M4OSA_TRACE1_0(
+ "M4MCS_init(): unable to allocate M4MCS_InternalContext, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ /**
+ * Init the context. All pointers must be initialized to M4OSA_NULL
+ * because CleanUp() can be called just after Init(). */
+ pC->State = M4MCS_kState_CREATED;
+ pC->pOsaFileReadPtr = pFileReadPtrFct;
+ pC->pOsaFileWritPtr = pFileWritePtrFct;
+ pC->VideoState = M4MCS_kStreamState_NOSTREAM;
+ pC->AudioState = M4MCS_kStreamState_NOSTREAM;
+ pC->noaudio = M4OSA_FALSE;
+ pC->novideo = M4OSA_FALSE;
+ pC->uiProgress = 0;
+
+ /**
+ * Reader stuff */
+ pC->pInputFile = M4OSA_NULL;
+ pC->InputFileType = M4VIDEOEDITING_kFileType_Unsupported;
+ pC->bFileOpenedInFastMode = M4OSA_FALSE;
+ pC->pReaderContext = M4OSA_NULL;
+ pC->pReaderVideoStream = M4OSA_NULL;
+ pC->pReaderAudioStream = M4OSA_NULL;
+ pC->bUnsupportedVideoFound = M4OSA_FALSE;
+ pC->bUnsupportedAudioFound = M4OSA_FALSE;
+ pC->iAudioCtsOffset = 0;
+ /* First temporary video AU to have more precise end video cut*/
+ pC->ReaderVideoAU1.m_structSize = 0;
+ /* Second temporary video AU to have more precise end video cut*/
+ pC->ReaderVideoAU2.m_structSize = 0;
+ pC->ReaderAudioAU1.m_structSize = 0;
+ pC->ReaderAudioAU2.m_structSize = 0;
+ pC->m_audioAUDuration = 0;
+ pC->m_pDataAddress1 = M4OSA_NULL;
+ pC->m_pDataAddress2 = M4OSA_NULL;
+ /* First temporary video AU data to have more precise end video cut*/
+ pC->m_pDataVideoAddress1 = M4OSA_NULL;
+ /* Second temporary video AU data to have more precise end video cut*/
+ pC->m_pDataVideoAddress2 = M4OSA_NULL;
+
+ /**
+ * Video decoder stuff */
+ pC->pViDecCtxt = M4OSA_NULL;
+ pC->dViDecStartingCts = 0.0;
+ pC->iVideoBeginDecIncr = 0;
+ pC->dViDecCurrentCts = 0.0;
+ pC->dCtsIncrement = 0.0;
+ pC->isRenderDup = M4OSA_FALSE;
+
+ /**
+ * Video encoder stuff */
+ pC->pViEncCtxt = M4OSA_NULL;
+ pC->pPreResizeFrame = M4OSA_NULL;
+ pC->uiEncVideoBitrate = 0;
+ pC->encoderState = M4MCS_kNoEncoder;
+
+ /**
+ * Audio decoder stuff */
+ pC->pAudioDecCtxt = M4OSA_NULL;
+ pC->AudioDecBufferIn.m_dataAddress = M4OSA_NULL;
+ pC->AudioDecBufferIn.m_bufferSize = 0;
+ pC->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+ pC->AudioDecBufferOut.m_bufferSize = 0;
+ pC->pPosInDecBufferOut = M4OSA_NULL;
+ /**
+ * Ssrc stuff */
+ pC->pSsrcBufferIn = M4OSA_NULL;
+ pC->pSsrcBufferOut = M4OSA_NULL;
+ pC->pPosInSsrcBufferIn = M4OSA_NULL;
+ pC->pPosInSsrcBufferOut = M4OSA_NULL;
+ pC->iSsrcNbSamplIn = 0;
+ pC->iSsrcNbSamplOut = 0;
+ pC->SsrcScratch = M4OSA_NULL;
+ pC->pLVAudioResampler = M4OSA_NULL;
+ /**
+ * Audio encoder */
+ pC->pAudioEncCtxt = M4OSA_NULL;
+ pC->pAudioEncDSI.infoSize = 0;
+ pC->pAudioEncDSI.pInfo = M4OSA_NULL;
+ pC->pAudioEncoderBuffer = M4OSA_NULL;
+ pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
+ pC->audioEncoderGranularity = 0;
+
+ /**
+ * Writer stuff */
+ pC->pOutputFile = M4OSA_NULL;
+ pC->pTemporaryFile = M4OSA_NULL;
+ pC->pWriterContext = M4OSA_NULL;
+ pC->uiVideoAUCount = 0;
+ pC->uiVideoMaxAuSize = 0;
+ pC->uiVideoMaxChunckSize = 0;
+ pC->uiAudioAUCount = 0;
+ pC->uiAudioMaxAuSize = 0;
+
+ pC->uiAudioCts = 0;
+ pC->b_isRawWriter = M4OSA_FALSE;
+ pC->pOutputPCMfile = M4OSA_NULL;
+
+ /* Encoding config */
+ pC->EncodingVideoFormat = M4ENCODER_kNULL; /**< No format set yet */
+ pC->EncodingWidth = 0; /**< No size set yet */
+ pC->EncodingHeight = 0; /**< No size set yet */
+ pC->EncodingVideoFramerate = 0; /**< No framerate set yet */
+
+ pC->uiBeginCutTime = 0; /**< No begin cut */
+ pC->uiEndCutTime = 0; /**< No end cut */
+ pC->uiMaxFileSize = 0; /**< No limit */
+ pC->uiAudioBitrate =
+ M4VIDEOEDITING_kUndefinedBitrate; /**< No bitrate set yet */
+ pC->uiVideoBitrate =
+ M4VIDEOEDITING_kUndefinedBitrate; /**< No bitrate set yet */
+
+ pC->WriterVideoStream.streamType = M4SYS_kVideoUnknown;
+ pC->WriterVideoStreamInfo.Header.pBuf = M4OSA_NULL;
+ pC->WriterAudioStream.streamType = M4SYS_kAudioUnknown;
+
+ pC->outputVideoTimescale = 0;
+
+ /*FB 2008/10/20: add media rendering parameter and AIR context to keep media aspect ratio*/
+ pC->MediaRendering = M4MCS_kResizing;
+ pC->m_air_context = M4OSA_NULL;
+ /**/
+
+ /**
+ * FlB 2009.03.04: add audio Effects*/
+ pC->pEffects = M4OSA_NULL;
+ pC->nbEffects = 0;
+ pC->pActiveEffectNumber = -1;
+ /**/
+
+ /*
+ * Reset pointers for media and codecs interfaces */
+ err = M4MCS_clearInterfaceTables(pC);
+ M4ERR_CHECK_RETURN(err);
+
+ /*
+ * Call the media and codecs subscription module */
+ err = M4MCS_subscribeMediaAndCodec(pC);
+ M4ERR_CHECK_RETURN(err);
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+ /**
+ * Initialize the Still picture part of MCS*/
+
+ err = M4MCS_stillPicInit(pC, pFileReadPtrFct, pFileWritePtrFct);
+ M4ERR_CHECK_RETURN(err);
+
+ pC->m_bIsStillPicture = M4OSA_FALSE;
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+ pC->m_pInstance = M4OSA_NULL;
+ pC->H264MCSTempBuffer = M4OSA_NULL;
+ pC->H264MCSTempBufferSize = 0;
+ pC->H264MCSTempBufferDataSize = 0;
+ pC->bH264Trim = M4OSA_FALSE;
+
+ /* Flag to get the last decoded frame cts */
+ pC->bLastDecodedFrameCTS = M4OSA_FALSE;
+
+ if( pC->m_pInstance == M4OSA_NULL )
+ {
+ err = H264MCS_Getinstance(&pC->m_pInstance);
+ }
+ pC->bExtOMXAudDecoder = M4OSA_FALSE;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4MCS_init(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+ * M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+ * @brief Set the MCS input and output files.
+ * @note It opens the input file, but the output file is not created yet.
+ * @param pContext (IN) MCS context
+ * @param pFileIn (IN) Input file to transcode (The type of this parameter
+ * (URL, pipe...) depends on the OSAL implementation).
+ * @param mediaType (IN) Container type (.3gp,.amr,mp3 ...) of input file.
+ * @param pFileOut (IN) Output file to create (The type of this parameter
+ * (URL, pipe...) depends on the OSAL implementation).
+ * @param pTempFile (IN) Temporary file for the constant memory writer to
+ * store metadata ("moov.bin").
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4ERR_ALLOC: There is no more available memory
+ * @return M4ERR_FILE_NOT_FOUND: The input file has not been found
+ * @return M4MCS_ERR_INVALID_INPUT_FILE: The input file is not a valid file, or is corrupted
+ * @return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM: The input file contains no
+ * supported audio or video stream
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_open( M4MCS_Context pContext, M4OSA_Void *pFileIn,
+ M4VIDEOEDITING_FileType InputFileType, M4OSA_Void *pFileOut,
+ M4OSA_Void *pTempFile )
+{
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+ M4OSA_ERR err;
+
+ M4READER_MediaFamily mediaFamily;
+ M4_StreamHandler *pStreamHandler;
+
+ M4OSA_TRACE2_3(
+ "M4MCS_open called with pContext=0x%x, pFileIn=0x%x, pFileOut=0x%x",
+ pContext, pFileIn, pFileOut);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4MCS_open: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pFileIn), M4ERR_PARAMETER,
+ "M4MCS_open: pFileIn is M4OSA_NULL");
+
+ if( ( InputFileType == M4VIDEOEDITING_kFileType_JPG)
+ || (InputFileType == M4VIDEOEDITING_kFileType_PNG)
+ || (InputFileType == M4VIDEOEDITING_kFileType_GIF)
+ || (InputFileType == M4VIDEOEDITING_kFileType_BMP) )
+ {
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+ /**
+ * Indicate that we must use the still picture functions*/
+
+ pC->m_bIsStillPicture = M4OSA_TRUE;
+
+ /**
+ * Call the still picture MCS functions*/
+ return M4MCS_stillPicOpen(pC, pFileIn, InputFileType, pFileOut);
+
+#else
+
+ M4OSA_TRACE1_0(
+ "M4MCS_open: Still picture is not supported with this version of MCS");
+ return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+ }
+
+ /**
+ * Check state automaton */
+ if( M4MCS_kState_CREATED != pC->State )
+ {
+ M4OSA_TRACE1_1("M4MCS_open(): Wrong State (%d), returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ }
+
+ /* Copy function input parameters into our context */
+ pC->pInputFile = pFileIn;
+ pC->InputFileType = InputFileType;
+ pC->pOutputFile = pFileOut;
+ pC->pTemporaryFile = pTempFile;
+ pC->uiProgress = 0;
+
+ /***********************************/
+ /* Open input file with the reader */
+ /***********************************/
+
+ err = M4MCS_setCurrentReader(pContext, pC->InputFileType);
+ M4ERR_CHECK_RETURN(err);
+
+ /**
+ * Reset reader related variables */
+ pC->VideoState = M4MCS_kStreamState_NOSTREAM;
+ pC->AudioState = M4MCS_kStreamState_NOSTREAM;
+ pC->pReaderVideoStream = M4OSA_NULL;
+ pC->pReaderAudioStream = M4OSA_NULL;
+
+ /*******************************************************/
+ /* Initializes the reader shell and open the data file */
+ /*******************************************************/
+ err = pC->m_pReader->m_pFctCreate(&pC->pReaderContext);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctCreate returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Link the reader interface to the reader context */
+ pC->m_pReaderDataIt->m_readerContext = pC->pReaderContext;
+
+ /**
+ * Set the reader shell file access functions */
+ err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
+ M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+ (M4OSA_DataOption)pC->pOsaFileReadPtr);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctSetOption returns 0x%x",
+ err);
+ return err;
+ }
+
+#ifdef M4MCS_WITH_FAST_OPEN
+
+ if( M4OSA_FALSE == pC->bFileOpenedInFastMode )
+ {
+ M4OSA_Bool trueValue = M4OSA_TRUE;
+
+ /* For first call use fast open mode */
+ err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
+ M4READER_3GP_kOptionID_FastOpenMode, &trueValue);
+
+ if( M4NO_ERROR == err )
+ {
+ pC->bFileOpenedInFastMode = M4OSA_TRUE;
+ }
+ else
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_open(): M4READER_3GP_kOptionID_FastOpenMode returns 0x%x",
+ err);
+
+ if( ( ( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID) == err)
+ || (( (M4OSA_UInt32)M4ERR_PARAMETER) == err) )
+ {
+ /* Not fatal, some readers may not support fast open mode */
+ pC->bFileOpenedInFastMode = M4OSA_FALSE;
+ }
+ else
+ return err;
+ }
+ }
+ else
+ {
+ M4OSA_Bool falseValue = M4OSA_FALSE;
+
+ /* For second call use normal open mode */
+ err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
+ M4READER_3GP_kOptionID_FastOpenMode, &falseValue);
+ }
+
+#endif /* M4MCS_WITH_FAST_OPEN */
+
+ /**
+ * Open the input file */
+
+ err = pC->m_pReader->m_pFctOpen(pC->pReaderContext, pC->pInputFile);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_UInt32 uiDummy, uiCoreId;
+ M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctOpen returns 0x%x", err);
+
+ /**
+ * If the error is from the core reader, we change it to a public VXS error */
+ M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
+
+ if( M4MP4_READER == uiCoreId )
+ {
+ M4OSA_TRACE1_0(
+ "M4MCS_open(): returning M4MCS_ERR_INVALID_INPUT_FILE");
+ return M4MCS_ERR_INVALID_INPUT_FILE;
+ }
+ return err;
+ }
+
+ /**
+ * Get the streams from the input file */
+ while( M4NO_ERROR == err )
+ {
+ err =
+ pC->m_pReader->m_pFctGetNextStream( pC->pReaderContext,
+ &mediaFamily,
+ &pStreamHandler);
+
+ /**
+ * In case we found a BIFS stream or something else...*/
+ if( ( err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
+ || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)) )
+ {
+ err = M4NO_ERROR;
+ continue;
+ }
+
+ if( M4NO_ERROR == err ) /**< One stream found */
+ {
+ /**
+ * Found the first video stream */
+ if( ( M4READER_kMediaFamilyVideo == mediaFamily)
+ && (M4OSA_NULL == pC->pReaderVideoStream) )
+ {
+ if( ( M4DA_StreamTypeVideoH263 == pStreamHandler->m_streamType)
+ || (M4DA_StreamTypeVideoMpeg4
+ == pStreamHandler->m_streamType)
+ || (M4DA_StreamTypeVideoMpeg4Avc
+ == pStreamHandler->m_streamType) )
+ {
+ M4OSA_TRACE3_0(
+ "M4MCS_open(): Found a H263 or MPEG-4 video stream in input 3gpp clip");
+
+ /**
+ * Keep pointer to the video stream */
+ pC->pReaderVideoStream =
+ (M4_VideoStreamHandler *)pStreamHandler;
+ pC->bUnsupportedVideoFound = M4OSA_FALSE;
+ pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+ /**
+ * Init our video stream state variable */
+ pC->VideoState = M4MCS_kStreamState_STARTED;
+
+ /**
+ * Reset the stream reader */
+ err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_open():\
+ m_pReader->m_pFctReset(video) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Initializes an access Unit */
+ err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+ pStreamHandler, &pC->ReaderVideoAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_open():\
+ m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+ err);
+ return err;
+ }
+ }
+ else /**< Not H263 or MPEG-4 (H264, etc.) */
+ {
+ M4OSA_TRACE1_1("M4MCS_open(): Found an unsupported video stream (0x%x) in\
+ input 3gpp clip",
+ pStreamHandler->m_streamType);
+
+ pC->bUnsupportedVideoFound = M4OSA_TRUE;
+ pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+ }
+ /* +CRLV6775 -H.264 Trimming */
+ if( M4DA_StreamTypeVideoMpeg4Avc
+ == pStreamHandler->m_streamType )
+ {
+
+ // SPS and PPS are storead as per the 3gp file format
+ pC->m_pInstance->m_pDecoderSpecificInfo =
+ pStreamHandler->m_pH264DecoderSpecificInfo;
+ pC->m_pInstance->m_decoderSpecificInfoSize =
+ pStreamHandler->m_H264decoderSpecificInfoSize;
+ }
+ /* -CRLV6775 -H.264 Trimming */
+ }
+ /**
+ * Found the first audio stream */
+ else if( ( M4READER_kMediaFamilyAudio == mediaFamily)
+ && (M4OSA_NULL == pC->pReaderAudioStream) )
+ {
+ if( ( M4DA_StreamTypeAudioAmrNarrowBand
+ == pStreamHandler->m_streamType)
+ || (M4DA_StreamTypeAudioAac == pStreamHandler->m_streamType)
+ || (M4DA_StreamTypeAudioMp3
+ == pStreamHandler->m_streamType)
+ || (M4DA_StreamTypeAudioEvrc
+ == pStreamHandler->m_streamType) )
+ {
+ M4OSA_TRACE3_0(
+ "M4MCS_open(): Found an AMR-NB, AAC or MP3 audio stream in input clip");
+
+ /**
+ * Keep pointer to the audio stream */
+ pC->pReaderAudioStream =
+ (M4_AudioStreamHandler *)pStreamHandler;
+ pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+ pC->bUnsupportedAudioFound = M4OSA_FALSE;
+
+ /**
+ * Init our audio stream state variable */
+ pC->AudioState = M4MCS_kStreamState_STARTED;
+
+ /**
+ * Reset the stream reader */
+ err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderAudioStream);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_open():\
+ m_pReader->m_pFctReset(audio) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Initializes an access Unit */
+ err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+ pStreamHandler, &pC->ReaderAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_open():\
+ m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Output max AU size is equal to input max AU size (this value
+ * will be changed if there is audio transcoding) */
+ pC->uiAudioMaxAuSize = pStreamHandler->m_maxAUSize;
+ }
+ else
+ {
+ /**< Not AMR-NB, AAC, MP3 nor EVRC (AMR-WB, WAV...) */
+ M4OSA_TRACE1_1("M4MCS_open(): Found an unsupported audio stream (0x%x) in \
+ input 3gpp clip", pStreamHandler->m_streamType);
+
+ pC->bUnsupportedAudioFound = M4OSA_TRUE;
+ pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+ }
+ }
+ }
+ } /**< end of while (M4NO_ERROR == err) */
+
+ /**
+ * Check we found at least one supported stream */
+ if( ( M4OSA_NULL == pC->pReaderVideoStream)
+ && (M4OSA_NULL == pC->pReaderAudioStream) )
+ {
+ M4OSA_TRACE1_0(
+ "M4MCS_open(): returning M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM");
+ return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
+ }
+
+ if( pC->VideoState == M4MCS_kStreamState_STARTED )
+ {
+ err = M4MCS_setCurrentVideoDecoder(pContext,
+ pC->pReaderVideoStream->m_basicProperties.m_streamType);
+ /*FB 2009-02-09: the error is check and returned only if video codecs are compiled,
+ else only audio is used, that is why the editing process can continue*/
+#ifndef M4MCS_AUDIOONLY
+
+ M4ERR_CHECK_RETURN(err);
+
+#else
+
+ if( ( M4NO_ERROR != err) && (M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED != err) )
+ {
+ M4ERR_CHECK_RETURN(err);
+ }
+
+#endif /*M4MCS_AUDIOONLY*/
+
+ }
+
+ if( pC->AudioState == M4MCS_kStreamState_STARTED )
+ {
+ //EVRC
+ if( M4DA_StreamTypeAudioEvrc
+ != pStreamHandler->
+ m_streamType ) /* decoder not supported yet, but allow to do null encoding */
+ {
+ err = M4MCS_setCurrentAudioDecoder(pContext,
+ pC->pReaderAudioStream->m_basicProperties.m_streamType);
+ M4ERR_CHECK_RETURN(err);
+ }
+ }
+
+ /**
+ * Get the audio and video stream properties */
+ err = M4MCS_intGetInputClipProperties(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_open(): M4MCS_intGetInputClipProperties returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Set the begin cut decoding increment according to the input frame rate */
+ if( 0. != pC->InputFileProperties.fAverageFrameRate ) /**< sanity check */
+ {
+ pC->iVideoBeginDecIncr = (M4OSA_Int32)(3000.
+ / pC->InputFileProperties.
+ fAverageFrameRate); /**< about 3 frames */
+ }
+ else
+ {
+ pC->iVideoBeginDecIncr =
+ 200; /**< default value: 200 milliseconds (3 frames @ 15fps)*/
+ }
+
+ /**
+ * Update state automaton */
+ pC->State = M4MCS_kState_OPENED;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4MCS_open(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
+ * @brief Perform one step of trancoding.
+ * @note
+ * @param pContext (IN) MCS context
+ * @param pProgress (OUT) Progress percentage (0 to 100) of the transcoding
+ * @note pProgress must be a valid address.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: One of the parameters is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_WAR_TRANSCODING_DONE: Transcoding is over, user should now call M4MCS_close()
+ * @return M4MCS_ERR_AUDIO_CONVERSION_FAILED: The audio conversion (AAC to AMR-NB or MP3) failed
+ * @return M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY: The input file contains an AAC audio track
+ * with an invalid sampling frequency (should never happen)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_step( M4MCS_Context pContext, M4OSA_UInt8 *pProgress )
+{
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+ M4OSA_TRACE3_1("M4MCS_step called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4MCS_step: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pProgress), M4ERR_PARAMETER,
+ "M4MCS_step: pProgress is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+ if( pC->m_bIsStillPicture )
+ {
+ /**
+ * Call the still picture MCS functions*/
+ return M4MCS_stillPicStep(pC, pProgress);
+ }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+ /**
+ * Check state automaton */
+
+ switch( pC->State )
+ {
+ case M4MCS_kState_READY:
+ *pProgress = 0;
+ return M4MCS_intStepSet(pC);
+ break;
+
+ case M4MCS_kState_BEGINVIDEOJUMP:
+ *pProgress = pC->uiProgress;
+ return M4MCS_intStepBeginVideoJump(pC);
+ break;
+
+ case M4MCS_kState_BEGINVIDEODECODE:
+ *pProgress = pC->uiProgress;
+ return M4MCS_intStepBeginVideoDecode(pC);
+ break;
+
+ case M4MCS_kState_PROCESSING:
+ {
+ M4OSA_ERR err = M4NO_ERROR;
+ err = M4MCS_intStepEncoding(pC, pProgress);
+ /* Save progress info in case of pause */
+ pC->uiProgress = *pProgress;
+ return err;
+ }
+ break;
+
+ default: /**< State error */
+ M4OSA_TRACE1_1(
+ "M4MCS_step(): Wrong State (%d), returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ }
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
+ * @brief Pause the transcoding i.e. release the (external hardware) video decoder.
+ * @note This function is not needed if no hardware accelerators are used.
+ * In that case, pausing the MCS is simply achieved by temporarily suspending
+ * the M4MCS_step function calls.
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_pause( M4MCS_Context pContext )
+{
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_1("M4MCS_pause called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4MCS_pause: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+ if( pC->m_bIsStillPicture )
+ {
+ /**
+ * Call the corresponding still picture MCS function*/
+ return M4MCS_stillPicPause(pC);
+ }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+ /**
+ * Check state automaton */
+
+ switch( pC->State )
+ {
+ case M4MCS_kState_BEGINVIDEOJUMP: /**< the video decoder has been created,
+ we must destroy it */
+ case M4MCS_kState_BEGINVIDEODECODE: /**< the video is being used, we must destroy it */
+ case M4MCS_kState_PROCESSING: /**< the video is being used, we must destroy it */
+ /**< OK, nothing to do here */
+ break;
+
+ default: /**< State error */
+ M4OSA_TRACE1_1(
+ "M4MCS_pause(): Wrong State (%d), returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ }
+
+ /**
+ * Set the CTS at which we will resume the decoding */
+ if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
+ {
+ /**
+ * We passed the starting CTS, so the resume target is the current CTS */
+ pC->dViDecStartingCts = pC->dViDecCurrentCts;
+ }
+ else {
+ /**
+ * We haven't passed the starting CTS yet, so the resume target is still the starting CTS
+ * --> nothing to do in the else block */
+ }
+
+ /**
+ * Free video decoder stuff */
+ if( M4OSA_NULL != pC->pViDecCtxt )
+ {
+ err = pC->m_pVideoDecoder->m_pFctDestroy(pC->pViDecCtxt);
+ pC->pViDecCtxt = M4OSA_NULL;
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_pause: m_pVideoDecoder->pFctDestroy returns 0x%x", err);
+ return err;
+ }
+ }
+
+ /**
+ * State transition */
+ pC->State = M4MCS_kState_PAUSED;
+
+ M4OSA_TRACE3_0("M4MCS_pause(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
+ * @brief Resume the transcoding after a pause (see M4MCS_pause).
+ * @note This function is not needed if no hardware accelerators are used.
+ * In that case, resuming the MCS is simply achieved by calling
+ * the M4MCS_step function.
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_resume( M4MCS_Context pContext )
+{
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_1("M4MCS_resume called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4MCS_resume: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+ if( pC->m_bIsStillPicture )
+ {
+ /**
+ * Call the corresponding still picture MCS function*/
+ return M4MCS_stillPicResume(pC);
+ }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+ /**
+ * Check state automaton */
+
+ switch( pC->State )
+ {
+ case M4MCS_kState_PAUSED: /**< OK, nothing to do here */
+ break;
+
+ default: /**< State error */
+ M4OSA_TRACE1_1(
+ "M4MCS_resume(): Wrong State (%d), returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ break;
+ }
+
+ /**
+ * Prepare the video decoder */
+ err = M4MCS_intPrepareVideoDecoder(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_resume(): M4MCS_intPrepareVideoDecoder() returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * State transition */
+ if( 0.0 == pC->dViDecStartingCts )
+ {
+ /**
+ * We are still at the beginning of the decoded stream, no need to jump, we can proceed */
+ pC->State = M4MCS_kState_PROCESSING;
+ }
+ else
+ {
+ /**
+ * Jumping */
+ pC->State = M4MCS_kState_BEGINVIDEOJUMP;
+ }
+
+ M4OSA_TRACE3_0("M4MCS_resume(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
+ * @brief Finish the MCS transcoding.
+ * @note The output 3GPP file is ready to be played after this call
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_close( M4MCS_Context pContext )
+{
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+ M4ENCODER_Header *encHeader;
+ M4SYS_StreamIDmemAddr streamHeader;
+
+ M4OSA_ERR err = M4NO_ERROR, err2;
+
+ M4OSA_TRACE2_1("M4MCS_close called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4MCS_close: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+ if( pC->m_bIsStillPicture )
+ {
+ /**
+ * Indicate that current file is no longer a still picture*/
+ pC->m_bIsStillPicture = M4OSA_FALSE;
+
+ /**
+ * Call the corresponding still picture MCS function*/
+ return M4MCS_stillPicClose(pC);
+ }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+ /**
+ * Check state automaton */
+
+ if( M4MCS_kState_FINISHED != pC->State )
+ {
+ M4OSA_TRACE1_1("M4MCS_close(): Wrong State (%d), returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ }
+
+ /* Close the encoder before the writer to be certain all the AUs have been written and we can
+ get the DSI. */
+
+ /* Has the encoder actually been started? Don't stop it if that's not the case. */
+ if( M4MCS_kEncoderRunning == pC->encoderState )
+ {
+ if( pC->pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+ {
+ err = pC->pVideoEncoderGlobalFcts->pFctStop(pC->pViEncCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_close: pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
+ err);
+ /* Well... how the heck do you handle a failed cleanup? */
+ }
+ }
+
+ pC->encoderState = M4MCS_kEncoderStopped;
+ }
+
+ /* Has the encoder actually been opened? Don't close it if that's not the case. */
+ if( M4MCS_kEncoderStopped == pC->encoderState )
+ {
+ err = pC->pVideoEncoderGlobalFcts->pFctClose(pC->pViEncCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_close: pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
+ err);
+ /* Well... how the heck do you handle a failed cleanup? */
+ }
+
+ pC->encoderState = M4MCS_kEncoderClosed;
+ }
+
+ /**********************************/
+ /******** Close the writer ********/
+ /**********************************/
+ if( M4OSA_NULL != pC->pWriterContext ) /* happens in state _SET */
+ {
+ /* HW encoder: fetch the DSI from the shell video encoder, and feed it to the writer before
+ closing it. */
+
+ if( pC->novideo != M4OSA_TRUE )
+ {
+ if( ( M4ENCODER_kMPEG4 == pC->EncodingVideoFormat)
+ || (M4ENCODER_kH264 == pC->EncodingVideoFormat) )
+ {
+ err = pC->pVideoEncoderGlobalFcts->pFctGetOption(pC->pViEncCtxt,
+ M4ENCODER_kOptionID_EncoderHeader,
+ (M4OSA_DataOption) &encHeader);
+
+ if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_close: failed to get the encoder header (err 0x%x)",
+ err);
+ /**< no return here, we still have stuff to deallocate after close, even
+ if it fails. */
+ }
+ else
+ {
+ /* set this header in the writer */
+ streamHeader.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
+ streamHeader.size = encHeader->Size;
+ streamHeader.addr = (M4OSA_MemAddr32)encHeader->pBuf;
+ }
+
+ M4OSA_TRACE1_0("calling set option");
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ M4WRITER_kDSI, &streamHeader);
+ M4OSA_TRACE1_0("set option done");
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_close: failed to set the DSI in the writer (err 0x%x)",
+ err);
+ }
+ }
+
+ if( ( M4OSA_TRUE == pC->bH264Trim)
+ && (M4ENCODER_kNULL == pC->EncodingVideoFormat) )
+ {
+ if(pC->uiBeginCutTime == 0)
+ {
+ M4OSA_TRACE1_1("Decoder specific info size = %d",
+ pC->m_pInstance->m_decoderSpecificInfoSize);
+ pC->m_pInstance->m_pFinalDSISize =
+ pC->m_pInstance->m_decoderSpecificInfoSize;
+ M4OSA_TRACE1_1("Decoder specific info pointer = %d",
+ (M4OSA_MemAddr8)pC->m_pInstance->m_pDecoderSpecificInfo);
+
+ pC->m_pInstance->m_pFinalDSI =
+ (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(pC->m_pInstance-> \
+ m_decoderSpecificInfoSize, M4MCS,
+ (M4OSA_Char *)"instance->m_pFinalDSI");
+
+ if( pC->m_pInstance->m_pFinalDSI == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pC->m_pInstance->m_pFinalDSI,
+ (void *)pC-> \
+ m_pInstance->m_pDecoderSpecificInfo,
+ pC->m_pInstance->m_decoderSpecificInfoSize);
+ }
+ streamHeader.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
+ streamHeader.size = pC->m_pInstance->m_pFinalDSISize;
+ streamHeader.addr =
+ (M4OSA_MemAddr32)pC->m_pInstance->m_pFinalDSI;
+ M4OSA_TRACE1_0("calling set option");
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ M4WRITER_kDSI, &streamHeader);
+ M4OSA_TRACE1_0("set option done");
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_close: failed to set the DSI in the writer (err 0x%x)",
+ err);
+ }
+ }
+ }
+ /* Write and close the 3GP output file */
+ err2 = pC->pWriterGlobalFcts->pFctCloseWrite(pC->pWriterContext);
+ pC->pWriterContext = M4OSA_NULL;
+
+ if( M4NO_ERROR != err2 )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_close: pWriterGlobalFcts->pFctCloseWrite returns 0x%x",
+ err2);
+
+ if( M4NO_ERROR == err )
+ err = err2;
+ /**< no return here, we still have stuff to deallocate after close, even if it fails.*/
+ }
+ }
+
+ /* Close output PCM file if needed */
+ if( pC->pOutputPCMfile != M4OSA_NULL )
+ {
+ pC->pOsaFileWritPtr->closeWrite(pC->pOutputPCMfile);
+ pC->pOutputPCMfile = M4OSA_NULL;
+ }
+
+ /*FlB 2009.03.04: add audio effects,
+ free effects list*/
+ if( M4OSA_NULL != pC->pEffects )
+ {
+ free(pC->pEffects);
+ pC->pEffects = M4OSA_NULL;
+ }
+ pC->nbEffects = 0;
+ pC->pActiveEffectNumber = -1;
+
+ /**
+ * State transition */
+ pC->State = M4MCS_kState_CLOSED;
+
+ if( M4OSA_NULL != pC->H264MCSTempBuffer )
+ {
+ free(pC->H264MCSTempBuffer);
+ }
+
+ M4OSA_TRACE3_0("M4MCS_close(): returning M4NO_ERROR");
+ return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
+ * @brief Free all resources used by the MCS.
+ * @note The context is no longer valid after this call
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_cleanUp( M4MCS_Context pContext )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+    M4OSA_TRACE3_1("M4MCS_cleanUp called with pContext=0x%x", pContext);
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+    /* Debug-only PCM dump files: these are file-scope globals, not context
+       members, so they are safe to close even before the NULL check below. */
+    if( file_au_reader )
+    {
+        fclose(file_au_reader);
+        file_au_reader = NULL;
+    }
+
+    if( file_pcm_decoder )
+    {
+        fclose(file_pcm_decoder);
+        file_pcm_decoder = NULL;
+    }
+
+    if( file_pcm_encoder )
+    {
+        fclose(file_pcm_encoder);
+        file_pcm_encoder = NULL;
+    }
+
+#endif
+
+    /**
+     * Check input parameter */
+
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_cleanUp: pContext is M4OSA_NULL, returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    /**
+     * Check state automaton: cleanUp is only legal once M4MCS_close() has
+     * moved the session to CLOSED (see M4MCS_abort for the combined path). */
+    if( M4MCS_kState_CLOSED != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_cleanUp(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Free the H264 trimming helper instance, if one was created */
+    if( M4OSA_NULL != pC->m_pInstance )
+    {
+        err = H264MCS_Freeinstance(pC->m_pInstance);
+        pC->m_pInstance = M4OSA_NULL;
+    }
+
+    /* ----- Free video encoder stuff, if needed ----- */
+
+    if( ( M4OSA_NULL != pC->pViEncCtxt)
+        && (M4OSA_NULL != pC->pVideoEncoderGlobalFcts) )
+    {
+        err = pC->pVideoEncoderGlobalFcts->pFctCleanup(pC->pViEncCtxt);
+        pC->pViEncCtxt = M4OSA_NULL;
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->encoderState = M4MCS_kNoEncoder;
+    }
+
+    /**
+     * In the H263 case, we allocated our own DSI buffer */
+    if( ( M4ENCODER_kH263 == pC->EncodingVideoFormat)
+        && (M4OSA_NULL != pC->WriterVideoStreamInfo.Header.pBuf) )
+    {
+        free(pC->WriterVideoStreamInfo.Header.pBuf);
+        pC->WriterVideoStreamInfo.Header.pBuf = M4OSA_NULL;
+    }
+
+    /* Free the pre-resize YUV frame: three planes (Y, U, V), then the
+       plane-descriptor array itself */
+    if( M4OSA_NULL != pC->pPreResizeFrame )
+    {
+        if( M4OSA_NULL != pC->pPreResizeFrame[0].pac_data )
+        {
+            free(pC->pPreResizeFrame[0].pac_data);
+            pC->pPreResizeFrame[0].pac_data = M4OSA_NULL;
+        }
+
+        if( M4OSA_NULL != pC->pPreResizeFrame[1].pac_data )
+        {
+            free(pC->pPreResizeFrame[1].pac_data);
+            pC->pPreResizeFrame[1].pac_data = M4OSA_NULL;
+        }
+
+        if( M4OSA_NULL != pC->pPreResizeFrame[2].pac_data )
+        {
+            free(pC->pPreResizeFrame[2].pac_data);
+            pC->pPreResizeFrame[2].pac_data = M4OSA_NULL;
+        }
+        free(pC->pPreResizeFrame);
+        pC->pPreResizeFrame = M4OSA_NULL;
+    }
+
+    /* ----- Free the ssrc (audio sample-rate converter) stuff ----- */
+
+    if( M4OSA_NULL != pC->SsrcScratch )
+    {
+        free(pC->SsrcScratch);
+        pC->SsrcScratch = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferIn )
+    {
+        free(pC->pSsrcBufferIn);
+        pC->pSsrcBufferIn = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferOut )
+    {
+        free(pC->pSsrcBufferOut);
+        pC->pSsrcBufferOut = M4OSA_NULL;
+    }
+
+    if (pC->pLVAudioResampler != M4OSA_NULL)
+    {
+        LVDestroy(pC->pLVAudioResampler);
+        pC->pLVAudioResampler = M4OSA_NULL;
+    }
+
+    /* ----- Free the audio encoder stuff ----- */
+
+    /* NOTE(review): pAudioEncoderGlobalFcts is dereferenced without a NULL
+       check here; presumably it is always set whenever pAudioEncCtxt is
+       non-NULL — confirm against the audio-encoder registration path. */
+    if( M4OSA_NULL != pC->pAudioEncCtxt )
+    {
+        err = pC->pAudioEncoderGlobalFcts->pFctClose(pC->pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC->pAudioEncoderGlobalFcts->pFctCleanUp(pC->pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pAudioEncoderBuffer )
+    {
+        free(pC->pAudioEncoderBuffer);
+        pC->pAudioEncoderBuffer = M4OSA_NULL;
+    }
+
+    /* ----- Free all other stuff ----- */
+
+    /**
+     * Free the readers and the decoders */
+    M4MCS_intCleanUp_ReadersDecoders(pC);
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+    /**
+     * Free the still picture resources */
+
+    M4MCS_stillPicCleanUp(pC);
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+     * Free the shells interfaces */
+
+    M4MCS_unRegisterAllWriters(pContext);
+    M4MCS_unRegisterAllEncoders(pContext);
+    M4MCS_unRegisterAllReaders(pContext);
+    M4MCS_unRegisterAllDecoders(pContext);
+
+    /**
+     * Free the context itself; the caller's handle is invalid from here on */
+    free(pC);
+    pC = M4OSA_NULL;
+
+    M4OSA_TRACE3_0("M4MCS_cleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
+ * @brief Finish the MCS transcoding and free all resources used by the MCS
+ * whatever the state is.
+ * @note The context is no longer valid after this call
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_abort( M4MCS_Context pContext )
+{
+    M4MCS_InternalContext *pSession = (M4MCS_InternalContext *)(pContext);
+    M4OSA_ERR lastErr = M4NO_ERROR;
+    M4OSA_ERR closeErr = M4NO_ERROR;
+
+    /* Aborting a NULL context is a documented no-op */
+    if( M4OSA_NULL == pContext )
+    {
+        return M4NO_ERROR;
+    }
+
+    if( ( pSession->State != M4MCS_kState_CREATED)
+        && (pSession->State != M4MCS_kState_CLOSED) )
+    {
+        /* A session is in progress: cancel it, close the writer,
+         * then release everything. */
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+        if( pSession->m_bIsStillPicture )
+        {
+            /* Cancel any ongoing still-picture processing first */
+            lastErr = M4MCS_stillPicCancel(pSession);
+
+            if( lastErr != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_abort : M4MCS_stillPicCancel fails err = 0x%x", lastErr);
+            }
+            /* Processing is stopped; continue with close and cleanup */
+        }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+        /* Force the state so M4MCS_close() accepts the call */
+        pSession->State = M4MCS_kState_FINISHED;
+
+        lastErr = M4MCS_close(pContext);
+
+        if( lastErr != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_close fails err = 0x%x", lastErr);
+            closeErr = lastErr;
+        }
+
+        lastErr = M4MCS_cleanUp(pContext);
+
+        if( lastErr != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_cleanUp fails err = 0x%x", lastErr);
+        }
+    }
+    else
+    {
+        /* Nothing was started: jump straight to CLOSED and free resources */
+        pSession->State = M4MCS_kState_CLOSED;
+
+        lastErr = M4MCS_cleanUp(pContext);
+
+        if( lastErr != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_cleanUp fails err = 0x%x", lastErr);
+        }
+    }
+
+    /* A close failure takes precedence over a later cleanup failure */
+    return ( closeErr == M4NO_ERROR) ? lastErr : closeErr;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
+ * M4VIDEOEDITING_ClipProperties* pFileProperties);
+ * @brief Retrieves the properties of the audio and video streams from the input file.
+ * @param pContext (IN) MCS context
+ * @param pProperties (OUT) Pointer on an allocated M4VIDEOEDITING_ClipProperties
+structure which is filled with the input stream properties.
+ * @note The structure pProperties must be allocated and further de-allocated
+by the application. The function must be called in the opened state.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getInputFileProperties( M4MCS_Context pContext,
+    M4VIDEOEDITING_ClipProperties *pFileProperties )
+{
+    M4MCS_InternalContext *pSession = (M4MCS_InternalContext *)(pContext);
+
+    M4OSA_TRACE2_2("M4MCS_getInputFileProperties called with pContext=0x%x, \
+    pFileProperties=0x%x", pContext, pFileProperties);
+
+    /* Parameter sanity checks (active in debug builds only) */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_getInputFileProperties: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileProperties), M4ERR_PARAMETER,
+        "M4MCS_getInputFileProperties: pProperties is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    /* Still-picture clips are delegated to the dedicated still-picture MCS */
+    if( pSession->m_bIsStillPicture )
+    {
+        return M4MCS_stillPicGetInputFileProperties(pSession, pFileProperties);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /* Only valid in the OPENED state: the properties are computed at open time */
+    if( M4MCS_kState_OPENED == pSession->State )
+    {
+        /* Hand the caller a copy of the previously computed properties */
+        memcpy((void *)pFileProperties,
+            (void *) &pSession->InputFileProperties,
+            sizeof(M4VIDEOEDITING_ClipProperties));
+
+        return M4NO_ERROR;
+    }
+
+    M4OSA_TRACE1_1(
+        "M4MCS_getInputFileProperties(): Wrong State (%d), returning M4ERR_STATE",
+        pSession->State);
+    return M4ERR_STATE;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
+ * @brief Set the MCS video output parameters.
+ * @note Must be called after M4MCS_open. Must be called before M4MCS_step.
+ * @param pContext (IN) MCS context
+ * @param pParams (IN/OUT) Transcoding parameters
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263 : Output video frame size parameter is
+ * incompatible with H263 encoding
+ * @return M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263 : Output video frame size parameter is
+ * incompatible with H263 encoding
+ * @return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT : Undefined output video format parameter
+ * @return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE : Undefined output video frame size
+ * @return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE : Undefined output video frame rate
+ * @return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT : Undefined output audio format parameter
+ * @return M4MCS_ERR_DURATION_IS_NULL : Specified output parameters define a null duration stream
+ * (no audio and video)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_setOutputParams( M4MCS_Context pContext,
+ M4MCS_OutputParams *pParams )
+{
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+ M4OSA_UInt32 uiFrameWidth;
+ M4OSA_UInt32 uiFrameHeight;
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_2(
+ "M4MCS_setOutputParams called with pContext=0x%x, pParams=0x%x",
+ pContext, pParams);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4MCS_setOutputParams: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pParams), M4ERR_PARAMETER,
+ "M4MCS_setOutputParams: pParam is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+ if( pC->m_bIsStillPicture )
+ {
+ /**
+ * Call the corresponding still picture MCS function*/
+ return M4MCS_stillPicSetOutputParams(pC, pParams);
+ }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+ /**
+ * Check state automaton */
+
+ if( M4MCS_kState_OPENED != pC->State )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_setOutputParams(): Wrong State (%d), returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ }
+
+ /* Ignore audio or video stream if the output do not need it, */
+ /* or if the input file does not have any audio or video stream */
+ /*FlB 26.02.2009: add mp3 as mcs output format*/
+ if( ( pParams->OutputVideoFormat == M4VIDEOEDITING_kNoneVideo)
+ || (pC->VideoState == M4MCS_kStreamState_NOSTREAM)
+ || (pParams->OutputFileType == M4VIDEOEDITING_kFileType_AMR)
+ || (pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP3) )
+ {
+ pC->novideo = M4OSA_TRUE;
+ }
+
+ if( ( pParams->OutputAudioFormat == M4VIDEOEDITING_kNoneAudio)
+ || (pC->AudioState == M4MCS_kStreamState_NOSTREAM) )
+ {
+ pC->noaudio = M4OSA_TRUE;
+ }
+
+ if( pC->noaudio && pC->novideo )
+ {
+ M4OSA_TRACE1_0(
+ "!!! M4MCS_setOutputParams : clip is NULL, there is no audio, no video");
+ return M4MCS_ERR_DURATION_IS_NULL;
+ }
+
+ /* Set writer */
+ err = M4MCS_setCurrentWriter(pContext, pParams->OutputFileType);
+ M4ERR_CHECK_RETURN(err);
+
+ /* Set video parameters */
+ if( pC->novideo == M4OSA_FALSE )
+ {
+ /**
+ * Check Video Format correctness */
+
+ switch( pParams->OutputVideoFormat )
+ {
+ case M4VIDEOEDITING_kH263:
+ if( pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP4 )
+ return M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE;
+
+ pC->EncodingVideoFormat = M4ENCODER_kH263;
+ err = M4MCS_setCurrentVideoEncoder(pContext,
+ pParams->OutputVideoFormat);
+ M4ERR_CHECK_RETURN(err);
+ break;
+
+ case M4VIDEOEDITING_kMPEG4:
+
+ pC->EncodingVideoFormat = M4ENCODER_kMPEG4;
+ err = M4MCS_setCurrentVideoEncoder(pContext,
+ pParams->OutputVideoFormat);
+ M4ERR_CHECK_RETURN(err);
+ break;
+
+ case M4VIDEOEDITING_kH264:
+
+ pC->EncodingVideoFormat = M4ENCODER_kH264;
+ err = M4MCS_setCurrentVideoEncoder(pContext,
+ pParams->OutputVideoFormat);
+ M4ERR_CHECK_RETURN(err);
+ break;
+
+ case M4VIDEOEDITING_kNullVideo:
+ if( ( pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP4)
+ && (pC->InputFileProperties.VideoStreamType
+ == M4VIDEOEDITING_kH263) )
+ return M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE;
+
+
+ /* Encoder needed for begin cut to generate an I-frame */
+ pC->EncodingVideoFormat = M4ENCODER_kNULL;
+ err = M4MCS_setCurrentVideoEncoder(pContext,
+ pC->InputFileProperties.VideoStreamType);
+ M4ERR_CHECK_RETURN(err);
+ break;
+
+ default:
+ M4OSA_TRACE1_1("M4MCS_setOutputParams: Undefined output video format (%d),\
+ returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+ pParams->OutputVideoFormat);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+ }
+
+ /**
+ * Check Video frame size correctness */
+ if( M4VIDEOEDITING_kNullVideo == pParams->OutputVideoFormat )
+ {
+ uiFrameWidth =
+ pC->EncodingWidth = pC->InputFileProperties.uiVideoWidth;
+ uiFrameHeight =
+ pC->EncodingHeight = pC->InputFileProperties.uiVideoHeight;
+
+ /**
+ * Set output video profile and level */
+ pC->encodingVideoProfile = pC->InputFileProperties.uiVideoProfile;
+ /** Set the target video level, because input 3gp file may
+ * have wrong video level value (some encoders do not respect
+ * level restrictions like video resolution when content is created).
+ **/
+ pC->encodingVideoLevel = pParams->outputVideoLevel;
+
+ // Clip's original width and height may not be
+ // multiple of 16.
+ // Ensure encoding width and height are multiple of 16
+
+ uint32_t remainder = pC->EncodingWidth % 16;
+ if (remainder != 0) {
+ if (remainder >= 8) {
+ // Roll forward
+ pC->EncodingWidth =
+ pC->EncodingWidth + (16-remainder);
+ } else {
+ // Roll backward
+ pC->EncodingWidth =
+ pC->EncodingWidth - remainder;
+ }
+ uiFrameWidth = pC->EncodingWidth;
+ }
+
+ remainder = pC->EncodingHeight % 16;
+ if (remainder != 0) {
+ if (remainder >= 8) {
+ // Roll forward
+ pC->EncodingHeight =
+ pC->EncodingHeight + (16-remainder);
+ } else {
+ // Roll backward
+ pC->EncodingHeight =
+ pC->EncodingHeight - remainder;
+ }
+ uiFrameHeight = pC->EncodingHeight;
+ }
+
+ }
+ else
+ {
+ /**
+ * Set output video profile and level */
+ pC->encodingVideoProfile = pParams->outputVideoProfile;
+ pC->encodingVideoLevel = pParams->outputVideoLevel;
+
+ switch( pParams->OutputVideoFrameSize )
+ {
+ case M4VIDEOEDITING_kSQCIF:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_SQCIF_Width;
+ uiFrameHeight = pC->EncodingHeight = M4ENCODER_SQCIF_Height;
+ break;
+
+ case M4VIDEOEDITING_kQQVGA:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_QQVGA_Width;
+ uiFrameHeight = pC->EncodingHeight = M4ENCODER_QQVGA_Height;
+ break;
+
+ case M4VIDEOEDITING_kQCIF:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_QCIF_Width;
+ uiFrameHeight = pC->EncodingHeight = M4ENCODER_QCIF_Height;
+ break;
+
+ case M4VIDEOEDITING_kQVGA:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_QVGA_Width;
+ uiFrameHeight = pC->EncodingHeight = M4ENCODER_QVGA_Height;
+ break;
+
+ case M4VIDEOEDITING_kCIF:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_CIF_Width;
+ uiFrameHeight = pC->EncodingHeight = M4ENCODER_CIF_Height;
+ break;
+
+ case M4VIDEOEDITING_kVGA:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_VGA_Width;
+ uiFrameHeight = pC->EncodingHeight = M4ENCODER_VGA_Height;
+ break;
+ /* +PR LV5807 */
+ case M4VIDEOEDITING_kWVGA:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_WVGA_Width;
+ uiFrameHeight = pC->EncodingHeight = M4ENCODER_WVGA_Height;
+ break;
+
+ case M4VIDEOEDITING_kNTSC:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_NTSC_Width;
+ uiFrameHeight = pC->EncodingHeight = M4ENCODER_NTSC_Height;
+ break;
+ /* -PR LV5807*/
+ /* +CR Google */
+ case M4VIDEOEDITING_k640_360:
+ uiFrameWidth = pC->EncodingWidth = M4ENCODER_640_360_Width;
+ uiFrameHeight =
+ pC->EncodingHeight = M4ENCODER_640_360_Height;
+ break;
+
+ case M4VIDEOEDITING_k854_480:
+ uiFrameWidth =
+ pC->EncodingWidth = M4ENCODER_854_480_Width;
+ uiFrameHeight =
+ pC->EncodingHeight = M4ENCODER_854_480_Height;
+ break;
+
+ case M4VIDEOEDITING_k1280_720:
+ uiFrameWidth =
+ pC->EncodingWidth = M4ENCODER_1280_720_Width;
+ uiFrameHeight =
+ pC->EncodingHeight = M4ENCODER_1280_720_Height;
+ break;
+
+ case M4VIDEOEDITING_k1080_720:
+ uiFrameWidth =
+ pC->EncodingWidth = M4ENCODER_1080_720_Width;
+ uiFrameHeight =
+ pC->EncodingHeight = M4ENCODER_1080_720_Height;
+ break;
+
+ case M4VIDEOEDITING_k960_720:
+ uiFrameWidth =
+ pC->EncodingWidth = M4ENCODER_960_720_Width;
+ uiFrameHeight =
+ pC->EncodingHeight = M4ENCODER_960_720_Height;
+ break;
+
+ case M4VIDEOEDITING_k1920_1080:
+ uiFrameWidth =
+ pC->EncodingWidth = M4ENCODER_1920_1080_Width;
+ uiFrameHeight =
+ pC->EncodingHeight = M4ENCODER_1920_1080_Height;
+ break;
+ /* -CR Google */
+ default:
+ M4OSA_TRACE1_1(
+ "M4MCS_setOutputParams: Undefined output video frame size \
+ (%d), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE",
+ pParams->OutputVideoFrameSize);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+ }
+ }
+
+ /**
+ * Compute video max au size and max chunck size.
+ * We do it here because it depends on the frame size only, and
+ * because we need it for the file size/video bitrate estimations */
+ pC->uiVideoMaxAuSize =
+ (M4OSA_UInt32)(1.5F *(M4OSA_Float)(uiFrameWidth * uiFrameHeight) \
+ *M4MCS_VIDEO_MIN_COMPRESSION_RATIO);
+ pC->uiVideoMaxChunckSize = (M4OSA_UInt32)(pC->uiVideoMaxAuSize \
+ *
+ M4MCS_VIDEO_CHUNK_AU_SIZE_RATIO); /**< from max AU size to max Chunck size */
+
+ if( 0 == pC->uiVideoMaxAuSize )
+ {
+ /* Size may be zero in case of null encoding with unrecognized stream */
+ M4OSA_TRACE1_0("M4MCS_setOutputParams: video frame size is 0 returning\
+ M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE");
+ return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+ }
+
+
+ /**
+ * Size check for H263 (only valid sizes are CIF, QCIF and SQCIF) */
+
+ if( M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat )
+ {
+ switch( pParams->OutputVideoFrameSize )
+ {
+ case M4VIDEOEDITING_kSQCIF:
+ case M4VIDEOEDITING_kQCIF:
+ case M4VIDEOEDITING_kCIF:
+ /* OK */
+ break;
+
+ default:
+ M4OSA_TRACE1_0(
+ "M4MCS_setOutputParams():\
+ returning M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263");
+ return M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263;
+ }
+ }
+
+ /**
+ * Check Video Frame rate correctness */
+ if( M4VIDEOEDITING_kNullVideo != pParams->OutputVideoFormat )
+ {
+ switch( pParams->OutputVideoFrameRate )
+ {
+ case M4VIDEOEDITING_k5_FPS:
+ pC->EncodingVideoFramerate = M4ENCODER_k5_FPS;
+ break;
+
+ case M4VIDEOEDITING_k7_5_FPS:
+ pC->EncodingVideoFramerate = M4ENCODER_k7_5_FPS;
+ break;
+
+ case M4VIDEOEDITING_k10_FPS:
+ pC->EncodingVideoFramerate = M4ENCODER_k10_FPS;
+ break;
+
+ case M4VIDEOEDITING_k12_5_FPS:
+ pC->EncodingVideoFramerate = M4ENCODER_k12_5_FPS;
+ break;
+
+ case M4VIDEOEDITING_k15_FPS:
+ pC->EncodingVideoFramerate = M4ENCODER_k15_FPS;
+ break;
+
+ case M4VIDEOEDITING_k20_FPS:
+ pC->EncodingVideoFramerate = M4ENCODER_k20_FPS;
+ break;
+
+ case M4VIDEOEDITING_k25_FPS:
+ pC->EncodingVideoFramerate = M4ENCODER_k25_FPS;
+ break;
+
+ case M4VIDEOEDITING_k30_FPS:
+ pC->EncodingVideoFramerate = M4ENCODER_k30_FPS;
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4MCS_setOutputParams: Undefined output video frame rate\
+ (%d), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE",
+ pParams->OutputVideoFrameRate);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE;
+ }
+ }
+
+ /**
+ * Frame rate check for H263 (only dividers of 30 fps (29.97 actually)) */
+ if( M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat )
+ {
+ switch( pC->EncodingVideoFramerate )
+ {
+ case M4ENCODER_k5_FPS:
+ case M4ENCODER_k7_5_FPS:
+ case M4ENCODER_k10_FPS:
+ case M4ENCODER_k15_FPS:
+ case M4ENCODER_k30_FPS:
+ /* OK */
+ break;
+
+ default:
+ M4OSA_TRACE1_0(
+ "M4MCS_setOutputParams():\
+ returning M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263");
+ return M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263;
+ }
+ }
+ }
+
+ /* Set audio parameters */
+ if( pC->noaudio == M4OSA_FALSE )
+ {
+ /**
+ * Check Audio Format correctness */
+ switch( pParams->OutputAudioFormat )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+
+ err = M4MCS_setCurrentAudioEncoder(pContext,
+ pParams->OutputAudioFormat);
+ M4ERR_CHECK_RETURN(err);
+
+ pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
+ pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+ pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+ pC->AudioEncParams.SpecifParam.AmrSID = M4ENCODER_kAmrNoSID;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+
+ err = M4MCS_setCurrentAudioEncoder(pContext,
+ pParams->OutputAudioFormat);
+ M4ERR_CHECK_RETURN(err);
+
+ pC->AudioEncParams.Format = M4ENCODER_kAAC;
+ pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+
+ switch( pParams->OutputAudioSamplingFrequency )
+ {
+ case M4VIDEOEDITING_k8000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+ break;
+
+ case M4VIDEOEDITING_k16000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+ break;
+
+ case M4VIDEOEDITING_k22050_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
+ break;
+
+ case M4VIDEOEDITING_k24000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
+ break;
+
+ case M4VIDEOEDITING_k32000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
+ break;
+
+ case M4VIDEOEDITING_k44100_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
+ break;
+
+ case M4VIDEOEDITING_k48000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
+ break;
+
+ case M4VIDEOEDITING_k11025_ASF:
+ case M4VIDEOEDITING_k12000_ASF:
+ case M4VIDEOEDITING_kDefault_ASF:
+ break;
+ }
+ pC->AudioEncParams.ChannelNum =
+ (pParams->bAudioMono == M4OSA_TRUE) ? \
+ M4ENCODER_kMono : M4ENCODER_kStereo;
+ pC->AudioEncParams.SpecifParam.AacParam.Regulation =
+ M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
+ /* unused */
+ pC->AudioEncParams.SpecifParam.AacParam.bIS = M4OSA_FALSE;
+ pC->AudioEncParams.SpecifParam.AacParam.bMS = M4OSA_FALSE;
+ pC->AudioEncParams.SpecifParam.AacParam.bPNS = M4OSA_FALSE;
+ pC->AudioEncParams.SpecifParam.AacParam.bTNS = M4OSA_FALSE;
+ /* TODO change into highspeed asap */
+ pC->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
+ M4OSA_FALSE;
+ break;
+
+ /*FlB 26.02.2009: add mp3 as mcs output format, add mp3 encoder*/
+ case M4VIDEOEDITING_kMP3:
+ err = M4MCS_setCurrentAudioEncoder(pContext,
+ pParams->OutputAudioFormat);
+ M4ERR_CHECK_RETURN(err);
+
+ pC->AudioEncParams.Format = M4ENCODER_kMP3;
+ pC->AudioEncParams.ChannelNum =
+ (pParams->bAudioMono == M4OSA_TRUE) ? \
+ M4ENCODER_kMono : M4ENCODER_kStereo;
+
+ pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+
+ switch( pParams->OutputAudioSamplingFrequency )
+ {
+ case M4VIDEOEDITING_k8000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+ break;
+
+ case M4VIDEOEDITING_k11025_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k11025Hz;
+ break;
+
+ case M4VIDEOEDITING_k12000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k12000Hz;
+ break;
+
+ case M4VIDEOEDITING_k16000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+ break;
+
+ case M4VIDEOEDITING_k22050_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
+ break;
+
+ case M4VIDEOEDITING_k24000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
+ break;
+
+ case M4VIDEOEDITING_k32000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
+ break;
+
+ case M4VIDEOEDITING_k44100_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
+ break;
+
+ case M4VIDEOEDITING_k48000_ASF:
+ pC->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
+ break;
+
+ case M4VIDEOEDITING_kDefault_ASF:
+ break;
+ }
+
+ break;
+
+ case M4VIDEOEDITING_kNullAudio:
+ if( pParams->pEffects == M4OSA_NULL || pParams->nbEffects == 0 )
+ {
+ /* no encoder needed */
+ pC->AudioEncParams.Format = M4ENCODER_kAudioNULL;
+ pC->AudioEncParams.Frequency =
+ pC->pReaderAudioStream->m_samplingFrequency;
+ pC->AudioEncParams.ChannelNum =
+ (pC->pReaderAudioStream->m_nbChannels == 1) ? \
+ M4ENCODER_kMono : M4ENCODER_kStereo;
+ }
+ else
+ {
+ pC->AudioEncParams.Frequency =
+ pC->pReaderAudioStream->m_samplingFrequency;
+ pC->AudioEncParams.ChannelNum =
+ (pC->pReaderAudioStream->m_nbChannels == 1) ? \
+ M4ENCODER_kMono : M4ENCODER_kStereo;
+
+ switch( pC->InputFileProperties.AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ M4OSA_TRACE3_0(
+ "M4MCS_setOutputParams calling \
+ M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, AMR");
+ err = M4MCS_setCurrentAudioEncoder(pContext,
+ pC->InputFileProperties.AudioStreamType);
+ M4ERR_CHECK_RETURN(err);
+
+ pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
+ pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+ pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+
+ if( pC->pReaderAudioStream->m_samplingFrequency
+ != 8000 )
+ {
+ pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
+ }
+ pC->AudioEncParams.SpecifParam.AmrSID =
+ M4ENCODER_kAmrNoSID;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ M4OSA_TRACE3_0(
+ "M4MCS_setOutputParams calling \
+ M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, AAC");
+ err = M4MCS_setCurrentAudioEncoder(pContext,
+ pC->InputFileProperties.AudioStreamType);
+ M4ERR_CHECK_RETURN(err);
+
+ pC->AudioEncParams.Format = M4ENCODER_kAAC;
+ pC->AudioEncParams.SpecifParam.AacParam.Regulation =
+ M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
+ pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+ pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+
+ switch( pC->pReaderAudioStream->
+ m_samplingFrequency )
+ {
+ case 16000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k16000Hz;
+ break;
+
+ case 22050:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k22050Hz;
+ break;
+
+ case 24000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k24000Hz;
+ break;
+
+ case 32000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k32000Hz;
+ break;
+
+ case 44100:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k44100Hz;
+ break;
+
+ case 48000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k48000Hz;
+ break;
+
+ default:
+ pC->AudioEncParams.Format = M4ENCODER_kAAC;
+ break;
+ }
+ /* unused */
+ pC->AudioEncParams.SpecifParam.AacParam.bIS =
+ M4OSA_FALSE;
+ pC->AudioEncParams.SpecifParam.AacParam.bMS =
+ M4OSA_FALSE;
+ pC->AudioEncParams.SpecifParam.AacParam.bPNS =
+ M4OSA_FALSE;
+ pC->AudioEncParams.SpecifParam.AacParam.bTNS =
+ M4OSA_FALSE;
+ /* TODO change into highspeed asap */
+ pC->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
+ M4OSA_FALSE;
+ break;
+
+ case M4VIDEOEDITING_kMP3:
+ M4OSA_TRACE3_0(
+ "M4MCS_setOutputParams calling\
+ M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, MP3");
+ err = M4MCS_setCurrentAudioEncoder(pContext,
+ pC->InputFileProperties.AudioStreamType);
+ M4ERR_CHECK_RETURN(err);
+
+ pC->AudioEncParams.Format = M4ENCODER_kMP3;
+ pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+
+ switch( pC->pReaderAudioStream->
+ m_samplingFrequency )
+ {
+ case 8000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k8000Hz;
+ break;
+
+ case 16000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k16000Hz;
+ break;
+
+ case 22050:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k22050Hz;
+ break;
+
+ case 24000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k24000Hz;
+ break;
+
+ case 32000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k32000Hz;
+ break;
+
+ case 44100:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k44100Hz;
+ break;
+
+ case 48000:
+ pC->AudioEncParams.Frequency =
+ M4ENCODER_k48000Hz;
+ break;
+
+ default:
+ pC->AudioEncParams.Format = M4ENCODER_kMP3;
+ break;
+ }
+ break;
+
+ case M4VIDEOEDITING_kEVRC:
+ case M4VIDEOEDITING_kUnsupportedAudio:
+ default:
+ M4OSA_TRACE1_1(
+ "M4MCS_setOutputParams: Output audio format (%d) is\
+ incompatible with audio effects, returning \
+ M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+ pC->InputFileProperties.AudioStreamType);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+ }
+ }
+ break;
+ /* EVRC
+ // case M4VIDEOEDITING_kEVRC:
+ //
+ // err = M4MCS_setCurrentAudioEncoder(pContext, pParams->\
+ // OutputAudioFormat);
+ // M4ERR_CHECK_RETURN(err);
+ //
+ // pC->AudioEncParams.Format = M4ENCODER_kEVRC;
+ // pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+ // pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+ // break; */
+
+ default:
+ M4OSA_TRACE1_1("M4MCS_setOutputParams: Undefined output audio format (%d),\
+ returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+ pParams->OutputAudioFormat);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+ }
+ }
+
+ if( pParams->pOutputPCMfile != M4OSA_NULL )
+ {
+ pC->pOutputPCMfile = pParams->pOutputPCMfile;
+
+ /* Open output PCM file */
+ pC->pOsaFileWritPtr->openWrite(&(pC->pOutputPCMfile),
+ pParams->pOutputPCMfile, M4OSA_kFileWrite);
+ }
+ else
+ {
+ pC->pOutputPCMfile = M4OSA_NULL;
+ }
+
+ /*Store media rendering parameter into the internal context*/
+ pC->MediaRendering = pParams->MediaRendering;
+
+ /* Add audio effects*/
+ /*Copy MCS effects structure into internal context*/
+ if( pParams->nbEffects > 0 )
+ {
+ M4OSA_UInt32 j = 0;
+ pC->nbEffects = pParams->nbEffects;
+ pC->pEffects = (M4MCS_EffectSettings *)M4OSA_32bitAlignedMalloc(pC->nbEffects \
+ *sizeof(M4MCS_EffectSettings), M4MCS,
+ (M4OSA_Char *)"Allocation of effects list");
+
+ if( pC->pEffects == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("M4MCS_setOutputParams(): allocation error");
+ return M4ERR_ALLOC;
+ }
+
+ for ( j = 0; j < pC->nbEffects; j++ )
+ {
+ /* Copy effect to "local" structure */
+ memcpy((void *) &(pC->pEffects[j]),
+ (void *) &(pParams->pEffects[j]),
+ sizeof(M4MCS_EffectSettings));
+
+ switch( pC->pEffects[j].AudioEffectType )
+ {
+ case M4MCS_kAudioEffectType_None:
+ M4OSA_TRACE3_1(
+ "M4MCS_setOutputParams(): effect type %i is None", j);
+ pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
+ pC->pEffects[j].ExtAudioEffectFct = M4OSA_NULL;
+ break;
+
+ case M4MCS_kAudioEffectType_FadeIn:
+ M4OSA_TRACE3_1(
+ "M4MCS_setOutputParams(): effect type %i is FadeIn", j);
+ pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
+ pC->pEffects[j].ExtAudioEffectFct =
+ M4MCS_editAudioEffectFct_FadeIn;
+ break;
+
+ case M4MCS_kAudioEffectType_FadeOut:
+ M4OSA_TRACE3_1(
+ "M4MCS_setOutputParams(): effect type %i is FadeOut",
+ j);
+ pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
+ pC->pEffects[j].ExtAudioEffectFct =
+ M4MCS_editAudioEffectFct_FadeOut;
+ break;
+
+ case M4MCS_kAudioEffectType_External:
+ M4OSA_TRACE3_1(
+ "M4MCS_setOutputParams(): effect type %i is External",
+ j);
+
+ if( pParams->pEffects != M4OSA_NULL )
+ {
+ if( pParams->pEffects[j].ExtAudioEffectFct
+ == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_1("M4MCS_setOutputParams(): no external effect function\
+ associated to external effect number %i", j);
+ return M4ERR_PARAMETER;
+ }
+ pC->pEffects[j].pExtAudioEffectFctCtxt =
+ pParams->pEffects[j].pExtAudioEffectFctCtxt;
+
+ pC->pEffects[j].ExtAudioEffectFct =
+ pParams->pEffects[j].ExtAudioEffectFct;
+ }
+
+ break;
+
+ default:
+ M4OSA_TRACE1_0(
+ "M4MCS_setOutputParams(): effect type not recognized");
+ return M4ERR_PARAMETER;
+ }
+ }
+ }
+ else
+ {
+ pC->nbEffects = 0;
+ pC->pEffects = M4OSA_NULL;
+ }
+
+ /**
+ * Update state automaton */
+ pC->State = M4MCS_kState_SET;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4MCS_setOutputParams(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
+ * @brief Set the values of the encoding parameters
+ * @note Must be called before M4MCS_checkParamsAndStart().
+ * @param pContext (IN) MCS context
+ * @param pRates (IN) Transcoding parameters
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
+ * @return M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac, 12.2
+ * for amr, 8 for mp3)
+ * @return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equals
+ * @return M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than the input clip
+ * duration
+ * @return M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
+ * @return M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output file at given
+ * bitrates
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 8 Mbps)
+ * @return M4MCS_ERR_VIDEOBITRATE_TOO_LOW: Video bitrate too low
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_setEncodingParams( M4MCS_Context pContext,
+    M4MCS_EncodingParams *pRates )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4OSA_UInt32 j = 0;
+
+    M4OSA_TRACE2_2(
+        "M4MCS_setEncodingParams called with pContext=0x%x, pRates=0x%x",
+        pContext, pRates);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_setEncodingParams: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pRates), M4ERR_PARAMETER,
+        "M4MCS_setEncodingParams: pRates is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicSetEncodingParams(pC, pRates);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    if( M4MCS_kState_SET != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_setEncodingParams(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Set given values */
+    pC->uiVideoBitrate = pRates->OutputVideoBitrate;
+    pC->uiAudioBitrate = pRates->OutputAudioBitrate;
+    pC->uiBeginCutTime = pRates->BeginCutTime;
+    pC->uiEndCutTime = pRates->EndCutTime;
+    pC->uiMaxFileSize = pRates->OutputFileSize;
+
+    /**
+    * Check begin cut time validity */
+    if( pC->uiBeginCutTime >= pC->InputFileProperties.uiClipDuration )
+    {
+        M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin cut larger than duration (%d>%d),\
+            returning M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION",
+            pC->uiBeginCutTime, pC->InputFileProperties.uiClipDuration);
+        return M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION;
+    }
+
+    /**
+    * If end cut time is too large, we set it to the clip duration */
+    if( pC->uiEndCutTime > pC->InputFileProperties.uiClipDuration )
+    {
+        pC->uiEndCutTime = pC->InputFileProperties.uiClipDuration;
+    }
+
+    /**
+    * Check end cut time validity (an end cut of 0 means "to end of clip"
+    * and is therefore not checked here) */
+    if( pC->uiEndCutTime > 0 )
+    {
+        if( pC->uiEndCutTime < pC->uiBeginCutTime )
+        {
+            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin cut greater than end cut (%d,%d), \
+                returning M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT",
+                pC->uiBeginCutTime, pC->uiEndCutTime);
+            return M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT;
+        }
+
+        if( pC->uiEndCutTime == pC->uiBeginCutTime )
+        {
+            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin and End cuts are equal (%d,%d),\
+                returning M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT",
+                pC->uiBeginCutTime, pC->uiEndCutTime);
+            return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+        }
+    }
+
+    /**
+    * FlB 2009.03.04: check audio effects start time and duration validity*/
+    for ( j = 0; j < pC->nbEffects; j++ )
+    {
+        /* An end cut of 0 means "whole clip": substitute the clip duration
+         * so the effect window is checked against the real output length. */
+        M4OSA_UInt32 outputEndCut = pC->uiEndCutTime;
+
+        if( pC->uiEndCutTime == 0 )
+        {
+            outputEndCut = pC->InputFileProperties.uiClipDuration;
+        }
+
+        if( pC->pEffects[j].uiStartTime > (outputEndCut - pC->uiBeginCutTime) )
+        {
+            /* Bugfix: trace the same duration that was compared against
+             * (outputEndCut - begin), not (uiEndCutTime - begin), which
+             * underflows when uiEndCutTime is still 0. */
+            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Effects start time is larger than\
+                duration (%d,%d), returning M4ERR_PARAMETER",
+                pC->pEffects[j].uiStartTime,
+                (outputEndCut - pC->uiBeginCutTime));
+            return M4ERR_PARAMETER;
+        }
+
+        if( pC->pEffects[j].uiStartTime + pC->pEffects[j].uiDuration > \
+            (outputEndCut - pC->uiBeginCutTime) )
+        {
+            /* Re-adjust the effect duration until the end of the output clip*/
+            pC->pEffects[j].uiDuration = (outputEndCut - pC->uiBeginCutTime) - \
+                pC->pEffects[j].uiStartTime;
+        }
+    }
+
+    /* Check audio bitrate consistency (only when audio is present and
+     * actually re-encoded; AudioNULL means pass-through) */
+    if( ( pC->noaudio == M4OSA_FALSE)
+        && (pC->AudioEncParams.Format != M4ENCODER_kAudioNULL) )
+    {
+        if( pC->uiAudioBitrate != M4VIDEOEDITING_kUndefinedBitrate )
+        {
+            if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
+            {
+                /* AMR-NB is constrained to exactly 12.2 kbps here */
+                if( pC->uiAudioBitrate > M4VIDEOEDITING_k12_2_KBPS )
+                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                if( pC->uiAudioBitrate < M4VIDEOEDITING_k12_2_KBPS )
+                    return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+            }
+            //EVRC
+            //            else if(pC->AudioEncParams.Format == M4ENCODER_kEVRC)
+            //            {
+            //                if(pC->uiAudioBitrate > M4VIDEOEDITING_k9_2_KBPS)
+            //                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+            //                if(pC->uiAudioBitrate < M4VIDEOEDITING_k9_2_KBPS)
+            //                    return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+            //            }
+            /*FlB 26.02.2009: add mp3 as mcs output format, add mp3 encoder*/
+            else if( pC->AudioEncParams.Format == M4ENCODER_kMP3 )
+            {
+                /* MP3 bitrate limits depend on the MPEG layer implied by
+                 * the sampling frequency */
+                if( pC->AudioEncParams.Frequency >= M4ENCODER_k32000Hz )
+                {
+                    /*Mpeg layer 1*/
+                    if( pC->uiAudioBitrate > 320000 )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                    if( pC->uiAudioBitrate < 32000 )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+                else if( pC->AudioEncParams.Frequency >= M4ENCODER_k16000Hz )
+                {
+                    /*Mpeg layer 2*/
+                    if( pC->uiAudioBitrate > 160000 )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                    if( ( pC->uiAudioBitrate < 8000
+                        && pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+                        || (pC->uiAudioBitrate < 16000
+                        && pC->AudioEncParams.ChannelNum
+                        == M4ENCODER_kStereo) )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+                else if( pC->AudioEncParams.Frequency == M4ENCODER_k8000Hz
+                    || pC->AudioEncParams.Frequency == M4ENCODER_k11025Hz
+                    || pC->AudioEncParams.Frequency == M4ENCODER_k12000Hz )
+                {
+                    /*Mpeg layer 2.5*/
+                    if( pC->uiAudioBitrate > 64000 )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                    if( ( pC->uiAudioBitrate < 8000
+                        && pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+                        || (pC->uiAudioBitrate < 16000
+                        && pC->AudioEncParams.ChannelNum
+                        == M4ENCODER_kStereo) )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+                else
+                {
+                    M4OSA_TRACE1_1("M4MCS_setEncodingParams: MP3 audio sampling frequency error\
+                        (%d)", pC->AudioEncParams.Frequency);
+                    return M4ERR_PARAMETER;
+                }
+            }
+            else
+            {
+                /* AAC (and other) formats: 16/32 kbps min (mono/stereo),
+                 * 192 kbps max */
+                if( pC->uiAudioBitrate > M4VIDEOEDITING_k192_KBPS )
+                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                if( pC->AudioEncParams.ChannelNum == M4ENCODER_kMono )
+                {
+                    if( pC->uiAudioBitrate < M4VIDEOEDITING_k16_KBPS )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+                else
+                {
+                    if( pC->uiAudioBitrate < M4VIDEOEDITING_k32_KBPS )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+            }
+        }
+    }
+    else
+    {
+        /* NULL audio : copy input file bitrate */
+        pC->uiAudioBitrate = pC->InputFileProperties.uiAudioBitrate;
+    }
+
+    /* Check video bitrate consistency */
+    if( ( pC->novideo == M4OSA_FALSE)
+        && (pC->EncodingVideoFormat != M4ENCODER_kNULL) )
+    {
+        if( pC->uiVideoBitrate != M4VIDEOEDITING_kUndefinedBitrate )
+        {
+            if( pC->uiVideoBitrate > M4VIDEOEDITING_k8_MBPS )
+                return M4MCS_ERR_VIDEOBITRATE_TOO_HIGH;
+
+            if( pC->uiVideoBitrate < M4VIDEOEDITING_k16_KBPS )
+                return M4MCS_ERR_VIDEOBITRATE_TOO_LOW;
+        }
+    }
+    else
+    {
+        /* NULL video : copy input file bitrate */
+        pC->uiVideoBitrate = pC->InputFileProperties.uiVideoBitrate;
+    }
+
+    /* Only accept a sane, strictly positive timescale (<= 30000) */
+    if( pRates->OutputVideoTimescale <= 30000
+        && pRates->OutputVideoTimescale > 0 )
+    {
+        pC->outputVideoTimescale = pRates->OutputVideoTimescale;
+    }
+
+    /* Check file size */
+    return M4MCS_intCheckMaxFileSize(pC);
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
+ * @brief Get the extended values of the encoding parameters
+ * @note Could be called after M4MCS_setEncodingParams.
+ * @param pContext (IN) MCS context
+ * @param pRates (OUT) Transcoding parameters
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Encoding settings would produce a null duration
+ * clip = encoding is impossible
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getExtendedEncodingParams( M4MCS_Context pContext,
+    M4MCS_EncodingParams *pRates )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+    /* Minimum bitrates imposed by the selected codecs (0 when the track
+     * is absent or passed through unchanged) */
+    M4OSA_Int32 minaudiobitrate;
+    M4OSA_Int32 minvideobitrate;
+    /* Largest audio+video bitrate that still fits OutputFileSize */
+    M4OSA_Int32 maxcombinedbitrate;
+
+    M4OSA_Int32 calcbitrate;
+
+    /* Durations in ms: maxduration is the longest output that fits the
+     * size budget, calcduration the currently requested cut window */
+    M4OSA_UInt32 maxduration;
+    M4OSA_UInt32 calcduration;
+
+    /* A "fixed" track has a bitrate the negotiation below must not touch
+     * (absent track, pass-through, or constant-rate codec like AMR-NB) */
+    M4OSA_Bool fixed_audio = M4OSA_FALSE;
+    M4OSA_Bool fixed_video = M4OSA_FALSE;
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicGetExtendedEncodingParams(pC, pRates);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /* Seed the output struct from the values stored by
+     * M4MCS_setEncodingParams, snapped to the nearest supported bitrate */
+    pRates->OutputVideoBitrate =
+        M4MCS_intGetNearestBitrate(pC->uiVideoBitrate, 0);
+    pRates->OutputAudioBitrate =
+        M4MCS_intGetNearestBitrate(pC->uiAudioBitrate, 0);
+    pRates->BeginCutTime = pC->uiBeginCutTime;
+    pRates->EndCutTime = pC->uiEndCutTime;
+    pRates->OutputFileSize = pC->uiMaxFileSize;
+
+    /**
+    * Check state automaton */
+    /* NOTE(review): pRates has already been written above, so on
+     * M4ERR_STATE the caller's struct is partially modified -- confirm
+     * this is intended. */
+    if( M4MCS_kState_SET != pC->State )
+    {
+        M4OSA_TRACE1_1("M4MCS_getExtendedEncodingParams(): Wrong State (%d),\
+            returning M4ERR_STATE", pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Compute min audio bitrate */
+    if( pC->noaudio )
+    {
+        fixed_audio = M4OSA_TRUE;
+        pRates->OutputAudioBitrate = 0;
+        minaudiobitrate = 0;
+    }
+    else if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+    {
+        /* Audio pass-through: keep the input bitrate as-is */
+        fixed_audio = M4OSA_TRUE;
+        pRates->OutputAudioBitrate = pC->InputFileProperties.uiAudioBitrate;
+        minaudiobitrate = pC->InputFileProperties.uiAudioBitrate;
+    }
+    else
+    {
+        if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
+        {
+            /* AMR-NB is constant rate: 12.2 kbps, not negotiable */
+            fixed_audio = M4OSA_TRUE;
+            pRates->OutputAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
+            minaudiobitrate = M4VIDEOEDITING_k12_2_KBPS;
+        }
+        //EVRC
+        //        if(pC->AudioEncParams.Format == M4ENCODER_kEVRC)
+        //        {
+        //            fixed_audio = M4OSA_TRUE;
+        //            pRates->OutputAudioBitrate = M4VIDEOEDITING_k9_2_KBPS;
+        //            minaudiobitrate = M4VIDEOEDITING_k9_2_KBPS;
+        //        }
+        /*FlB 26.02.2009: add mp3 as mcs output format*/
+        else if( pC->AudioEncParams.Format == M4ENCODER_kMP3 )
+        {
+            minaudiobitrate =
+                M4VIDEOEDITING_k32_KBPS; /*Default min audio bitrate for MPEG layer 1,
+                for both mono and stereo channels*/
+        }
+        else
+        {
+            minaudiobitrate = (pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+                ? M4VIDEOEDITING_k16_KBPS : M4VIDEOEDITING_k32_KBPS;
+        }
+    }
+
+    /* Check audio bitrate is in the correct range */
+    if( fixed_audio == M4OSA_FALSE )
+    {
+        if( ( pC->uiAudioBitrate > 0)
+            && (pRates->OutputAudioBitrate < minaudiobitrate) )
+        {
+            pRates->OutputAudioBitrate = minaudiobitrate;
+        }
+
+        if( pRates->OutputAudioBitrate > M4VIDEOEDITING_k96_KBPS )
+        {
+            pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
+        }
+    }
+
+    /* Compute min video bitrate */
+    if( pC->novideo )
+    {
+        fixed_video = M4OSA_TRUE;
+        pRates->OutputVideoBitrate = 0;
+        minvideobitrate = 0;
+    }
+    else if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+    {
+        /* Video pass-through: keep the input bitrate as-is */
+        fixed_video = M4OSA_TRUE;
+        pRates->OutputVideoBitrate = pC->InputFileProperties.uiVideoBitrate;
+        minvideobitrate = pC->InputFileProperties.uiVideoBitrate;
+    }
+    else
+    {
+        minvideobitrate = M4VIDEOEDITING_k16_KBPS;
+    }
+
+    /* Check video bitrate is in the correct range */
+    if( fixed_video == M4OSA_FALSE )
+    {
+        if( ( pC->uiVideoBitrate > 0)
+            && (pRates->OutputVideoBitrate < minvideobitrate) )
+        {
+            pRates->OutputVideoBitrate = minvideobitrate;
+        }
+        /*+ New Encoder bitrates */
+        if( pRates->OutputVideoBitrate > M4VIDEOEDITING_k8_MBPS )
+        {
+            pRates->OutputVideoBitrate = M4VIDEOEDITING_k8_MBPS;
+        }
+        /*- New Encoder bitrates */
+    }
+
+    /* Check cut times are in correct range; invalid windows collapse to
+     * "whole clip" (0,0) rather than erroring out */
+    if( ( pRates->BeginCutTime >= pC->InputFileProperties.uiClipDuration)
+        || (( pRates->BeginCutTime >= pRates->EndCutTime)
+        && (pRates->EndCutTime > 0)) )
+    {
+        pRates->BeginCutTime = 0;
+        pRates->EndCutTime = 0;
+    }
+
+    if( pRates->EndCutTime == 0 )
+        calcduration =
+        pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
+    else
+        calcduration = pRates->EndCutTime - pRates->BeginCutTime;
+
+    /* priority 1 : max file size */
+    if( pRates->OutputFileSize == 0 )
+    {
+        /* we can put maximum values for all undefined parameters */
+        if( pRates->EndCutTime == 0 )
+        {
+            pRates->EndCutTime = pC->InputFileProperties.uiClipDuration;
+        }
+
+        if( ( pRates->OutputAudioBitrate == M4VIDEOEDITING_kUndefinedBitrate)
+            && (fixed_audio == M4OSA_FALSE) )
+        {
+            pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
+        }
+
+        if( ( pRates->OutputVideoBitrate == M4VIDEOEDITING_kUndefinedBitrate)
+            && (fixed_video == M4OSA_FALSE) )
+        {
+            /*+ New Encoder bitrates */
+            pRates->OutputVideoBitrate = M4VIDEOEDITING_k8_MBPS;
+            /*- New Encoder bitrates */
+        }
+    }
+    else
+    {
+        /* compute max duration */
+        /* NOTE(review): if both tracks are absent (noaudio && novideo),
+         * minvideobitrate + minaudiobitrate is 0 and this divides by
+         * zero -- confirm callers exclude that combination. */
+        maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+            / M4MCS_MOOV_OVER_FILESIZE_RATIO
+            / (minvideobitrate + minaudiobitrate) * 8000.0);
+
+        if( maxduration
+            + pRates->BeginCutTime > pC->InputFileProperties.uiClipDuration )
+        {
+            maxduration =
+                pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
+        }
+
+        /* priority 2 : cut times */
+        if( ( pRates->BeginCutTime > 0) || (pRates->EndCutTime > 0) )
+        {
+            if( calcduration > maxduration )
+            {
+                calcduration = maxduration;
+            }
+
+            if( calcduration == 0 )
+            {
+                return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+            }
+
+            maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
+                / M4MCS_MOOV_OVER_FILESIZE_RATIO / (calcduration / 8000.0));
+
+            /* audio and video bitrates */
+            if( ( pRates->OutputAudioBitrate
+                == M4VIDEOEDITING_kUndefinedBitrate)
+                && (pRates->OutputVideoBitrate
+                == M4VIDEOEDITING_kUndefinedBitrate) )
+            {
+                /* set audio = 1/3 and video = 2/3 */
+                if( fixed_audio == M4OSA_FALSE )
+                {
+                    if( pC->novideo )
+                        pRates->OutputAudioBitrate =
+                        M4MCS_intGetNearestBitrate(maxcombinedbitrate, 0);
+                    else
+                        pRates->OutputAudioBitrate =
+                        M4MCS_intGetNearestBitrate(maxcombinedbitrate / 3,
+                        0);
+
+                    if( pRates->OutputAudioBitrate < minaudiobitrate )
+                        pRates->OutputAudioBitrate = minaudiobitrate;
+
+                    if( pRates->OutputAudioBitrate > M4VIDEOEDITING_k96_KBPS )
+                        pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
+                }
+
+                if( fixed_video == M4OSA_FALSE )
+                {
+                    /* Video gets whatever remains after audio */
+                    pRates->OutputVideoBitrate =
+                        M4MCS_intGetNearestBitrate(maxcombinedbitrate
+                        - pRates->OutputAudioBitrate, 0);
+
+                    if( pRates->OutputVideoBitrate < minvideobitrate )
+                        pRates->OutputVideoBitrate = minvideobitrate;
+
+                    if( pRates->OutputVideoBitrate > M4VIDEOEDITING_k8_MBPS )
+                        pRates->OutputVideoBitrate =
+                        M4VIDEOEDITING_k8_MBPS; /*+ New Encoder
+                        bitrates */
+                }
+            }
+            else
+            {
+                /* priority 3 : audio bitrate */
+                if( pRates->OutputAudioBitrate
+                    != M4VIDEOEDITING_kUndefinedBitrate )
+                {
+                    /* Step the audio bitrate down until audio + minimal
+                     * video fits the size budget */
+                    while( ( fixed_audio == M4OSA_FALSE)
+                        && (pRates->OutputAudioBitrate >= minaudiobitrate)
+                        && (pRates->OutputAudioBitrate
+                        + minvideobitrate > maxcombinedbitrate) )
+                    {
+                        pRates->OutputAudioBitrate =
+                            M4MCS_intGetNearestBitrate(
+                            pRates->OutputAudioBitrate, -1);
+                    }
+
+                    if( ( fixed_audio == M4OSA_FALSE)
+                        && (pRates->OutputAudioBitrate < minaudiobitrate) )
+                    {
+                        pRates->OutputAudioBitrate = minaudiobitrate;
+                    }
+
+                    calcbitrate = M4MCS_intGetNearestBitrate(
+                        maxcombinedbitrate
+                        - pRates->OutputAudioBitrate, 0);
+
+                    if( calcbitrate < minvideobitrate )
+                        calcbitrate = minvideobitrate;
+
+                    if( calcbitrate > M4VIDEOEDITING_k8_MBPS )
+                        calcbitrate = M4VIDEOEDITING_k8_MBPS;
+
+                    if( ( fixed_video == M4OSA_FALSE)
+                        && (( pRates->OutputVideoBitrate
+                        == M4VIDEOEDITING_kUndefinedBitrate)
+                        || (pRates->OutputVideoBitrate > calcbitrate)) )
+                    {
+                        pRates->OutputVideoBitrate = calcbitrate;
+                    }
+                }
+                else
+                {
+                    /* priority 4 : video bitrate */
+                    if( pRates->OutputVideoBitrate
+                        != M4VIDEOEDITING_kUndefinedBitrate )
+                    {
+                        /* Step the video bitrate down until video + minimal
+                         * audio fits the size budget */
+                        while( ( fixed_video == M4OSA_FALSE)
+                            && (pRates->OutputVideoBitrate >= minvideobitrate)
+                            && (pRates->OutputVideoBitrate
+                            + minaudiobitrate > maxcombinedbitrate) )
+                        {
+                            pRates->OutputVideoBitrate =
+                                M4MCS_intGetNearestBitrate(
+                                pRates->OutputVideoBitrate, -1);
+                        }
+
+                        if( ( fixed_video == M4OSA_FALSE)
+                            && (pRates->OutputVideoBitrate < minvideobitrate) )
+                        {
+                            pRates->OutputVideoBitrate = minvideobitrate;
+                        }
+
+                        calcbitrate =
+                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
+                            - pRates->OutputVideoBitrate, 0);
+
+                        if( calcbitrate < minaudiobitrate )
+                            calcbitrate = minaudiobitrate;
+
+                        if( calcbitrate > M4VIDEOEDITING_k96_KBPS )
+                            calcbitrate = M4VIDEOEDITING_k96_KBPS;
+
+                        if( ( fixed_audio == M4OSA_FALSE)
+                            && (( pRates->OutputAudioBitrate
+                            == M4VIDEOEDITING_kUndefinedBitrate)
+                            || (pRates->OutputAudioBitrate > calcbitrate)) )
+                        {
+                            pRates->OutputAudioBitrate = calcbitrate;
+                        }
+                    }
+                }
+            }
+        }
+        else
+        {
+            /* No cut window requested: derive durations from bitrates */
+            /* priority 3 : audio bitrate */
+            if( pRates->OutputAudioBitrate != M4VIDEOEDITING_kUndefinedBitrate )
+            {
+                /* priority 4 : video bitrate */
+                if( pRates->OutputVideoBitrate
+                    != M4VIDEOEDITING_kUndefinedBitrate )
+                {
+                    /* compute max duration */
+                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (pRates->OutputVideoBitrate
+                        + pRates->OutputAudioBitrate) * 8000.0);
+
+                    if( maxduration + pRates->BeginCutTime
+                        > pC->InputFileProperties.uiClipDuration )
+                    {
+                        maxduration = pC->InputFileProperties.uiClipDuration
+                            - pRates->BeginCutTime;
+                    }
+
+                    if( calcduration > maxduration )
+                    {
+                        calcduration = maxduration;
+                    }
+
+                    if( calcduration == 0 )
+                    {
+                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+                    }
+                }
+                else
+                {
+                    /* start with min video bitrate */
+                    pRates->OutputVideoBitrate = minvideobitrate;
+
+                    /* compute max duration */
+                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (pRates->OutputVideoBitrate
+                        + pRates->OutputAudioBitrate) * 8000.0);
+
+                    if( maxduration + pRates->BeginCutTime
+                        > pC->InputFileProperties.uiClipDuration )
+                    {
+                        maxduration = pC->InputFileProperties.uiClipDuration
+                            - pRates->BeginCutTime;
+                    }
+
+                    if( calcduration > maxduration )
+                    {
+                        calcduration = maxduration;
+                    }
+
+                    if( calcduration == 0 )
+                    {
+                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+                    }
+
+                    /* search max possible video bitrate */
+                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (calcduration / 8000.0));
+
+                    /* Step video up while audio + video still fit */
+                    while( ( fixed_video == M4OSA_FALSE)
+                        && (pRates->OutputVideoBitrate
+                        < M4VIDEOEDITING_k8_MBPS) ) /*+ New Encoder bitrates */
+                    {
+                        calcbitrate = M4MCS_intGetNearestBitrate(
+                            pRates->OutputVideoBitrate, +1);
+
+                        if( calcbitrate
+                            + pRates->OutputAudioBitrate <= maxcombinedbitrate )
+                            pRates->OutputVideoBitrate = calcbitrate;
+                        else
+                            break;
+                    }
+                }
+            }
+            else
+            {
+                /* priority 4 : video bitrate */
+                if( pRates->OutputVideoBitrate
+                    != M4VIDEOEDITING_kUndefinedBitrate )
+                {
+                    /* start with min audio bitrate */
+                    pRates->OutputAudioBitrate = minaudiobitrate;
+
+                    /* compute max duration */
+                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (pRates->OutputVideoBitrate
+                        + pRates->OutputAudioBitrate) * 8000.0);
+
+                    if( maxduration + pRates->BeginCutTime
+                        > pC->InputFileProperties.uiClipDuration )
+                    {
+                        maxduration = pC->InputFileProperties.uiClipDuration
+                            - pRates->BeginCutTime;
+                    }
+
+                    if( calcduration > maxduration )
+                    {
+                        calcduration = maxduration;
+                    }
+
+                    if( calcduration == 0 )
+                    {
+                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+                    }
+
+                    /* search max possible audio bitrate */
+                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (calcduration / 8000.0));
+
+                    /* Step audio up while audio + video still fit */
+                    while( ( fixed_audio == M4OSA_FALSE)
+                        && (pRates->OutputAudioBitrate
+                        < M4VIDEOEDITING_k96_KBPS) )
+                    {
+                        calcbitrate = M4MCS_intGetNearestBitrate(
+                            pRates->OutputAudioBitrate, +1);
+
+                        if( calcbitrate
+                            + pRates->OutputVideoBitrate <= maxcombinedbitrate )
+                            pRates->OutputAudioBitrate = calcbitrate;
+                        else
+                            break;
+                    }
+                }
+                else
+                {
+                    /* Neither bitrate given: split the budget 1/3 audio,
+                     * 2/3 video */
+                    /* compute max duration */
+                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (minvideobitrate + minaudiobitrate) * 8000.0);
+
+                    if( maxduration + pRates->BeginCutTime
+                        > pC->InputFileProperties.uiClipDuration )
+                    {
+                        maxduration = pC->InputFileProperties.uiClipDuration
+                            - pRates->BeginCutTime;
+                    }
+
+                    if( calcduration > maxduration )
+                    {
+                        calcduration = maxduration;
+                    }
+
+                    if( calcduration == 0 )
+                    {
+                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+                    }
+
+                    /* set audio = 1/3 and video = 2/3 */
+                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (calcduration / 8000.0));
+
+                    if( fixed_audio == M4OSA_FALSE )
+                    {
+                        if( pC->novideo )
+                            pRates->OutputAudioBitrate =
+                            M4MCS_intGetNearestBitrate(maxcombinedbitrate,
+                            0);
+                        else
+                            pRates->OutputAudioBitrate =
+                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
+                            / 3, 0);
+
+                        if( pRates->OutputAudioBitrate < minaudiobitrate )
+                            pRates->OutputAudioBitrate = minaudiobitrate;
+
+                        if( pRates->OutputAudioBitrate
+                            > M4VIDEOEDITING_k96_KBPS )
+                            pRates->OutputAudioBitrate =
+                            M4VIDEOEDITING_k96_KBPS;
+                    }
+
+                    if( fixed_video == M4OSA_FALSE )
+                    {
+                        pRates->OutputVideoBitrate =
+                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
+                            - pRates->OutputAudioBitrate, 0);
+
+                        if( pRates->OutputVideoBitrate < minvideobitrate )
+                            pRates->OutputVideoBitrate = minvideobitrate;
+
+                        if( pRates->OutputVideoBitrate
+                            > M4VIDEOEDITING_k8_MBPS )
+                            pRates->OutputVideoBitrate =
+                            M4VIDEOEDITING_k8_MBPS; /*+ New Encoder
+                            bitrates */
+                    }
+                }
+            }
+        }
+    }
+
+    /* recompute max duration with final bitrates */
+    if( pRates->OutputFileSize > 0 )
+    {
+        maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+            / M4MCS_MOOV_OVER_FILESIZE_RATIO
+            / (pRates->OutputVideoBitrate + pRates->OutputAudioBitrate)
+            * 8000.0);
+    }
+    else
+    {
+        maxduration = pC->InputFileProperties.uiClipDuration;
+    }
+
+    if( maxduration
+        + pRates->BeginCutTime > pC->InputFileProperties.uiClipDuration )
+    {
+        maxduration =
+            pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
+    }
+
+    /* Materialize the end cut: 0 ("whole clip") becomes an explicit time,
+     * and a too-long window is clipped to what fits the size budget */
+    if( pRates->EndCutTime == 0 )
+    {
+        pRates->EndCutTime = pRates->BeginCutTime + maxduration;
+    }
+    else
+    {
+        calcduration = pRates->EndCutTime - pRates->BeginCutTime;
+
+        if( calcduration > maxduration )
+        {
+            pRates->EndCutTime = pRates->BeginCutTime + maxduration;
+        }
+    }
+
+    /* Should never happen : constraints are too strong */
+    if( pRates->EndCutTime == pRates->BeginCutTime )
+    {
+        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+    }
+
+    /* estimated resulting file size */
+    pRates->OutputFileSize = (M4OSA_UInt32)(M4MCS_MOOV_OVER_FILESIZE_RATIO
+        * (pRates->OutputVideoBitrate + pRates->OutputAudioBitrate)
+        * (( pRates->EndCutTime - pRates->BeginCutTime) / 8000.0));
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext)
+ * @brief Check parameters to start
+ * @note
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for
+ * this function to be called
+ * @return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH:
+ * Audio bitrate too high (we limit to 96 kbps)
+ * @return M4MCS_ERR_AUDIOBITRATE_TOO_LOW:
+ * Audio bitrate is too low (16 kbps min for aac,
+ * 12.2 for amr, 8 for mp3)
+ * @return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT:
+ * Begin cut and End cut are equals
+ * @return M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION:
+ * Begin cut time is larger than the input
+ * clip duration
+ * @return M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT:
+ * End cut time is smaller than begin cut time
+ * @return M4MCS_ERR_MAXFILESIZE_TOO_SMALL:
+ * Not enough space to store whole output
+ * file at given bitrates
+ * @return M4MCS_ERR_VIDEOBITRATE_TOO_HIGH:
+ *                  Video bitrate too high (we limit to 8 Mbps)
+ * @return M4MCS_ERR_VIDEOBITRATE_TOO_LOW:
+ * Video bitrate too low
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_checkParamsAndStart( M4MCS_Context pContext )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4MCS_EncodingParams VerifyRates;
+    M4OSA_ERR err;
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_checkParamsAndStart: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicCheckParamsAndStart(pC);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    if( M4MCS_kState_SET != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_checkParamsAndStart(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Audio bitrate should not stay undefined at this point */
+    if( ( pC->noaudio == M4OSA_FALSE)
+        && (pC->AudioEncParams.Format != M4ENCODER_kAudioNULL)
+        && (pC->uiAudioBitrate == M4VIDEOEDITING_kUndefinedBitrate) )
+    {
+        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : undefined audio bitrate");
+        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+    }
+
+    /* Video bitrate should not stay undefined at this point */
+    if( ( pC->novideo == M4OSA_FALSE)
+        && (pC->EncodingVideoFormat != M4ENCODER_kNULL)
+        && (pC->uiVideoBitrate == M4VIDEOEDITING_kUndefinedBitrate) )
+    {
+        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : undefined video bitrate");
+        return M4MCS_ERR_VIDEOBITRATE_TOO_LOW;
+    }
+
+    /* Set end cut time if necessary (not an error) */
+    if( pC->uiEndCutTime == 0 )
+    {
+        pC->uiEndCutTime = pC->InputFileProperties.uiClipDuration;
+    }
+
+    /* Force a re-set to check validity of parameters */
+    /* Replays the stored settings through M4MCS_setEncodingParams so its
+     * full validation (cut times, per-codec bitrate ranges, max file
+     * size) runs one last time before transcoding starts. */
+    VerifyRates.OutputVideoBitrate = pC->uiVideoBitrate;
+    VerifyRates.OutputAudioBitrate = pC->uiAudioBitrate;
+    VerifyRates.BeginCutTime = pC->uiBeginCutTime;
+    VerifyRates.EndCutTime = pC->uiEndCutTime;
+    VerifyRates.OutputFileSize = pC->uiMaxFileSize;
+    VerifyRates.OutputVideoTimescale = pC->outputVideoTimescale;
+
+    err = M4MCS_setEncodingParams(pContext, &VerifyRates);
+
+    /**
+    * Check parameters consistency */
+    if( err != M4NO_ERROR )
+    {
+        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : invalid parameter found");
+        return err;
+    }
+
+    /**
+    * All is OK : update state automaton */
+    /* Commit the validated bitrates to the encoder configurations */
+    pC->uiEncVideoBitrate = pC->uiVideoBitrate;
+    pC->AudioEncParams.Bitrate = pC->uiAudioBitrate;
+
+#ifdef M4MCS_WITH_FAST_OPEN
+    /**
+    * Remake the open if it was done in fast mode */
+
+    if( M4OSA_TRUE == pC->bFileOpenedInFastMode )
+    {
+        /* Close the file opened in fast mode */
+        M4MCS_intCleanUp_ReadersDecoders(pC);
+
+        /* Drop back to CREATED so M4MCS_open() accepts the re-open */
+        pC->State = M4MCS_kState_CREATED;
+
+        /* Reopen it in normal mode */
+        err = M4MCS_open(pContext, pC->pInputFile, pC->InputFileType,
+            pC->pOutputFile, pC->pTemporaryFile);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_checkParamsAndStart : M4MCS_Open returns 0x%x", err);
+            return err;
+        }
+    }
+
+#endif /* M4MCS_WITH_FAST_OPEN */
+
+    pC->State = M4MCS_kState_READY;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intStepSet(M4MCS_InternalContext* pC)
+ * @brief   Prepare the whole transcoding pipeline (video decoder, video
+ *          encoder, audio processing chain, writer), jump the audio stream
+ *          to the begin cut, and move the state automaton to PROCESSING or
+ *          BEGINVIDEOJUMP depending on the begin cut time.
+ * @param   pC  (IN) MCS private context
+ * @return  M4NO_ERROR      No error
+ * @return  M4ERR_PARAMETER The H.264 encoder DSI is inconsistent (trim case)
+ * @return  Any error returned by an underlaying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intStepSet( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_Header *encHeader;
+
+    /**
+    * Prepare the video decoder */
+    err = M4MCS_intPrepareVideoDecoder(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareVideoDecoder() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* H.264 input with the NULL encoder goes through the specific H.264
+     * "trim" path (NALU rewriting instead of full re-encode) */
+    if( ( pC->InputFileProperties.VideoStreamType == M4VIDEOEDITING_kH264)
+        && (pC->EncodingVideoFormat == M4ENCODER_kNULL) )
+    {
+        pC->bH264Trim = M4OSA_TRUE;
+    }
+
+    /**
+    * Prepare the video encoder */
+    err = M4MCS_intPrepareVideoEncoder(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareVideoEncoder() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* H.264 trim with a begin cut: plug the NALU post-processing callback
+     * into the encoder and fetch its header to extract SPS/PPS */
+    if( ( pC->uiBeginCutTime != 0)
+        && (pC->InputFileProperties.VideoStreamType == M4VIDEOEDITING_kH264)
+        && (pC->EncodingVideoFormat == M4ENCODER_kNULL) )
+    {
+
+        err = pC->pVideoEncoderGlobalFcts->pFctSetOption(pC->pViEncCtxt,
+            M4ENCODER_kOptionID_H264ProcessNALUContext,
+            (M4OSA_DataOption)pC->m_pInstance);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_intStepSet :pFctSetOption failed (err 0x%x)",
+                err);
+            return err;
+        }
+
+        err = pC->pVideoEncoderGlobalFcts->pFctSetOption(pC->pViEncCtxt,
+            M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr,
+            (M4OSA_DataOption) &H264MCS_ProcessEncodedNALU);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_intStepSet :pFctSetOption failed (err 0x%x)",
+                err);
+            return err;
+        }
+
+        err = pC->pVideoEncoderGlobalFcts->pFctGetOption(pC->pViEncCtxt,
+            M4ENCODER_kOptionID_EncoderHeader,
+            (M4OSA_DataOption) &encHeader);
+
+        if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intStepSet: failed to get the encoder header (err 0x%x)",
+                err);
+            /**< no return here, the rest of the set-up can still proceed.*/
+        }
+        else
+        {
+            // Parse SPS/PPS out of the avcC-style encoder DSI:
+            // [SPS_START_POS] holds the 2-byte SPS length, the SPS follows,
+            // then a 1-byte PPS count, the 2-byte PPS length and the PPS.
+#define SPS_START_POS 6
+
+            pC->m_pInstance->m_encoderSPSSize =
+                ( encHeader->pBuf[SPS_START_POS] << 8)
+                + encHeader->pBuf[SPS_START_POS + 1];
+            pC->m_pInstance->m_pEncoderSPS =
+                (M4OSA_UInt8 *)(encHeader->pBuf) + SPS_START_POS + 2;
+
+            pC->m_pInstance->m_encoderPPSSize =
+                ( encHeader->pBuf[SPS_START_POS + 3
+                + pC->m_pInstance->m_encoderSPSSize] << 8)
+                + encHeader->pBuf[SPS_START_POS + 4
+                + pC->m_pInstance->m_encoderSPSSize];
+            pC->m_pInstance->m_pEncoderPPS = (M4OSA_UInt8 *)encHeader->pBuf + SPS_START_POS + 5
+                + pC->m_pInstance->m_encoderSPSSize;
+
+            /* Check the DSI integrity */
+            if( encHeader->Size != (pC->m_pInstance->m_encoderSPSSize
+                + pC->m_pInstance->m_encoderPPSSize + 5 + SPS_START_POS) )
+            {
+                M4OSA_TRACE1_3(
+                    "!!! M4MCS_intStepSet ERROR : invalid SPS / PPS %d %d %d",
+                    encHeader->Size, pC->m_pInstance->m_encoderSPSSize,
+                    pC->m_pInstance->m_encoderPPSSize);
+                return M4ERR_PARAMETER;
+            }
+        }
+    }
+
+    /**
+    * Prepare audio processing */
+    err = M4MCS_intPrepareAudioProcessing(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareAudioProcessing() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Prepare the writer */
+    err = M4MCS_intPrepareWriter(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareWriter() returns 0x%x", err);
+        return err;
+    }
+
+    /**
+    * Jump the audio stream to the begin cut time (all AUs are RAP)
+    * Must be done after the 3gpp writer init, because it may write the first
+    * audio AU in some cases */
+    err = M4MCS_intPrepareAudioBeginCut(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareAudioBeginCut() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Update state automaton */
+    if( 0 == pC->uiBeginCutTime )
+    {
+        pC->dViDecStartingCts = 0.0;
+        /**
+        * No begin cut, do the encoding */
+        pC->State = M4MCS_kState_PROCESSING;
+    }
+    else
+    {
+        /**
+        * Remember that we must start the decode/encode process at the begin cut time */
+        pC->dViDecStartingCts = (M4OSA_Double)pC->uiBeginCutTime;
+
+        /**
+        * Jumping */
+        pC->State = M4MCS_kState_BEGINVIDEOJUMP;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intStepSet(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareVideoDecoder(M4MCS_InternalContext* pC);
+ * @brief  Prepare the video decoder.
+ *         Creates the decoder shell if it does not already exist (it may have
+ *         been created earlier, e.g. to probe the clip properties), and for
+ *         H.264 input installs a bilinear YUV420 resize filter on the decoder
+ *         output.
+ * @param  pC (IN) MCS private context
+ * @return M4NO_ERROR            No error
+ * @return M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED
+ * @return Any error returned by an underlaying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareVideoDecoder( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Void *decoderUserData;
+    M4DECODER_OutputFilter FilterOption;
+
+    /* Audio-only transcoding: nothing to prepare */
+    if( pC->novideo )
+        return M4NO_ERROR;
+
+    /**
+    * Create the decoder, if it has not been created yet (to get video properties for example) */
+    if( M4OSA_NULL == pC->pViDecCtxt )
+    {
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+        /* External decoders carry their own user data through the context */
+        decoderUserData = pC->m_pCurrentVideoDecoderUserData;
+
+#else
+
+        decoderUserData = M4OSA_NULL;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS ? */
+
+        err = pC->m_pVideoDecoder->m_pFctCreate(&pC->pViDecCtxt,
+            &pC->pReaderVideoStream->m_basicProperties, pC->m_pReader,
+            pC->m_pReaderDataIt, &pC->ReaderVideoAU, decoderUserData);
+
+        if( (M4OSA_UInt32)(M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err )
+        {
+            /**
+            * Our decoder is not compatible with H263 profile other than 0.
+            * So it returns this internal error code.
+            * We translate it to our own error code */
+            M4OSA_TRACE1_0("M4MCS_intPrepareVideoDecoder:\
+                           returning M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED");
+            return M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1("M4MCS_intPrepareVideoDecoder:\
+                           m_pVideoDecoder->m_pFctCreate returns 0x%x", err);
+            return err;
+        }
+
+        /* For H.264 input, hook a bilinear YUV420->YUV420 resize function
+         * as the decoder output filter (m_pFilterUserData unused) */
+        if( M4VIDEOEDITING_kH264 == pC->InputFileProperties.VideoStreamType )
+        {
+            FilterOption.m_pFilterFunction =
+                (M4OSA_Void *) &M4VIFI_ResizeBilinearYUV420toYUV420;
+            FilterOption.m_pFilterUserData = M4OSA_NULL;
+            err = pC->m_pVideoDecoder->m_pFctSetOption(pC->pViDecCtxt,
+                M4DECODER_kOptionID_OutputFilter,
+                (M4OSA_DataOption) &FilterOption);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1("M4MCS_intPrepareVideoDecoder:\
+                               m_pVideoDecoder->m_pFctSetOption returns 0x%x", err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareVideoDecoder(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareVideoEncoder(M4MCS_InternalContext* pC);
+ * @brief  Prepare the video encoder.
+ *         Builds the encoder parameters (full set for normal encoding, a
+ *         reduced I-frame set for the NULL-encoder begin-cut case), limits
+ *         the bitrate to the codec profile/level, creates/opens/starts the
+ *         encoder shell, and allocates the intermediate YUV420 planes when
+ *         the output size differs from the input size.
+ * @param  pC (IN) MCS private context
+ * @return M4NO_ERROR  No error
+ * @return M4ERR_ALLOC Allocation of the pre-resize planes failed
+ * @return Any error returned by an underlaying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareVideoEncoder( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_AdvancedParams EncParams; /**< Encoder advanced parameters */
+    M4ENCODER_Params EncParams1;
+    M4OSA_Double dFrameRate;            /**< tmp variable */
+
+    if( pC->novideo )
+        return M4NO_ERROR;
+
+    if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+    {
+        /* Approximative cts increment */
+        pC->dCtsIncrement = 1000.0 / pC->pReaderVideoStream->m_averageFrameRate;
+
+        if( pC->uiBeginCutTime == 0 )
+        {
+            M4OSA_TRACE3_0(
+                "M4MCS_intPrepareVideoEncoder(): Null encoding, do nothing.");
+            return M4NO_ERROR;
+        }
+        else
+        {
+            M4OSA_TRACE3_0(
+                "M4MCS_intPrepareVideoEncoder(): Null encoding, I-frame defaults.");
+
+            /* Set useful parameters to encode the first I-frame */
+            EncParams.InputFormat = M4ENCODER_kIYUV420;
+            EncParams.videoProfile = pC->encodingVideoProfile;
+            EncParams.videoLevel= pC->encodingVideoLevel;
+
+            /* NULL encoding keeps the input stream type */
+            switch( pC->InputFileProperties.VideoStreamType )
+            {
+                case M4VIDEOEDITING_kH263:
+                    EncParams.Format = M4ENCODER_kH263;
+                    break;
+
+                case M4VIDEOEDITING_kMPEG4:
+                    EncParams.Format = M4ENCODER_kMPEG4;
+                    break;
+
+                case M4VIDEOEDITING_kH264:
+                    EncParams.Format = M4ENCODER_kH264;
+                    break;
+
+                default:
+                    M4OSA_TRACE1_1("M4MCS_intPrepareVideoEncoder: unknown encoding video format\
+                                   (%d), returning M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED",
+                                   pC->InputFileProperties.VideoStreamType);
+                    return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+            }
+
+            EncParams.FrameWidth = pC->EncodingWidth;
+            EncParams.FrameHeight = pC->EncodingHeight;
+            EncParams.Bitrate = pC->uiEncVideoBitrate;
+            EncParams.bInternalRegulation =
+                M4OSA_FALSE; /* do not constrain the I-frame */
+            EncParams.FrameRate = pC->EncodingVideoFramerate;
+
+            /* Other encoding settings (quite all dummy...) */
+            EncParams.uiHorizontalSearchRange = 0;     /* use default */
+            EncParams.uiVerticalSearchRange = 0;       /* use default */
+            EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+            EncParams.uiIVopPeriod = 0;                /* use default */
+            EncParams.uiMotionEstimationTools =
+                0; /* M4V_MOTION_EST_TOOLS_ALL */
+            EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
+            EncParams.uiStartingQuantizerValue = 5;    /* initial QP = 5 */
+            EncParams.bDataPartitioning =
+                M4OSA_FALSE; /* no data partitioning */
+
+            /* Rate factor */
+            EncParams.uiTimeScale = pC->InputFileProperties.uiVideoTimeScale;
+            EncParams.uiRateFactor = 1;
+        }
+    }
+    else
+    {
+        M4OSA_TRACE3_0(
+            "M4MCS_intPrepareVideoEncoder(): Normal encoding, set full config.");
+
+        /**
+        * Set encoder shell parameters according to MCS settings */
+        EncParams.Format = pC->EncodingVideoFormat;
+        EncParams.InputFormat = M4ENCODER_kIYUV420;
+        EncParams.videoProfile = pC->encodingVideoProfile;
+        EncParams.videoLevel= pC->encodingVideoLevel;
+
+        /**
+        * Video frame size */
+        EncParams.FrameWidth = pC->EncodingWidth;
+        EncParams.FrameHeight = pC->EncodingHeight;
+
+        /**
+        * Video bitrate has been previously computed */
+        EncParams.Bitrate = pC->uiEncVideoBitrate;
+
+        /**
+        * MCS use the "true" core internal bitrate regulation */
+        EncParams.bInternalRegulation = M4OSA_TRUE;
+
+        /**
+        * Other encoder settings */
+
+        EncParams.uiHorizontalSearchRange = 0;     /* use default */
+        EncParams.uiVerticalSearchRange = 0;       /* use default */
+        EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+        EncParams.uiIVopPeriod = 0;                /* use default */
+        EncParams.uiMotionEstimationTools =
+            0; /* M4V_MOTION_EST_TOOLS_ALL */
+        EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
+        EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+        EncParams.bDataPartitioning =
+            M4OSA_FALSE; /* no data partitioning */
+
+
+        /**
+        * Video encoder frame rate and rate factor */
+        EncParams.FrameRate = pC->EncodingVideoFramerate;
+        EncParams.uiTimeScale = pC->outputVideoTimescale;
+
+        /* Map the framerate enum to a numeric value used for the cts step */
+        switch( pC->EncodingVideoFramerate )
+        {
+            case M4ENCODER_k5_FPS:
+                dFrameRate = 5.0;
+                break;
+
+            case M4ENCODER_k7_5_FPS:
+                dFrameRate = 7.5;
+                break;
+
+            case M4ENCODER_k10_FPS:
+                dFrameRate = 10.0;
+                break;
+
+            case M4ENCODER_k12_5_FPS:
+                dFrameRate = 12.5;
+                break;
+
+            case M4ENCODER_k15_FPS:
+                dFrameRate = 15.0;
+                break;
+
+            case M4ENCODER_k20_FPS: /**< MPEG-4 only */
+                dFrameRate = 20.0;
+                break;
+
+            case M4ENCODER_k25_FPS: /**< MPEG-4 only */
+                dFrameRate = 25.0;
+                break;
+
+            case M4ENCODER_k30_FPS:
+                dFrameRate = 30.0;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareVideoEncoder: unknown encoding video frame rate\
+                    (0x%x), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE",
+                    pC->EncodingVideoFramerate);
+                return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE;
+        }
+
+        /**
+        * Compute the number of milliseconds between two frames */
+        if( M4ENCODER_kH263 == EncParams.Format )
+        {
+            pC->dCtsIncrement = 1001.0 / dFrameRate;
+        }
+        else /**< MPEG4 or H.264 */
+        {
+            pC->dCtsIncrement = 1000.0 / dFrameRate;
+        }
+    }
+
+    /**
+    * Limit the video bitrate according to encoder profile
+    * and level */
+    err = M4MCS_intLimitBitratePerCodecProfileLevel(&EncParams);
+    if (M4NO_ERROR != err) {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareVideoEncoder: limit bitrate returned err \
+             0x%x", err);
+        return err;
+    }
+
+    /**
+    * Create video encoder */
+    err = pC->pVideoEncoderGlobalFcts->pFctInit(&pC->pViEncCtxt,
+        pC->pWriterDataFcts, \
+        M4MCS_intApplyVPP, pC, pC->pCurrentVideoEncoderExternalAPI, \
+        pC->pCurrentVideoEncoderUserData);
+
+    /**< We put the MCS context in place of the VPP context */
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->encoderState = M4MCS_kEncoderClosed;
+
+    /* H.264 trim path: open with the basic parameter set (always H.264) */
+    if( M4OSA_TRUE == pC->bH264Trim )
+    {
+        EncParams1.InputFormat = EncParams.InputFormat;
+        EncParams1.FrameWidth = EncParams.FrameWidth;
+        EncParams1.FrameHeight = EncParams.FrameHeight;
+        EncParams1.videoProfile= EncParams.videoProfile;
+        EncParams1.videoLevel= EncParams.videoLevel;
+        EncParams1.Bitrate = EncParams.Bitrate;
+        EncParams1.FrameRate = EncParams.FrameRate;
+        EncParams1.Format = M4ENCODER_kH264;
+        M4OSA_TRACE1_2("mcs encoder open profile :%d, level %d",
+            EncParams1.videoProfile, EncParams1.videoLevel);
+        err = pC->pVideoEncoderGlobalFcts->pFctOpen(pC->pViEncCtxt,
+            &pC->WriterVideoAU, &EncParams1);
+    }
+    else
+    {
+        M4OSA_TRACE1_2("mcs encoder open Adv profile :%d, level %d",
+            EncParams.videoProfile, EncParams.videoLevel);
+        err = pC->pVideoEncoderGlobalFcts->pFctOpen(pC->pViEncCtxt,
+            &pC->WriterVideoAU, &EncParams);
+    }
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->encoderState = M4MCS_kEncoderStopped;
+
+    if( M4OSA_NULL != pC->pVideoEncoderGlobalFcts->pFctStart )
+    {
+        err = pC->pVideoEncoderGlobalFcts->pFctStart(pC->pViEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctStart returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    pC->encoderState = M4MCS_kEncoderRunning;
+
+    /******************************/
+    /* Video resize management    */
+    /******************************/
+    /**
+    * Compare video input size and video output size to check if resize is needed */
+    if( ( (M4OSA_UInt32)EncParams.FrameWidth
+        != pC->pReaderVideoStream->m_videoWidth)
+        || ((M4OSA_UInt32)EncParams.FrameHeight
+        != pC->pReaderVideoStream->m_videoHeight) )
+    {
+        /**
+        * Allocate the intermediate video plane that will receive the decoded image before
+        resizing */
+        pC->pPreResizeFrame =
+            (M4VIFI_ImagePlane *)M4OSA_32bitAlignedMalloc(3 * sizeof(M4VIFI_ImagePlane),
+            M4MCS, (M4OSA_Char *)"m_pPreResizeFrame");
+
+        if( M4OSA_NULL == pC->pPreResizeFrame )
+        {
+            M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder():\
+                           unable to allocate m_pPreResizeFrame, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        pC->pPreResizeFrame[0].pac_data = M4OSA_NULL;
+        pC->pPreResizeFrame[1].pac_data = M4OSA_NULL;
+        pC->pPreResizeFrame[2].pac_data = M4OSA_NULL;
+
+        /**
+        * Allocate the Y plane */
+        pC->pPreResizeFrame[0].u_topleft = 0;
+        pC->pPreResizeFrame[0].u_width = pC->pReaderVideoStream->
+            m_videoWidth;  /**< input width */
+        pC->pPreResizeFrame[0].u_height = pC->pReaderVideoStream->
+            m_videoHeight; /**< input height */
+        pC->pPreResizeFrame[0].u_stride = pC->
+            pPreResizeFrame[0].u_width; /**< simple case: stride equals width */
+
+        pC->pPreResizeFrame[0].pac_data =
+            (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pC->pPreResizeFrame[0].u_stride \
+            *pC->pPreResizeFrame[0].u_height, M4MCS,
+            (M4OSA_Char *)"m_pPreResizeFrame[0].pac_data");
+
+        if( M4OSA_NULL == pC->pPreResizeFrame[0].pac_data )
+        {
+            M4OSA_TRACE1_0(
+                "M4MCS_intPrepareVideoEncoder():\
+                unable to allocate m_pPreResizeFrame[0].pac_data, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        /**
+        * Allocate the U plane */
+        pC->pPreResizeFrame[1].u_topleft = 0;
+        pC->pPreResizeFrame[1].u_width = pC->pPreResizeFrame[0].u_width
+            >> 1; /**< U width is half the Y width */
+        pC->pPreResizeFrame[1].u_height = pC->pPreResizeFrame[0].u_height
+            >> 1; /**< U height is half the Y height */
+        pC->pPreResizeFrame[1].u_stride = pC->
+            pPreResizeFrame[1].u_width; /**< simple case: stride equals width */
+
+        pC->pPreResizeFrame[1].pac_data =
+            (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pC->pPreResizeFrame[1].u_stride \
+            *pC->pPreResizeFrame[1].u_height, M4MCS,
+            (M4OSA_Char *)"m_pPreResizeFrame[1].pac_data");
+
+        if( M4OSA_NULL == pC->pPreResizeFrame[1].pac_data )
+        {
+            M4OSA_TRACE1_0(
+                "M4MCS_intPrepareVideoEncoder():\
+                unable to allocate m_pPreResizeFrame[1].pac_data, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        /**
+        * Allocate the V plane */
+        pC->pPreResizeFrame[2].u_topleft = 0;
+        pC->pPreResizeFrame[2].u_width = pC->
+            pPreResizeFrame[1].u_width;  /**< V width equals U width */
+        pC->pPreResizeFrame[2].u_height = pC->
+            pPreResizeFrame[1].u_height; /**< V height equals U height */
+        pC->pPreResizeFrame[2].u_stride = pC->
+            pPreResizeFrame[2].u_width;  /**< simple case: stride equals width */
+
+        /* Fixed copy-paste defect: the allocation tag said "[1]" for plane 2 */
+        pC->pPreResizeFrame[2].pac_data =
+            (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pC->pPreResizeFrame[2].u_stride \
+            *pC->pPreResizeFrame[2].u_height, M4MCS,
+            (M4OSA_Char *)"m_pPreResizeFrame[2].pac_data");
+
+        if( M4OSA_NULL == pC->pPreResizeFrame[2].pac_data )
+        {
+            M4OSA_TRACE1_0(
+                "M4MCS_intPrepareVideoEncoder():\
+                unable to allocate m_pPreResizeFrame[2].pac_data, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareVideoEncoder(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareAudioProcessing(M4MCS_InternalContext* pC);
+ * @brief  Prepare the AAC decoder, the SRC and the AMR-NB encoder and the MP3 encoder.
+ *         Creates and starts the audio decoder, allocates its output buffer,
+ *         configures the SSRC in/out rates and block sizes, creates the
+ *         LVAudioResampler, and initialises/opens the audio encoder.
+ * @param  pC (IN) MCS private context
+ * @return M4NO_ERROR  No error
+ * @return M4ERR_ALLOC An allocation failed
+ * @return Any error returned by an underlaying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareAudioProcessing( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+
+    SSRC_ReturnStatus_en
+        ReturnStatus; /* Function return status */
+    LVM_INT16 NrSamplesMin =
+        0; /* Minimal number of samples on the input or on the output */
+    LVM_INT32 ScratchSize; /* The size of the scratch memory */
+    LVM_INT16
+        *pInputInScratch; /* Pointer to input in the scratch buffer */
+    LVM_INT16
+        *pOutputInScratch; /* Pointer to the output in the scratch buffer */
+    SSRC_Params_t ssrcParams; /* Memory for init parameters */
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+    /* Debug-only dump of the reader / decoder / encoder PCM streams */
+    file_au_reader = fopen("mcs_ReaderOutput.raw", "wb");
+    file_pcm_decoder = fopen("mcs_DecoderOutput.pcm", "wb");
+    file_pcm_encoder = fopen("mcs_EncoderInput.pcm", "wb");
+
+#endif
+
+    if( pC->noaudio )
+        return M4NO_ERROR;
+
+    if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+    {
+        M4OSA_TRACE3_0(
+            "M4MCS_intPrepareAudioProcessing(): Null encoding, do nothing.");
+        return M4NO_ERROR;
+    }
+
+    /* ________________________________ */
+    /*|                                |*/
+    /*| Create and "start" the decoder |*/
+    /*|________________________________|*/
+
+    if( M4OSA_NULL == pC->m_pAudioDecoder )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareAudioProcessing(): Fails to initiate the audio decoder.");
+        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
+    }
+
+    if( M4OSA_NULL == pC->pAudioDecCtxt )
+    {
+        err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(&pC->pAudioDecCtxt,
+            pC->pReaderAudioStream, pC->m_pCurrentAudioDecoderUserData);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareAudioProcessing: m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    if( M4VIDEOEDITING_kAMR_NB == pC->InputFileProperties.AudioStreamType ) {
+        /* AMR DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4VIDEOEDITING_kEVRC == pC->InputFileProperties.AudioStreamType ) {
+        /* EVRC DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4VIDEOEDITING_kMP3 == pC->InputFileProperties.AudioStreamType ) {
+        /* MP3 DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else
+    {
+        /* AAC DECODER CONFIGURATION */
+        M4_AacDecoderConfig AacDecParam;
+
+        AacDecParam.m_AACDecoderProfile = AAC_kAAC;
+        AacDecParam.m_DownSamplingMode = AAC_kDS_OFF;
+
+        if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
+        {
+            AacDecParam.m_OutputMode = AAC_kMono;
+        }
+        else
+        {
+            /* For this version, we encode only in AAC */
+            if( M4ENCODER_kMono == pC->AudioEncParams.ChannelNum )
+            {
+                AacDecParam.m_OutputMode = AAC_kMono;
+            }
+            else
+            {
+                AacDecParam.m_OutputMode = AAC_kStereo;
+            }
+        }
+
+        pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
+            M4AD_kOptionID_UserParam, (M4OSA_DataOption) &AacDecParam);
+    }
+
+    pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
+        M4AD_kOptionID_3gpReaderInterface, (M4OSA_DataOption) pC->m_pReaderDataIt);
+
+    pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
+        M4AD_kOptionID_AudioAU, (M4OSA_DataOption) &pC->ReaderAudioAU);
+
+    if( pC->m_pAudioDecoder->m_pFctStartAudioDec != M4OSA_NULL )
+    {
+        /* Not implemented in all decoders */
+        err = pC->m_pAudioDecoder->m_pFctStartAudioDec(pC->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareAudioProcessing: m_pAudioDecoder->m_pFctStartAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Allocate output buffer for the audio decoder */
+    pC->InputFileProperties.uiDecodedPcmSize =
+        pC->pReaderAudioStream->m_byteFrameLength
+        * pC->pReaderAudioStream->m_byteSampleSize
+        * pC->pReaderAudioStream->m_nbChannels;
+
+    if( pC->InputFileProperties.uiDecodedPcmSize > 0 )
+    {
+        pC->AudioDecBufferOut.m_bufferSize =
+            pC->InputFileProperties.uiDecodedPcmSize;
+        pC->AudioDecBufferOut.m_dataAddress =
+            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->AudioDecBufferOut.m_bufferSize \
+            *sizeof(short), M4MCS, (M4OSA_Char *)"AudioDecBufferOut.m_bufferSize");
+    }
+
+    /* NOTE(review): when uiDecodedPcmSize == 0 this check relies on
+     * m_dataAddress being zero-initialised at context creation — confirm */
+    if( M4OSA_NULL == pC->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareAudioProcessing():\
+             unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /* _________________________ */
+    /*|                         |*/
+    /*| Set the SSRC parameters |*/
+    /*|_________________________|*/
+
+    switch( pC->pReaderAudioStream->m_samplingFrequency )
+    {
+        case 8000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_8000;
+            break;
+
+        case 11025:
+            ssrcParams.SSRC_Fs_In = LVM_FS_11025;
+            break;
+
+        case 12000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_12000;
+            break;
+
+        case 16000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_16000;
+            break;
+
+        case 22050:
+            ssrcParams.SSRC_Fs_In = LVM_FS_22050;
+            break;
+
+        case 24000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_24000;
+            break;
+
+        case 32000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_32000;
+            break;
+
+        case 44100:
+            ssrcParams.SSRC_Fs_In = LVM_FS_44100;
+            break;
+
+        case 48000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_48000;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareAudioProcessing: invalid input AAC sampling frequency (%d Hz),\
+                returning M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY",
+                pC->pReaderAudioStream->m_samplingFrequency);
+            return M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY;
+    }
+
+    if( 1 == pC->pReaderAudioStream->m_nbChannels )
+    {
+        ssrcParams.SSRC_NrOfChannels = LVM_MONO;
+    }
+    else
+    {
+        ssrcParams.SSRC_NrOfChannels = LVM_STEREO;
+    }
+
+    /*FlB 26.02.2009: add mp3 as output format*/
+    if( pC->AudioEncParams.Format == M4ENCODER_kAAC
+        || pC->AudioEncParams.Format == M4ENCODER_kMP3 )
+    {
+        switch( pC->AudioEncParams.Frequency )
+        {
+            case M4ENCODER_k8000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
+                break;
+
+            case M4ENCODER_k11025Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_11025;
+                break;
+
+            case M4ENCODER_k12000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_12000;
+                break;
+
+            case M4ENCODER_k16000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_16000;
+                break;
+
+            case M4ENCODER_k22050Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_22050;
+                break;
+
+            case M4ENCODER_k24000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_24000;
+                break;
+
+            case M4ENCODER_k32000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_32000;
+                break;
+
+            case M4ENCODER_k44100Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_44100;
+                break;
+
+            case M4ENCODER_k48000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_48000;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareAudioProcessing: invalid output AAC sampling frequency \
+                    (%d Hz), returning M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY",
+                    pC->AudioEncParams.Frequency);
+                return M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY;
+                break;
+        }
+    }
+    else
+    {
+        /* AMR-NB output is always 8 kHz */
+        ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
+    }
+
+
+
+    ReturnStatus = 0;
+
+    /* 40 ms worth of samples for each supported input rate */
+    switch( ssrcParams.SSRC_Fs_In )
+    {
+        case LVM_FS_8000:
+            ssrcParams.NrSamplesIn = 320;
+            break;
+
+        case LVM_FS_11025:
+            ssrcParams.NrSamplesIn = 441;
+            break;
+
+        case LVM_FS_12000:
+            ssrcParams.NrSamplesIn = 480;
+            break;
+
+        case LVM_FS_16000:
+            ssrcParams.NrSamplesIn = 640;
+            break;
+
+        case LVM_FS_22050:
+            ssrcParams.NrSamplesIn = 882;
+            break;
+
+        case LVM_FS_24000:
+            ssrcParams.NrSamplesIn = 960;
+            break;
+
+        case LVM_FS_32000:
+            ssrcParams.NrSamplesIn = 1280;
+            break;
+
+        case LVM_FS_44100:
+            ssrcParams.NrSamplesIn = 1764;
+            break;
+
+        case LVM_FS_48000:
+            ssrcParams.NrSamplesIn = 1920;
+            break;
+
+        default:
+            ReturnStatus = -1;
+            break;
+    }
+
+    switch( ssrcParams.SSRC_Fs_Out )
+    {
+        case LVM_FS_8000:
+            ssrcParams.NrSamplesOut = 320;
+            break;
+
+        case LVM_FS_11025:
+            ssrcParams.NrSamplesOut = 441;
+            break;
+
+        case LVM_FS_12000:
+            ssrcParams.NrSamplesOut = 480;
+            break;
+
+        case LVM_FS_16000:
+            ssrcParams.NrSamplesOut = 640;
+            break;
+
+        case LVM_FS_22050:
+            ssrcParams.NrSamplesOut = 882;
+            break;
+
+        case LVM_FS_24000:
+            ssrcParams.NrSamplesOut = 960;
+            break;
+
+        case LVM_FS_32000:
+            ssrcParams.NrSamplesOut = 1280;
+            break;
+
+        case LVM_FS_44100:
+            ssrcParams.NrSamplesOut = 1764;
+            break;
+
+        case LVM_FS_48000:
+            ssrcParams.NrSamplesOut = 1920;
+            break;
+
+        default:
+            ReturnStatus = -1;
+            break;
+    }
+
+
+
+    if( ReturnStatus != SSRC_OK )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareAudioProcessing:\
+             Error code %d returned by the SSRC_GetNrSamples function",
+            ReturnStatus);
+        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
+    }
+
+    NrSamplesMin =
+        (LVM_INT16)((ssrcParams.NrSamplesIn > ssrcParams.NrSamplesOut)
+        ? ssrcParams.NrSamplesOut : ssrcParams.NrSamplesIn);
+
+    while( NrSamplesMin < M4MCS_SSRC_MINBLOCKSIZE )
+    { /* Don't take blocks smaller that the minimal block size */
+        ssrcParams.NrSamplesIn = (LVM_INT16)(ssrcParams.NrSamplesIn << 1);
+        ssrcParams.NrSamplesOut = (LVM_INT16)(ssrcParams.NrSamplesOut << 1);
+        NrSamplesMin = (LVM_INT16)(NrSamplesMin << 1);
+    }
+
+
+    pC->iSsrcNbSamplIn = (LVM_INT16)(
+        ssrcParams.
+        NrSamplesIn); /* multiplication by NrOfChannels is done below */
+    pC->iSsrcNbSamplOut = (LVM_INT16)(ssrcParams.NrSamplesOut);
+
+    /**
+    * Allocate buffer for the input of the SSRC */
+    pC->pSsrcBufferIn =
+        (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->iSsrcNbSamplIn * sizeof(short) \
+        *pC->pReaderAudioStream->m_nbChannels, M4MCS,
+        (M4OSA_Char *)"pSsrcBufferIn");
+
+    if( M4OSA_NULL == pC->pSsrcBufferIn )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareAudioProcessing():\
+             unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+    pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+    /**
+    * Allocate buffer for the output of the SSRC */
+    pC->pSsrcBufferOut =
+        (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->iSsrcNbSamplOut * sizeof(short) \
+        *pC->pReaderAudioStream->m_nbChannels, M4MCS,
+        (M4OSA_Char *)"pSsrcBufferOut");
+
+    if( M4OSA_NULL == pC->pSsrcBufferOut )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareAudioProcessing():\
+             unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+
+    /* Create the resampler: 16-bit samples, input channel count,
+     * output frequency taken from the audio encoder parameters */
+    pC->pLVAudioResampler = LVAudioResamplerCreate(
+        16, /*gInputParams.lvBTChannelCount*/
+        (M4OSA_Int16)pC->InputFileProperties.uiNbChannels/*ssrcParams.SSRC_NrOfChannels*/,
+        (M4OSA_Int32)(pC->AudioEncParams.Frequency)/*ssrcParams.SSRC_Fs_Out*/, 1);
+
+    if( M4OSA_NULL == pC->pLVAudioResampler)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    LVAudiosetSampleRate(pC->pLVAudioResampler,
+        /*gInputParams.lvInSampleRate*/
+        /*pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency*/
+        pC->InputFileProperties.uiSamplingFrequency/*ssrcParams.SSRC_Fs_In*/);
+
+    LVAudiosetVolume(pC->pLVAudioResampler, (M4OSA_Int16)(0x1000 /* 0x7fff */),
+        (M4OSA_Int16)(0x1000/*0x7fff*/));
+
+
+    /* ________________________ */
+    /*|                        |*/
+    /*| Init the audio encoder |*/
+    /*|________________________|*/
+
+    /* Initialise the audio encoder */
+
+    err = pC->pAudioEncoderGlobalFcts->pFctInit(&pC->pAudioEncCtxt,
+        pC->pCurrentAudioEncoderUserData);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareAudioProcessing: pAudioEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* Open the audio encoder */
+    err = pC->pAudioEncoderGlobalFcts->pFctOpen(pC->pAudioEncCtxt,
+        &pC->AudioEncParams, &pC->pAudioEncDSI,
+        M4OSA_NULL /* no grabbing */);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareAudioProcessing: pAudioEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* Allocate the input buffer for the audio encoder */
+    switch( pC->AudioEncParams.Format )
+    {
+        case M4ENCODER_kAMRNB:
+            pC->audioEncoderGranularity = M4MCS_PCM_AMR_GRANULARITY_SAMPLES;
+            break;
+
+        case M4ENCODER_kAAC:
+            pC->audioEncoderGranularity = M4MCS_PCM_AAC_GRANULARITY_SAMPLES;
+            break;
+
+            /*FlB 26.02.2009: add mp3 as output format*/
+        case M4ENCODER_kMP3:
+            pC->audioEncoderGranularity = M4MCS_PCM_MP3_GRANULARITY_SAMPLES;
+            break;
+
+        default:
+            break;
+    }
+
+    if( M4ENCODER_kMono == pC->AudioEncParams.ChannelNum )
+        pC->audioEncoderGranularity *= sizeof(short);
+    else
+        pC->audioEncoderGranularity *= sizeof(short) * 2;
+
+    pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
+    pC->pAudioEncoderBuffer =
+        (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->audioEncoderGranularity, M4MCS,
+        (M4OSA_Char *)"pC->pAudioEncoderBuffer");
+
+    /* Check the allocation result like every other allocation above
+     * (was previously left unchecked) */
+    if( M4OSA_NULL == pC->pAudioEncoderBuffer )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareAudioProcessing():\
+             unable to allocate pAudioEncoderBuffer, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareAudioProcessing(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareWriter(M4MCS_InternalContext* pC);
+ * @brief Prepare the writer.
+ * @param pC (IN) MCS private context
+ * @return M4NO_ERROR No error
+ * @return Any error returned by an underlaying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareWriter( M4MCS_InternalContext *pC )
+{
+ M4OSA_ERR err;
+ M4OSA_UInt32 uiVersion; /**< To write component version in 3gp writer */
+ M4OSA_MemAddr8 pDSI = M4OSA_NULL; /**< To create the Decoder Specific Info */
+ M4SYS_StreamIDValue optionValue; /**< For the setoption calls */
+ M4OSA_UInt32 TargetedFileSize;
+ M4OSA_Bool bMULPPSSPS = M4OSA_FALSE;
+
+ /**
+ * Init the writer */
+ err = pC->pWriterGlobalFcts->pFctOpen(&pC->pWriterContext, pC->pOutputFile,
+ pC->pOsaFileWritPtr, pC->pTemporaryFile, pC->pOsaFileReadPtr);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctOpen returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Link to the writer context in the writer interface */
+ pC->pWriterDataFcts->pWriterContext = pC->pWriterContext;
+
+ /**
+ * Set the product description string in the written file */
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ M4WRITER_kEmbeddedString, (M4OSA_DataOption)"NXP-SW : MCS ");
+
+ if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+ != err) ) /* this option may not be implemented by some writers */
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Set the product version in the written file */
+ uiVersion =
+ M4VIDEOEDITING_VERSION_MAJOR * 100 + M4VIDEOEDITING_VERSION_MINOR * 10
+ + M4VIDEOEDITING_VERSION_REVISION;
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ M4WRITER_kEmbeddedVersion, (M4OSA_DataOption) &uiVersion);
+
+ if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+ != err) ) /* this option may not be implemented by some writers */
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: \
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * If there is a video input, allocate and fill the video stream structures for the writer */
+ if( pC->novideo == M4OSA_FALSE )
+ {
+ /**
+ * Fill Video properties structure for the AddStream method */
+ pC->WriterVideoStreamInfo.height = pC->EncodingHeight;
+ pC->WriterVideoStreamInfo.width = pC->EncodingWidth;
+ pC->WriterVideoStreamInfo.fps =
+ 0; /**< Not used by the shell/core writer */
+ pC->WriterVideoStreamInfo.Header.pBuf =
+ M4OSA_NULL; /**< Will be updated later */
+ pC->WriterVideoStreamInfo.Header.Size = 0; /**< Will be updated later */
+
+ /**
+ * Fill Video stream description structure for the AddStream method */
+ switch( pC->EncodingVideoFormat )
+ {
+ case M4ENCODER_kMPEG4:
+ pC->WriterVideoStream.streamType = M4SYS_kMPEG_4;
+ break;
+
+ case M4ENCODER_kH263:
+ pC->WriterVideoStream.streamType = M4SYS_kH263;
+ break;
+
+ case M4ENCODER_kH264:
+ pC->WriterVideoStream.streamType = M4SYS_kH264;
+ break;
+
+ case M4ENCODER_kNULL:
+ switch( pC->InputFileProperties.VideoStreamType )
+ {
+ case M4VIDEOEDITING_kMPEG4:
+ pC->WriterVideoStream.streamType = M4SYS_kMPEG_4;
+ break;
+
+ case M4VIDEOEDITING_kH263:
+ pC->WriterVideoStream.streamType = M4SYS_kH263;
+ break;
+
+ case M4VIDEOEDITING_kH264:
+ pC->WriterVideoStream.streamType = M4SYS_kH264;
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: case input=M4ENCODER_kNULL, \
+ unknown format (0x%x),\
+ returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+ pC->EncodingVideoFormat);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+ }
+ break;
+
+ default: /**< It should never happen, already tested */
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: unknown format (0x%x),\
+ returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+ pC->EncodingVideoFormat);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+ }
+
+ /**
+ * Video bitrate value will be the real value */
+ pC->WriterVideoStream.averageBitrate =
+ (M4OSA_Int32)pC->uiEncVideoBitrate;
+ pC->WriterVideoStream.maxBitrate = (M4OSA_Int32)pC->uiEncVideoBitrate;
+
+ /**
+ * most other parameters are "dummy" */
+ pC->WriterVideoStream.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
+ pC->WriterVideoStream.timeScale =
+ 0; /**< Not used by the shell/core writer */
+ pC->WriterVideoStream.profileLevel =
+ 0; /**< Not used by the shell/core writer */
+ pC->WriterVideoStream.duration =
+ 0; /**< Not used by the shell/core writer */
+ pC->WriterVideoStream.decoderSpecificInfoSize =
+ sizeof(M4WRITER_StreamVideoInfos);
+ pC->WriterVideoStream.decoderSpecificInfo =
+ (M4OSA_MemAddr32) &(pC->WriterVideoStreamInfo);
+
+ /**
+ * Update Encoder Header properties for Video stream if needed */
+ if( M4ENCODER_kH263 == pC->EncodingVideoFormat )
+ {
+ /**
+ * Creates the H263 DSI */
+ pC->WriterVideoStreamInfo.Header.Size =
+ 7; /**< H263 output DSI is always 7 bytes */
+ pDSI = (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(7, M4MCS, (M4OSA_Char
+ *)"pC->WriterVideoStreamInfo.Header.pBuf (DSI H263)");
+
+ if( M4OSA_NULL == pDSI )
+ {
+ M4OSA_TRACE1_0("M4MCS_intPrepareWriter(): unable to allocate pDSI (H263),\
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ /**
+ * Vendor is NXP Software: N, X, P, S. */
+ pDSI[0] = 'N';
+ pDSI[1] = 'X';
+ pDSI[2] = 'P';
+ pDSI[3] = 'S';
+
+ /**
+ * Decoder version is 0 */
+ pDSI[4] = 0;
+
+ /**
+ * Level is the sixth byte of the DSI. */
+ switch( pC->EncodingWidth )
+ {
+ case M4ENCODER_SQCIF_Width:
+ case M4ENCODER_QCIF_Width:
+ if( ( pC->uiEncVideoBitrate <= M4ENCODER_k64_KBPS)
+ && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
+ {
+ pDSI[5] = 10;
+ }
+ else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
+ && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
+ {
+ pDSI[5] = 45;
+ }
+ else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
+ && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+ {
+ pDSI[5] = 20;
+ }
+ else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k384_KBPS)
+ && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+ {
+ pDSI[5] = 30;
+ }
+ else if( ( pC->uiEncVideoBitrate
+ <= M4ENCODER_k800_KBPS/*2048*/)
+ && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+ {
+ pDSI[5] = 40;
+ }
+ break;
+
+ case M4ENCODER_CIF_Width:
+ if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
+ && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
+ {
+ pDSI[5] = 20;
+ }
+ else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k384_KBPS)
+ && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+ {
+ pDSI[5] = 30;
+ }
+ else if( ( pC->uiEncVideoBitrate
+ <= M4ENCODER_k800_KBPS/*2048*/)
+ && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+ {
+ pDSI[5] = 40;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /**
+ * Profile is the seventh byte of the DSI. */
+ pDSI[6] = 0;
+
+ pC->WriterVideoStreamInfo.Header.pBuf = pDSI;
+ }
+ else if( M4ENCODER_kNULL == pC->EncodingVideoFormat )
+ {
+ /* If we copy the stream from the input, we copy its DSI */
+
+ pC->WriterVideoStreamInfo.Header.Size = pC->pReaderVideoStream->
+ m_basicProperties.m_decoderSpecificInfoSize;
+ pC->WriterVideoStreamInfo.Header.pBuf =
+ (M4OSA_MemAddr8)pC->pReaderVideoStream->
+ m_basicProperties.m_pDecoderSpecificInfo;
+
+ }
+ /* otherwise (MPEG4), the DSI will be recovered from the encoder later on. */
+
+ /*+CRLV6775 - H.264 Trimming */
+ if( pC->bH264Trim == M4OSA_TRUE )
+ {
+ bMULPPSSPS = M4OSA_TRUE;
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMUL_PPS_SPS,
+ (M4OSA_DataOption) &bMULPPSSPS);
+
+ if( ( M4NO_ERROR != err)
+ && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+ != err) ) /* this option may not be implemented by some writers */
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kMUL_PPS_SPS) returns 0x%x",
+ err);
+ return err;
+ }
+ }
+ /*-CRLV6775 - H.264 Trimming */
+ /**
+ * Add the video stream */
+ err = pC->pWriterGlobalFcts->pFctAddStream(pC->pWriterContext,
+ &pC->WriterVideoStream);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctAddStream(video) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Update AU properties for video stream */
+ pC->WriterVideoAU.stream = &(pC->WriterVideoStream);
+ pC->WriterVideoAU.dataAddress = M4OSA_NULL;
+ pC->WriterVideoAU.size = 0;
+ pC->WriterVideoAU.CTS = 0; /** Reset time */
+ pC->WriterVideoAU.DTS = 0;
+ pC->WriterVideoAU.attribute = AU_RAP;
+ pC->WriterVideoAU.nbFrag = 0; /** No fragment */
+ pC->WriterVideoAU.frag = M4OSA_NULL;
+
+ /**
+ * Set the writer max video AU size */
+ optionValue.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
+ optionValue.value = pC->uiVideoMaxAuSize;
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+ (M4OSA_DataOption) &optionValue);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: \
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Set the writer max video chunk size */
+ optionValue.value = pC->uiVideoMaxChunckSize;
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+ (M4OSA_DataOption) &optionValue);
+
+ if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+ != err) ) /* this option may not be implemented by some writers */
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * If there is an audio input, allocate and fill the audio stream structures for the writer */
+ if( pC->noaudio == M4OSA_FALSE )
+ {
+ M4WRITER_StreamAudioInfos streamAudioInfo;
+
+ streamAudioInfo.nbSamplesPerSec = 0; /**< unused by our shell writer */
+ streamAudioInfo.nbBitsPerSample = 0; /**< unused by our shell writer */
+ streamAudioInfo.nbChannels = 1; /**< unused by our shell writer */
+
+ pC->WriterAudioStream.averageBitrate =
+ 0; /**< It is not used by the shell, the DSI is taken into account instead */
+ pC->WriterAudioStream.maxBitrate =
+ 0; /**< Not used by the shell/core writer */
+
+ /**
+ * Fill Audio stream description structure for the AddStream method */
+ switch( pC->AudioEncParams.Format )
+ {
+ case M4ENCODER_kAMRNB:
+ pC->WriterAudioStream.streamType = M4SYS_kAMR;
+ break;
+
+ case M4ENCODER_kAAC:
+ pC->WriterAudioStream.streamType = M4SYS_kAAC;
+ pC->WriterAudioStream.averageBitrate =
+ pC->AudioEncParams.Bitrate;
+ pC->WriterAudioStream.maxBitrate = pC->AudioEncParams.Bitrate;
+ break;
+
+ /*FlB 26.02.2009: add mp3 as output format*/
+ case M4ENCODER_kMP3:
+ pC->WriterAudioStream.streamType = M4SYS_kMP3;
+ break;
+
+ case M4ENCODER_kAudioNULL:
+ switch( pC->InputFileProperties.AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ pC->WriterAudioStream.streamType = M4SYS_kAMR;
+ break;
+ /*FlB 26.02.2009: add mp3 as output format*/
+ case M4VIDEOEDITING_kMP3:
+ pC->WriterAudioStream.streamType = M4SYS_kMP3;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ pC->WriterAudioStream.streamType = M4SYS_kAAC;
+ pC->WriterAudioStream.averageBitrate =
+ pC->AudioEncParams.Bitrate;
+ pC->WriterAudioStream.maxBitrate =
+ pC->AudioEncParams.Bitrate;
+ break;
+
+ case M4VIDEOEDITING_kEVRC:
+ pC->WriterAudioStream.streamType = M4SYS_kEVRC;
+ break;
+
+ case M4VIDEOEDITING_kNoneAudio:
+ case M4VIDEOEDITING_kPCM:
+ case M4VIDEOEDITING_kNullAudio:
+ case M4VIDEOEDITING_kUnsupportedAudio:
+ break;
+ }
+ break;
+
+ default: /**< It should never happen, already tested */
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: \
+ unknown format (0x%x), returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+ pC->AudioEncParams.Format);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+ }
+
+ /**
+ * MCS produces only AMR-NB output */
+ pC->WriterAudioStream.streamID = M4MCS_WRITER_AUDIO_STREAM_ID;
+ pC->WriterAudioStream.duration =
+ 0; /**< Not used by the shell/core writer */
+ pC->WriterAudioStream.profileLevel =
+ 0; /**< Not used by the shell/core writer */
+ pC->WriterAudioStream.timeScale = pC->AudioEncParams.Frequency;
+
+ if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+ {
+ /* If we copy the stream from the input, we copy its DSI */
+ streamAudioInfo.Header.Size = pC->pReaderAudioStream->
+ m_basicProperties.m_decoderSpecificInfoSize;
+ streamAudioInfo.Header.pBuf =
+ (M4OSA_MemAddr8)pC->pReaderAudioStream->
+ m_basicProperties.m_pDecoderSpecificInfo;
+ }
+ else
+ {
+ if( pC->pAudioEncDSI.pInfo != M4OSA_NULL )
+ {
+ /* Use the DSI given by the encoder open() */
+ streamAudioInfo.Header.Size = pC->pAudioEncDSI.infoSize;
+ streamAudioInfo.Header.pBuf = pC->pAudioEncDSI.pInfo;
+ }
+ else
+ {
+ /* Writer will put a default Philips DSI */
+ streamAudioInfo.Header.Size = 0;
+ streamAudioInfo.Header.pBuf = M4OSA_NULL;
+ }
+ }
+
+ /**
+ * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos
+ in the DSI pointer... */
+ pC->WriterAudioStream.decoderSpecificInfo =
+ (M4OSA_MemAddr32) &streamAudioInfo;
+
+ /**
+ * Add the audio stream to the writer */
+ err = pC->pWriterGlobalFcts->pFctAddStream(pC->pWriterContext,
+ &pC->WriterAudioStream);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctAddStream(audio) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Link the AU and the stream */
+ pC->WriterAudioAU.stream = &(pC->WriterAudioStream);
+ pC->WriterAudioAU.dataAddress = M4OSA_NULL;
+ pC->WriterAudioAU.size = 0;
+ pC->WriterAudioAU.CTS = 0; /** Reset time */
+ pC->WriterAudioAU.DTS = 0;
+ pC->WriterAudioAU.attribute = 0;
+ pC->WriterAudioAU.nbFrag = 0; /** No fragment */
+ pC->WriterAudioAU.frag = M4OSA_NULL;
+
+ /**
+ * Set the writer audio max AU size */
+ /* As max bitrate is now 320kbps instead of 128kbps, max AU
+ * size has to be increased adapt the max AU size according to the stream type and the
+ * channels numbers*/
+ /* After tests, a margin of 3 is taken (2 was not enough and raises to memory overwrite)
+ */
+ //pC->uiAudioMaxAuSize = M4MCS_AUDIO_MAX_AU_SIZE;
+ switch( pC->WriterAudioStream.streamType )
+ {
+ case M4SYS_kAMR:
+ pC->uiAudioMaxAuSize = M4MCS_PCM_AMR_GRANULARITY_SAMPLES
+ * (( pC->InputFileProperties.uiNbChannels
+ * sizeof(short)) + 3);
+ break;
+
+ case M4SYS_kMP3:
+ pC->uiAudioMaxAuSize = M4MCS_PCM_MP3_GRANULARITY_SAMPLES
+ * (( pC->InputFileProperties.uiNbChannels
+ * sizeof(short)) + 3);
+ break;
+
+ case M4SYS_kAAC:
+ pC->uiAudioMaxAuSize = M4MCS_PCM_AAC_GRANULARITY_SAMPLES
+ * (( pC->InputFileProperties.uiNbChannels
+ * sizeof(short)) + 3);
+ break;
+ /*case M4SYS_kEVRC:
+ pC->uiAudioMaxAuSize = M4MCS_PCM_EVRC_GRANULARITY_SAMPLES*
+ ((pC->InputFileProperties.uiNbChannels * sizeof(short))+3);
+ break;*/
+ default: /**< It should never happen, already tested */
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: unknown format (0x%x),\
+ returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+ pC->WriterAudioStream.streamType);
+ return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+ }
+
+ optionValue.streamID = M4MCS_WRITER_AUDIO_STREAM_ID;
+ optionValue.value = pC->uiAudioMaxAuSize;
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+ (M4OSA_DataOption) &optionValue);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption(audio,\
+ M4WRITER_kMaxAUSize) returns 0x%x",
+ err);
+ return err;
+ }
+
+ optionValue.value = M4MCS_AUDIO_MAX_CHUNK_SIZE;
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+ (M4OSA_DataOption) &optionValue);
+
+ if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+ != err) ) /* this option may not be implemented by some writers */
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption(audio,\
+ M4WRITER_kMaxChunckSize) returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /*
+ * Set the limitation size of the writer */
+ TargetedFileSize = pC->uiMaxFileSize;
+ /* add 1 kB margin */
+ if( TargetedFileSize > 8192 )
+ TargetedFileSize -= 1024;
+
+ err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxFileSize,
+ (M4OSA_DataOption) &TargetedFileSize);
+
+ if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+ != err) ) /* this option may not be implemented by some writers */
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption\
+ (M4WRITER_kMaxFileSize) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Close the stream registering in order to be ready to write data */
+ err = pC->pWriterGlobalFcts->pFctStartWriting(pC->pWriterContext);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctStartWriting returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4MCS_intPrepareWriter(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareAudioBeginCut(M4MCS_InternalContext* pC);
+ * @brief Perform the audio begin cut: jump the audio reader to the begin cut
+ *        time and, for AMR-NB/EVRC streams, slice the first AU frame by frame
+ *        to reach 20 ms cut precision.
+ * @param pC (IN) MCS private context
+ * @return M4NO_ERROR No error
+ * @return Any error returned by an underlaying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareAudioBeginCut( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iCts;       /**< In/out jump time in ms; updated by the reader
+                                 to the actual position reached */
+    M4OSA_UInt32 uiFrameSize;
+
+    if( pC->noaudio )
+        return M4NO_ERROR;
+
+    /**
+    * Check if an audio begin cut is needed */
+    if( ( M4OSA_NULL == pC->pReaderAudioStream) || (0 == pC->uiBeginCutTime) )
+    {
+        /**
+        * Return with no error */
+        M4OSA_TRACE3_0(
+            "M4MCS_intPrepareAudioBeginCut(): returning M4NO_ERROR (a)");
+        return M4NO_ERROR;
+    }
+
+    /**
+    * Jump at the begin cut time */
+    iCts = pC->uiBeginCutTime;
+    err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
+        (M4_StreamHandler *)pC->pReaderAudioStream, &iCts);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareAudioBeginCut: m_pFctJump(Audio) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Remember audio begin cut offset */
+    /* The reader may have landed on a sync point earlier than the requested
+     * time, so iCts can be lower than uiBeginCutTime here. */
+    pC->iAudioCtsOffset = iCts;
+
+    /**
+    * AMR-NB & EVRC: there may be many frames per AU.
+    * In that case we need to slice the first AU to keep the 20 ms cut precision */
+    if( ( M4DA_StreamTypeAudioAmrNarrowBand
+        == pC->pReaderAudioStream->m_basicProperties.m_streamType)
+        || (M4DA_StreamTypeAudioEvrc
+        == pC->pReaderAudioStream->m_basicProperties.m_streamType) )
+    {
+        /**
+        * If the next frame CTS is lower than the begin cut time,
+        * we must read the AU and parse its frames to reach the
+        * nearest to the begin cut */
+        /* NOTE(review): this guard uses '<' but the loop below uses '<=' —
+         * when iCts + 20 == uiBeginCutTime the loop body is never entered
+         * (guard fails), so the asymmetry only matters on entry; verify the
+         * boundary case is intended. */
+        if( ( iCts + 20) < (M4OSA_Int32)pC->uiBeginCutTime )
+        {
+            /**
+            * Read the first audio AU after the jump */
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU);
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                M4OSA_TRACE1_0(
+                    "M4MCS_intPrepareAudioBeginCut(): m_pReaderDataIt->m_pFctGetNextAu(audio)\
+                    returns M4WAR_NO_MORE_AU! Returning M4NO_ERROR");
+                return
+                    M4NO_ERROR; /**< no fatal error here, we should be able to pursue */
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareAudioBeginCut(): m_pReaderDataIt->m_pFctGetNextAu(Audio)\
+                    returns 0x%x",
+                    err);
+                return err;
+            }
+
+            /**
+            * While the next AU has a lower CTS than the begin cut time, we advance to
+            the next frame */
+            while( ( iCts + 20) <= (M4OSA_Int32)pC->uiBeginCutTime )
+            {
+                /**
+                * Get the size of the frame */
+                switch( pC->pReaderAudioStream->m_basicProperties.m_streamType )
+                {
+                    case M4DA_StreamTypeAudioAmrNarrowBand:
+                        uiFrameSize = M4MCS_intGetFrameSize_AMRNB(
+                            pC->ReaderAudioAU.m_dataAddress);
+                        break;
+
+                    case M4DA_StreamTypeAudioEvrc:
+                        uiFrameSize = M4MCS_intGetFrameSize_EVRC(
+                            pC->ReaderAudioAU.m_dataAddress);
+                        break;
+
+                    default:
+                        uiFrameSize = 0;
+                        break;
+                }
+
+                if( 0 == uiFrameSize )
+                {
+                    /**
+                    * Corrupted frame! We get out of this mess!
+                    * We don't want to crash here... */
+                    M4OSA_TRACE1_0(
+                        "M4MCS_intPrepareAudioBeginCut(): \
+                        M4MCS_intGetFrameSize_xxx returns 0! Returning M4NO_ERROR");
+                    return
+                        M4NO_ERROR; /**< no fatal error here, we should be able to pursue */
+                }
+
+                /**
+                * Go to the next frame */
+                pC->ReaderAudioAU.m_dataAddress += uiFrameSize;
+                pC->ReaderAudioAU.m_size -= uiFrameSize;
+
+                /**
+                * Get the CTS of the next frame */
+                iCts += 20; /**< AMR, EVRC frame duration is always 20 ms */
+                pC->ReaderAudioAU.m_CTS = iCts;
+                pC->ReaderAudioAU.m_DTS = iCts;
+            }
+
+            /**
+            * Update the audio begin cut offset */
+            pC->iAudioCtsOffset = iCts;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareAudioBeginCut(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intStepEncoding(M4MCS_InternalContext* pC, M4OSA_UInt8* pProgress)
+ * @brief  Perform one step of video and/or audio transcoding, then compute the
+ *         overall progress percentage.
+ * @param  pC        (IN)  MCS private context
+ * @param  pProgress (OUT) Progress in percent (capped at 99 while running,
+ *                         set to 100 when both streams are finished)
+ * @return M4NO_ERROR                           No error, call again to continue
+ * @return M4MCS_WAR_TRANSCODING_DONE           Transcoding is complete
+ * @return M4MCS_ERR_NOMORE_SPACE               Writer reached the file size limit
+ * @return M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL Nothing could be encoded before
+ *                                              the size limit was hit
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intStepEncoding( M4MCS_InternalContext *pC,
+                                       M4OSA_UInt8 *pProgress )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 uiAudioStepCount = 0; /**< Limits audio-only encoding to one
+                                            step per call when video is idle */
+
+    /* ---------- VIDEO TRANSCODING ---------- */
+
+    if( ( pC->novideo == M4OSA_FALSE) && (M4MCS_kStreamState_STARTED
+        == pC->VideoState) ) /**< If the video encoding is going on */
+    {
+        if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+        {
+            err = M4MCS_intVideoNullEncoding(pC);
+        }
+        else
+        {
+            err = M4MCS_intVideoTranscoding(pC);
+        }
+
+        /**
+        * No more space, quit properly */
+        if( M4WAR_WRITER_STOP_REQ == err )
+        {
+            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->dViDecCurrentCts
+                - pC->uiBeginCutTime) * 100)
+                / (pC->uiEndCutTime - pC->uiBeginCutTime));
+
+            pC->State = M4MCS_kState_FINISHED;
+
+            /* bad file produced on very short 3gp file */
+            if( pC->dViDecCurrentCts - pC->uiBeginCutTime == 0 )
+            {
+                /* Nothing has been encoded -> bad produced file -> error returned */
+                M4OSA_TRACE2_0(
+                    "M4MCS_intStepEncoding(): video transcoding returns\
+                    M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL");
+                return M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL;
+            }
+            else
+            {
+#ifndef M4MCS_AUDIOONLY
+                /* clean AIR context needed to keep media aspect ratio*/
+
+                if( M4OSA_NULL != pC->m_air_context )
+                {
+                    err = M4AIR_cleanUp(pC->m_air_context);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x",
+                            err);
+                        return err;
+                    }
+                    pC->m_air_context = M4OSA_NULL;
+                }
+
+#endif /*M4MCS_AUDIOONLY*/
+
+                M4OSA_TRACE2_0(
+                    "M4MCS_intStepEncoding(): video transcoding returns M4MCS_ERR_NOMORE_SPACE");
+                return M4MCS_ERR_NOMORE_SPACE;
+            }
+        }
+
+        /**< The input plane is null because the input image will be obtained by the
+        VPP filter from the context */
+        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intStepEncoding(): video transcoding returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /* ---------- AUDIO TRANSCODING ---------- */
+
+    if( ( pC->noaudio == M4OSA_FALSE) && (M4MCS_kStreamState_STARTED
+        == pC->AudioState) ) /**< If there is an audio stream */
+    {
+        while(
+            /**< If the video encoding is running, encode audio until we reach video time */
+            ( ( pC->novideo == M4OSA_FALSE)
+            && (M4MCS_kStreamState_STARTED == pC->VideoState)
+            && (pC->ReaderAudioAU.m_CTS
+            + pC->m_audioAUDuration < pC->ReaderVideoAU.m_CTS)) ||
+            /**< If the video encoding is not running, perform 1 step of audio encoding */
+            (( M4MCS_kStreamState_STARTED == pC->AudioState)
+            && (uiAudioStepCount < 1)) )
+        {
+            uiAudioStepCount++;
+
+            /**< check if an audio effect has to be applied*/
+            err = M4MCS_intCheckAudioEffects(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intStepEncoding(): M4MCS_intCheckAudioEffects returns err: 0x%x",
+                    err);
+                return err;
+            }
+
+            if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+            {
+                err = M4MCS_intAudioNullEncoding(pC);
+            }
+            else /**< Audio transcoding */
+            {
+                err = M4MCS_intAudioTranscoding(pC);
+            }
+
+            /**
+            * No more space, quit properly */
+            if( M4WAR_WRITER_STOP_REQ == err )
+            {
+                *pProgress =
+                    (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
+                    - pC->uiBeginCutTime) * 100)
+                    / (pC->uiEndCutTime - pC->uiBeginCutTime));
+
+                pC->State = M4MCS_kState_FINISHED;
+
+                /* bad file produced on very short 3gp file */
+                if( pC->ReaderAudioAU.m_CTS - pC->uiBeginCutTime == 0 )
+                {
+                    /* Nothing has been encoded -> bad produced file -> error returned */
+                    M4OSA_TRACE2_0(
+                        "M4MCS_intStepEncoding():\
+                        audio transcoding returns M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL");
+                    return M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL;
+                }
+                else
+                {
+#ifndef M4MCS_AUDIOONLY
+                    /* clean AIR context needed to keep media aspect ratio*/
+
+                    if( M4OSA_NULL != pC->m_air_context )
+                    {
+                        err = M4AIR_cleanUp(pC->m_air_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x",
+                                err);
+                            return err;
+                        }
+                        pC->m_air_context = M4OSA_NULL;
+                    }
+
+#endif /*M4MCS_AUDIOONLY*/
+
+                    M4OSA_TRACE2_0(
+                        "M4MCS_intStepEncoding(): \
+                        audio transcoding returns M4MCS_ERR_NOMORE_SPACE");
+                    return M4MCS_ERR_NOMORE_SPACE;
+                }
+            }
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                pC->AudioState = M4MCS_kStreamState_FINISHED;
+                M4OSA_TRACE3_0(
+                    "M4MCS_intStepEncoding(): audio transcoding returns M4WAR_NO_MORE_AU");
+                break;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intStepEncoding(): audio transcoding returns 0x%x",
+                    err);
+                return err;
+            }
+
+            /**
+            * Check for end cut */
+            /* We absolutely want to have less or same audio duration as video ->
+            (2*pC->m_audioAUDuration) */
+            if( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
+                + (2 *pC->m_audioAUDuration) > pC->uiEndCutTime )
+            {
+                pC->AudioState = M4MCS_kStreamState_FINISHED;
+                break;
+            }
+        }
+    }
+
+    /* ---------- PROGRESS MANAGEMENT ---------- */
+
+    /**
+    * Compute progress */
+    /* Progress is based on the audio CTS when there is no video, otherwise on
+     * the video decode CTS. */
+    if( pC->novideo )
+    {
+        if( pC->ReaderAudioAU.m_CTS < pC->uiBeginCutTime )
+        {
+            *pProgress = 0;
+        }
+        else
+        {
+            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
+                - pC->uiBeginCutTime) * 100)
+                / (pC->uiEndCutTime - pC->uiBeginCutTime));
+        }
+        //printf(": %6.0f\b\b\b\b\b\b\b\b", pC->ReaderAudioAU.m_CTS);
+
+    }
+    else
+    {
+        if( pC->dViDecCurrentCts < pC->uiBeginCutTime )
+        {
+            *pProgress = 0;
+        }
+        else
+        {
+            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->dViDecCurrentCts
+                - pC->uiBeginCutTime) * 100)
+                / (pC->uiEndCutTime - pC->uiBeginCutTime));
+        }
+        //printf(": %6.0f\b\b\b\b\b\b\b\b", pC->dViDecCurrentCts);
+    }
+
+    /**
+    * Sanity check */
+    /* 100 is only reported below, once both streams are finished */
+    if( *pProgress > 99 )
+    {
+        *pProgress = 99;
+    }
+
+    /**
+    * Increment CTS for next step */
+    if( pC->novideo == M4OSA_FALSE )
+    {
+        if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+        {
+            /* Null video encoding: advance the decode CTS by 1 unit per step */
+            pC->dViDecCurrentCts += 1;
+        }
+        else
+        {
+            pC->dViDecCurrentCts += pC->dCtsIncrement;
+        }
+    }
+
+    /**
+    * The transcoding is finished when no stream is being encoded anymore */
+    if( ( ( pC->novideo) || (M4MCS_kStreamState_FINISHED == pC->VideoState))
+        && (( pC->noaudio) || (M4MCS_kStreamState_FINISHED == pC->AudioState)) )
+    {
+        /* the AIR part can only be used when video codecs are compiled*/
+#ifndef M4MCS_AUDIOONLY
+        /* clean AIR context needed to keep media aspect ratio*/
+
+        if( M4OSA_NULL != pC->m_air_context )
+        {
+            err = M4AIR_cleanUp(pC->m_air_context);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x",
+                    err);
+                return err;
+            }
+            pC->m_air_context = M4OSA_NULL;
+        }
+
+#endif /*M4MCS_AUDIOONLY*/
+        /**/
+
+        *pProgress = 100;
+        pC->State = M4MCS_kState_FINISHED;
+        M4OSA_TRACE2_0(
+            "M4MCS_intStepEncoding(): transcoding finished, returning M4MCS_WAR_TRANSCODING_DONE");
+        return M4MCS_WAR_TRANSCODING_DONE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intStepEncoding(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intStepBeginVideoJump(M4MCS_InternalContext* pC)
+ * @brief  Jump the video reader back to the RAP preceding the begin cut time
+ *         (or a fixed offset back when no RAP table exists), handle H.264
+ *         trimming SPS/PPS parsing, then decode the first step from there.
+ * @param  pC (IN) MCS private context
+ * @return M4NO_ERROR No error
+ * @return Any error returned by the reader or the video decoder
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intStepBeginVideoJump( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iCts; /**< In/out jump target time; updated by the reader */
+
+    if( pC->novideo )
+    {
+        pC->State = M4MCS_kState_BEGINVIDEODECODE;
+        return M4NO_ERROR;
+    }
+
+    /**
+    * Jump to the previous RAP in the clip (first get the time, then jump) */
+    iCts = (M4OSA_Int32)pC->dViDecStartingCts;
+    err = pC->m_pReader->m_pFctGetPrevRapTime(pC->pReaderContext,
+        (M4_StreamHandler *)pC->pReaderVideoStream, &iCts);
+
+    if( M4WAR_READER_INFORMATION_NOT_PRESENT == err )
+    {
+        /* No RAP table, jump backward and predecode */
+        iCts = (M4OSA_Int32)pC->dViDecStartingCts - M4MCS_NO_STSS_JUMP_POINT;
+
+        if( iCts < 0 )
+            iCts = 0;
+    }
+    else if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepBeginVideoJump: m_pFctGetPrevRapTime returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /* + CRLV6775 -H.264 Trimming */
+
+    if( M4OSA_TRUE == pC->bH264Trim )
+    {
+
+        // Save jump time for safety, this fix should be generic
+
+        M4OSA_Int32 iCtsOri = iCts;
+
+
+        err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream, &iCts);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intStepBeginVideoJump: m_pFctJump(V) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /* Read the first video AU if it has not been fetched yet, so that its
+         * SPS/PPS can be parsed below for the trimming instance. */
+        if( pC->ReaderVideoAU1.m_structSize == 0 )
+        {
+            /**
+            * Initializes an access Unit */
+            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderVideoStream,
+                &pC->ReaderVideoAU1);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                    err);
+                return err;
+            }
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderVideoStream,
+                &pC->ReaderVideoAU1);
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                M4OSA_TRACE2_0(
+                    "M4MCS_intVideoNullEncoding(): \
+                    m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
+                /* The audio transcoding is finished */
+                pC->VideoState = M4MCS_kStreamState_FINISHED;
+                return err;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intVideoNullEncoding():\
+                    m_pReaderDataIt->m_pFctGetNextAu(video) returns 0x%x",
+                    err);
+                return err;
+            }
+
+            /* NOTE(review): m_structSize is reset to 0 so this AU struct is
+             * treated as uninitialized again by later code paths that use the
+             * same == 0 guard — confirm this re-initialization is intended. */
+            pC->ReaderVideoAU1.m_structSize = 0;
+        }
+
+        err = H264MCS_ProcessSPS_PPS(pC->m_pInstance,
+            (M4OSA_UInt8 *)pC->ReaderVideoAU1.m_dataAddress, pC->ReaderVideoAU1.m_size);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intStepBeginVideoJump: H264MCS_ProcessSPS_PPS returns 0x%x!",
+                err);
+            return err;
+        }
+
+
+        // Restore jump time for safety, this fix should be generic
+
+        iCts = iCtsOri;
+
+
+    }
+    /* - CRLV6775 -H.264 Trimming */
+
+    /**
+    * Decode one step */
+    pC->dViDecCurrentCts = (M4OSA_Double)(iCts + pC->iVideoBeginDecIncr);
+
+    /**
+    * Be sure we don't decode too far */
+    if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
+    {
+        pC->dViDecCurrentCts = pC->dViDecStartingCts;
+    }
+
+    /**
+    * Decode at least once with the bJump flag to true */
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_intClipDecodeVideoUpToCts: Decoding upTo CTS %.3f",
+        pC->dViDecCurrentCts);
+    pC->isRenderDup = M4OSA_FALSE;
+    err =
+        pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &pC->dViDecCurrentCts,
+        M4OSA_TRUE, 0);
+
+    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
+        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepBeginVideoJump: m_pFctDecode returns 0x%x!", err);
+        return err;
+    }
+
+    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
+    {
+        M4OSA_TRACE2_0("Decoding output the same frame as before 1");
+        pC->isRenderDup = M4OSA_TRUE;
+    }
+
+    /**
+    * Increment decoding cts for the next step */
+    pC->dViDecCurrentCts += (M4OSA_Double)pC->iVideoBeginDecIncr;
+
+    /**
+    * Update state automaton */
+    if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
+    {
+        /**
+        * Be sure we don't decode too far */
+        pC->dViDecCurrentCts = pC->dViDecStartingCts;
+        pC->State = M4MCS_kState_PROCESSING;
+    }
+    else
+    {
+        pC->State = M4MCS_kState_BEGINVIDEODECODE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intStepBeginVideoJump(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intStepBeginVideoDecode(M4MCS_InternalContext* pC)
+ * @brief  Decode one step of video (without jumping) while pre-decoding up to
+ *         the begin cut starting CTS; switches to PROCESSING once reached.
+ * @param  pC (IN) MCS private context
+ * @return M4NO_ERROR No error
+ * @return Any error returned by the video decoder
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intStepBeginVideoDecode( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4_MediaTime dDecTarget;
+
+    if( pC->novideo )
+    {
+        pC->State = M4MCS_kState_PROCESSING;
+        return M4NO_ERROR;
+    }
+
+    /**
+    * Decode */
+    /* bJump flag is false here, unlike the initial decode in
+     * M4MCS_intStepBeginVideoJump */
+    dDecTarget = pC->dViDecCurrentCts;
+    M4OSA_TRACE3_1("M4MCS_intStepBeginDecode: Decoding upTo CTS %.3f",
+        pC->dViDecCurrentCts);
+    pC->isRenderDup = M4OSA_FALSE;
+    err = pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &dDecTarget,
+        M4OSA_FALSE, 0);
+
+    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
+        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepBeginVideoDecode: m_pFctDecode returns 0x%x!", err);
+        return err;
+    }
+
+    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
+    {
+        M4OSA_TRACE2_0("Decoding output the same frame as before 2");
+        pC->isRenderDup = M4OSA_TRUE;
+    }
+
+    /**
+    * Increment decoding cts for the next step */
+    pC->dViDecCurrentCts += (M4OSA_Double)pC->iVideoBeginDecIncr;
+
+    /**
+    * Update state automaton, if needed */
+    if( ( (M4OSA_UInt32)pC->dViDecCurrentCts > pC->dViDecStartingCts)
+        || (M4WAR_NO_MORE_AU == err) )
+    {
+        /**
+        * Be sure we don't decode too far */
+        pC->dViDecCurrentCts = (M4OSA_Double)pC->dViDecStartingCts;
+        pC->State = M4MCS_kState_PROCESSING;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intStepBeginVideoDecode(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/*****************************/
+/* define AMR silence frames */
+/*****************************/
+
+/* Pre-canned silence access units. When M4VSS3GPP_SILENCE_FRAMES is defined
+ * the byte patterns are compiled into this translation unit; otherwise they
+ * are declared extern and must be provided by another translation unit. */
+
+#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE 13
+#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 160
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+
+const M4OSA_UInt8 M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[
+    M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+    {
+        0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33, 0xFF, 0xE0, 0x00, 0x00, 0x00
+    };
+#else
+
+extern
+const
+M4OSA_UInt8
+M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
+
+#endif
+
+/*****************************/
+/* define AAC silence frames */
+/*****************************/
+
+#define M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE 4
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+
+const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[
+    M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE] =
+    {
+        0x00, 0xC8, 0x20, 0x07
+    };
+#else
+
+extern const M4OSA_UInt8
+M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE];
+
+#endif
+
+#define M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE 6
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+
+const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[
+    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE] =
+    {
+        0x21, 0x10, 0x03, 0x20, 0x54, 0x1C
+    };
+#else
+
+extern const
+M4OSA_UInt8
+M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE];
+
+#endif
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intAudioNullEncoding(M4MCS_InternalContext* pC)
+ * @brief   "Null encoding" audio path: reads audio AUs from the input and
+ *          writes them unmodified to the output (no decode/encode). Two AUs
+ *          (AU1/AU2) are kept in flight so the duration of the AU currently
+ *          written can be derived from the CTS of the following one. The
+ *          very first written AU is replaced by a silence frame (AAC/AMR) to
+ *          avoid decoder start-up errors on the produced file.
+ * @param   pC  (IN/OUT) MCS internal context
+ * @return  M4NO_ERROR: No error
+ * @return  M4WAR_NO_MORE_AU: the audio track is finished
+ * @return  M4ERR_ALLOC, or any reader/writer error code
+ ******************************************************************************
+ */
+
+static M4OSA_ERR M4MCS_intAudioNullEncoding( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+
+    if( pC->noaudio )
+        return M4NO_ERROR;
+
+    /* Check if all audio frame has been written (happens at begin cut) */
+    if( pC->ReaderAudioAU.m_size == 0 )
+    {
+        /**
+        * Initializes a new AU if needed */
+        if( pC->ReaderAudioAU1.m_structSize == 0 )
+        {
+            /**
+            * Initializes an access Unit */
+            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU1);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intAudioNullEncoding(): m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+                    err);
+                return err;
+            }
+
+            pC->m_pDataAddress1 =
+                (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ReaderAudioAU1.m_maxsize,
+                M4MCS, (M4OSA_Char *)"Temporary AU1 buffer");
+
+            if( pC->m_pDataAddress1 == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0(
+                    "M4MCS_intAudioNullEncoding(): allocation error");
+                return M4ERR_ALLOC;
+            }
+
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU1);
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                M4OSA_TRACE2_0(
+                    "M4MCS_intAudioNullEncoding():\
+                     m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
+                /* The audio transcoding is finished */
+                pC->AudioState = M4MCS_kStreamState_FINISHED;
+                return err;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intAudioNullEncoding(): \
+                     m_pReaderDataIt->m_pFctGetNextAu(Audio) returns 0x%x",
+                    err);
+                return err;
+            }
+            /*FB 2009.04.02: PR surnxp#616: Crash in MCS while Audio AU copying ,
+            constant memory reader case*/
+            if( pC->ReaderAudioAU1.m_maxsize
+                > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
+            {
+                /* Constant memory reader case, we need to reallocate the temporary buffers */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU1.m_maxsize);
+                /* pC->m_pDataAddress1 and
+                pC->m_pDataAddress2 must be reallocated at the same time */
+                /* because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize take
+                maximum value. Then the test "if(pC->ReaderAudioAU?.m_maxsize >
+                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize)" is never true */
+                /* and the size of the second buffer is never changed. */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU1.m_maxsize);
+                /* pC->m_pDataAddress1 and
+                pC->m_pDataAddress2 must be reallocated at the same time */
+                /* Update stream properties */
+                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
+                    pC->ReaderAudioAU1.m_maxsize;
+            }
+            /**/
+            memcpy((void *)pC->m_pDataAddress1,
+                (void *)pC->ReaderAudioAU1.m_dataAddress,
+                pC->ReaderAudioAU1.m_size);
+        }
+
+        if( pC->ReaderAudioAU2.m_structSize == 0 )
+        {
+            /**
+            * Initializes an access Unit */
+            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU2);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intAudioNullEncoding(): m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+                    err);
+                return err;
+            }
+            pC->m_pDataAddress2 =
+                (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ReaderAudioAU2.m_maxsize,
+                M4MCS, (M4OSA_Char *)"Temporary AU buffer");
+
+            if( pC->m_pDataAddress2 == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0(
+                    "M4MCS_intAudioNullEncoding(): allocation error");
+                return M4ERR_ALLOC;
+            }
+        }
+        /**
+        * Read the next audio AU in the input file.
+        * The AU with the older CTS is the one being written; the buffer of
+        * the newer one is refilled with the next AU from the reader.
+        * NOTE(review): err from m_pFctGetNextAu is only tested after the
+        * memcpy below — verify the reader leaves the AU untouched on error. */
+        if( pC->ReaderAudioAU2.m_CTS > pC->ReaderAudioAU1.m_CTS )
+        {
+            memcpy((void *) &pC->ReaderAudioAU,
+                (void *) &pC->ReaderAudioAU2, sizeof(M4_AccessUnit));
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU1);
+
+            if( pC->ReaderAudioAU1.m_maxsize
+                > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
+            {
+                /* Constant memory reader case, we need to reallocate the temporary buffers */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU1.m_maxsize);
+                /* pC->m_pDataAddress1
+                 * and pC->m_pDataAddress2 must be reallocated at the same time *
+                 * because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize take
+                 * maximum value. Then the test "if(pC->ReaderAudioAU?.m_maxsize >
+                 * pC->pReaderAudioStream->m_basicProperties.m_maxAUSize)" is never true *
+                 * and the size of the second buffer is never changed.
+                 */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU1.m_maxsize);
+                /* pC->m_pDataAddress1 and
+                 * pC->m_pDataAddress2 must be reallocated at the same time
+                 * Update stream properties
+                 */
+                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
+                    pC->ReaderAudioAU1.m_maxsize;
+            }
+            /**/
+            memcpy((void *)pC->m_pDataAddress1,
+                (void *)pC->ReaderAudioAU1.m_dataAddress,
+                pC->ReaderAudioAU1.m_size);
+            pC->m_audioAUDuration =
+                pC->ReaderAudioAU1.m_CTS - pC->ReaderAudioAU2.m_CTS;
+            pC->ReaderAudioAU.m_dataAddress = pC->m_pDataAddress2;
+        }
+        else
+        {
+            memcpy((void *) &pC->ReaderAudioAU,
+                (void *) &pC->ReaderAudioAU1, sizeof(M4_AccessUnit));
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU2);
+            /* Crash in MCS while Audio AU copying ,
+             * constant memory reader case
+             */
+            if( pC->ReaderAudioAU2.m_maxsize
+                > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
+            {
+                /* Constant memory reader case, we need to reallocate the temporary buffers */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU2.m_maxsize);
+                /* pC->m_pDataAddress1 and
+                 * pC->m_pDataAddress2 must be reallocated at the same time
+                 * because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize take maximum
+                 * value. Then the test "if(pC->ReaderAudioAU?.m_maxsize > pC->pReaderAudioStream->
+                 * m_basicProperties.m_maxAUSize)" is never true
+                 * and the size of the second buffer is never changed.
+                 */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU2.m_maxsize);
+                /* [ END ] 20091008 JFV PR fix surnxpsw#1071: pC->m_pDataAddress1 and
+                pC->m_pDataAddress2 must be reallocated at the same time */
+                /* Update stream properties */
+                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
+                    pC->ReaderAudioAU2.m_maxsize;
+            }
+            /**/
+            memcpy((void *)pC->m_pDataAddress2,
+                (void *)pC->ReaderAudioAU2.m_dataAddress,
+                pC->ReaderAudioAU2.m_size);
+            pC->m_audioAUDuration =
+                pC->ReaderAudioAU2.m_CTS - pC->ReaderAudioAU1.m_CTS;
+            pC->ReaderAudioAU.m_dataAddress = pC->m_pDataAddress1;
+        }
+
+        if( M4WAR_NO_MORE_AU == err )
+        {
+            M4OSA_TRACE2_0(
+                "M4MCS_intAudioNullEncoding(): \
+                 m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
+            /* The audio transcoding is finished */
+            pC->AudioState = M4MCS_kStreamState_FINISHED;
+            return err;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioNullEncoding(): \
+                 m_pReaderDataIt->m_pFctGetNextAu(Audio) returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Prepare the writer AU */
+    err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
+        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioNullEncoding(): pWriterDataFcts->pStartAU(Audio) returns 0x%x",
+            err);
+        return err;
+    }
+
+    if( pC->uiAudioAUCount
+        == 0 ) /* If it is the first AU, we set it to silence
+        (else, errors 0x3841, 0x3847 in our AAC decoder) */
+    {
+        if( pC->InputFileProperties.AudioStreamType == M4VIDEOEDITING_kAAC
+            || pC->InputFileProperties.AudioStreamType
+            == M4VIDEOEDITING_kAACplus
+            || pC->InputFileProperties.AudioStreamType
+            == M4VIDEOEDITING_keAACplus )
+        {
+            if( pC->InputFileProperties.uiNbChannels == 1 )
+            {
+                pC->WriterAudioAU.size = M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                memcpy((void *)pC->WriterAudioAU.dataAddress,
+                    (void *)M4VSS3GPP_AAC_AU_SILENCE_MONO,
+                    pC->WriterAudioAU.size);
+            }
+            else if( pC->InputFileProperties.uiNbChannels == 2 )
+            {
+                pC->WriterAudioAU.size = M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                memcpy((void *)pC->WriterAudioAU.dataAddress,
+                    (void *)M4VSS3GPP_AAC_AU_SILENCE_STEREO,
+                    pC->WriterAudioAU.size);
+            }
+            else
+            {
+                /* Must never happen ...*/
+                M4OSA_TRACE1_0(
+                    "M4MCS_intAudioNullEncoding: Bad number of channels in audio input");
+                return M4MCS_ERR_INVALID_INPUT_FILE;
+            }
+        }
+        else if( pC->InputFileProperties.AudioStreamType
+            == M4VIDEOEDITING_kAMR_NB )
+        {
+            pC->WriterAudioAU.size = M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+            memcpy((void *)pC->WriterAudioAU.dataAddress,
+                (void *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048,
+                pC->WriterAudioAU.size);
+            /* Some remaining AMR AU needs to be copied */
+            if( pC->ReaderAudioAU.m_size != 0 )
+            {
+                /* Update Writer AU.
+                 * NOTE(review): dataAddress + SIZE is pointer arithmetic on
+                 * the dataAddress type — confirm it is a byte pointer here,
+                 * otherwise the offset is scaled by the element size. */
+                pC->WriterAudioAU.size += pC->ReaderAudioAU.m_size;
+                memcpy((void *)(pC->WriterAudioAU.dataAddress
+                    + M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE),
+                    (void *)pC->ReaderAudioAU.m_dataAddress,
+                    pC->ReaderAudioAU.m_size);
+            }
+        }
+        else
+        {
+            /*MP3 case: copy the AU*/
+            M4OSA_TRACE3_1(
+                "M4MCS_intAudioNullEncoding(): Copying audio AU: size=%d",
+                pC->ReaderAudioAU.m_size);
+            memcpy((void *)pC->WriterAudioAU.dataAddress,
+                (void *)pC->ReaderAudioAU.m_dataAddress,
+                pC->ReaderAudioAU.m_size);
+            pC->WriterAudioAU.size = pC->ReaderAudioAU.m_size;
+        }
+    }
+    else
+    {
+        /**
+        * Copy audio data from reader AU to writer AU */
+        M4OSA_TRACE3_1(
+            "M4MCS_intAudioNullEncoding(): Copying audio AU: size=%d",
+            pC->ReaderAudioAU.m_size);
+        memcpy((void *)pC->WriterAudioAU.dataAddress,
+            (void *)pC->ReaderAudioAU.m_dataAddress,
+            pC->ReaderAudioAU.m_size);
+        pC->WriterAudioAU.size = pC->ReaderAudioAU.m_size;
+    }
+
+    /**
+    * Convert CTS unit from milliseconds to timescale */
+    pC->WriterAudioAU.CTS =
+        (M4OSA_Time)((( pC->ReaderAudioAU.m_CTS - pC->iAudioCtsOffset)
+        * (pC->WriterAudioStream.timeScale / 1000.0)));
+
+    if( pC->InputFileProperties.AudioStreamType == M4VIDEOEDITING_kAMR_NB
+        && pC->uiAudioAUCount == 0 )
+    {
+        pC->iAudioCtsOffset -=
+            20; /* Duration of a silence AMR AU, to handle the duration of the added
+                silence frame */
+    }
+    pC->WriterAudioAU.nbFrag = 0;
+    M4OSA_TRACE3_1("M4MCS_intAudioNullEncoding(): audio AU: CTS=%d ms",
+        pC->WriterAudioAU.CTS);
+
+    /**
+    * Write it to the output file */
+    pC->uiAudioAUCount++;
+    err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
+        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioNullEncoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* All the audio has been written */
+    pC->ReaderAudioAU.m_size = 0;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intAudioNullEncoding(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intAudioTranscoding(M4MCS_InternalContext* pC)
+ * @brief   One pass of audio transcoding: decode -> feed resampler (SSRC) ->
+ *          optional stereo<->mono conversion -> encode -> write. Partially
+ *          consumed buffers are tracked via pPosIn* cursors so a pass can
+ *          resume exactly where the previous one stopped (hence the gotos).
+ * @param   pC  (IN/OUT) MCS internal context
+ * @return  M4NO_ERROR: No error
+ * @return  M4ERR_ALLOC, M4MCS_ERR_AUDIO_CONVERSION_FAILED, or any
+ *          decoder/encoder/writer error code
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intAudioTranscoding( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;                 /**< General error */
+
+    M4OSA_UInt32
+        uiBytesDec;                /**< Nb of bytes available in the decoder OUT buffer */
+    M4OSA_UInt32
+        uiDecoder2Ssrc_NbBytes;    /**< Nb of bytes copied into the ssrc IN buffer */
+
+    int ssrcErr;                   /**< Error while ssrc processing */
+    M4OSA_UInt32 uiSsrcInSize;     /**< Size in bytes of ssrc input buffer */
+    M4OSA_UInt32
+        uiSsrcInRoom;              /**< Nb of bytes available in the ssrc IN buffer */
+    M4OSA_MemAddr8
+        pSsrcInput;                /**< Pointer to the good buffer location for ssrc input */
+    M4OSA_UInt32 uiSsrcOutSize;    /**< Size in bytes of ssrc output buffer */
+    M4OSA_UInt32
+        uiBytesSsrc;               /**< Nb of bytes available in the ssrc OUT buffer */
+
+    M4OSA_UInt8
+        needChannelConversion;     /**< Flag to indicate if a stereo <-> mono conversion is needed */
+    M4OSA_UInt32
+        uiChannelConvertorCoeff;   /**< Multiplicative coefficient if stereo
+                                        <-> mono conversion is applied */
+    M4OSA_MemAddr8 pChannelConvertorInput =
+        M4OSA_NULL; /**< Pointer to the good buffer location for channel convertor input */
+    M4OSA_UInt32 uiChannelConvertorNbSamples =
+        0; /**< Nb of pcm samples to convert in channel convertor */
+    M4OSA_MemAddr8 pChannelConvertorOutput =
+        M4OSA_NULL; /**< Pointer to the good buffer location for channel convertor output */
+
+    M4OSA_Time
+        frameTimeDelta;            /**< Duration of the encoded (then written) data */
+    M4OSA_UInt32
+        uiEncoderInRoom;           /**< Nb of bytes available in the encoder IN buffer */
+    M4OSA_UInt32
+        uiSsrc2Encoder_NbBytes;    /**< Nb of bytes copied from the ssrc OUT buffer */
+    M4OSA_MemAddr8
+        pEncoderInput;             /**< Pointer to the good buffer location for encoder input */
+    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+
+    M4OSA_Int16 *tempBuffOut = M4OSA_NULL;
+    /*FlB 2009.03.04: apply audio effects if an effect is active*/
+    M4OSA_Int8 *pActiveEffectNumber = &(pC->pActiveEffectNumber);
+
+    uint32_t errCode = M4NO_ERROR;
+
+    if( pC->noaudio )
+        return M4NO_ERROR;
+
+    /* _________________ */
+    /*|                 |*/
+    /*| READ AND DECODE |*/
+    /*|_________________|*/
+
+    /* Check if we have to empty the decoder out buffer first */
+    if( M4OSA_NULL != pC->pPosInDecBufferOut )
+    {
+        goto m4mcs_intaudiotranscoding_feed_resampler;
+    }
+
+    err = pC->m_pAudioDecoder->m_pFctStepAudioDec(pC->pAudioDecCtxt,
+        M4OSA_NULL, &pC->AudioDecBufferOut, M4OSA_FALSE);
+
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioTranscoding(): m_pAudioDecoder->m_pFctStepAudio returns 0x%x",
+            err);
+        return err;
+    }
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+    fwrite(pC->AudioDecBufferOut.m_dataAddress,
+        pC->AudioDecBufferOut.m_bufferSize, 1, file_pcm_decoder);
+
+#endif
+
+    pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(pC->pAudioDecCtxt,
+        M4AD_kOptionID_GetAudioAUErrCode, (M4OSA_DataOption) &errCode);
+
+    if ( M4WAR_NO_MORE_AU == errCode ) {
+        pC->AudioState = M4MCS_kStreamState_FINISHED;
+        M4OSA_TRACE2_0(
+            "M4MCS_intAudioTranscoding():\
+             m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
+        return errCode;
+    }
+
+    /* Set the current position in the decoder out buffer */
+    pC->pPosInDecBufferOut = pC->AudioDecBufferOut.m_dataAddress;
+
+    /* ________________ */
+    /*|                |*/
+    /*| FEED RESAMPLER |*/
+    /*|________________|*/
+
+m4mcs_intaudiotranscoding_feed_resampler:
+
+    /* Check if we have to empty the ssrc out buffer first */
+    if( M4OSA_NULL != pC->pPosInSsrcBufferOut )
+    {
+        goto m4mcs_intaudiotranscoding_prepare_input_buffer;
+    }
+
+    /* Compute number of bytes remaining in the decoder buffer */
+    uiSsrcInSize = pC->iSsrcNbSamplIn * sizeof(short)
+        * pC->pReaderAudioStream->m_nbChannels;
+    uiBytesDec = ( pC->AudioDecBufferOut.m_dataAddress
+        + pC->AudioDecBufferOut.m_bufferSize) - pC->pPosInDecBufferOut;
+
+    /* Check if we can feed directly the Ssrc with the decoder out buffer */
+    if( ( pC->pPosInSsrcBufferIn == pC->pSsrcBufferIn)
+        && (uiBytesDec >= uiSsrcInSize) )
+    {
+        pSsrcInput = pC->pPosInDecBufferOut;
+
+        /* update data consumed into decoder buffer after resampling */
+        if( uiBytesDec == uiSsrcInSize )
+            pC->pPosInDecBufferOut = M4OSA_NULL;
+        else
+            pC->pPosInDecBufferOut += uiSsrcInSize;
+
+        goto m4mcs_intaudiotranscoding_do_resampling;
+    }
+
+    /**
+    * Compute remaining space in Ssrc buffer in */
+    uiSsrcInRoom = ( pC->pSsrcBufferIn + uiSsrcInSize) - pC->pPosInSsrcBufferIn;
+
+    /**
+    * Nb of bytes copied is the minimum between nb of bytes remaining in
+    * decoder out buffer and space remaining in ssrc in buffer */
+    uiDecoder2Ssrc_NbBytes =
+        (uiSsrcInRoom < uiBytesDec) ? uiSsrcInRoom : uiBytesDec;
+
+    /**
+    * Copy from the decoder out buffer into the Ssrc in buffer */
+    memcpy((void *)pC->pPosInSsrcBufferIn, (void *)pC->pPosInDecBufferOut,
+        uiDecoder2Ssrc_NbBytes);
+
+    /**
+    * Update the position in the decoder out buffer */
+    pC->pPosInDecBufferOut += uiDecoder2Ssrc_NbBytes;
+
+    /**
+    * Update the position in the Ssrc in buffer */
+    pC->pPosInSsrcBufferIn += uiDecoder2Ssrc_NbBytes;
+
+    /**
+    * Check if the decoder buffer out is empty */
+    if( ( pC->pPosInDecBufferOut - pC->AudioDecBufferOut.m_dataAddress)
+        == (M4OSA_Int32)pC->AudioDecBufferOut.m_bufferSize )
+    {
+        pC->pPosInDecBufferOut = M4OSA_NULL;
+    }
+
+    /* Check if the Ssrc in buffer is ready (= full) */
+    if( ( pC->pPosInSsrcBufferIn - pC->pSsrcBufferIn)
+        < (M4OSA_Int32)uiSsrcInSize )
+    {
+        goto m4mcs_intaudiotranscoding_end;
+    }
+
+    pSsrcInput = pC->pSsrcBufferIn;
+
+    /* update data consumed into ssrc buffer in after resampling (empty) */
+    pC->pPosInSsrcBufferIn = pC->pSsrcBufferIn;
+
+    /* ___________________ */
+    /*|                   |*/
+    /*| DO THE RESAMPLING |*/
+    /*|___________________|*/
+
+m4mcs_intaudiotranscoding_do_resampling:
+
+    /**
+    * No need for memcopy, we can feed Ssrc directly with the data in the audio
+    decoder out buffer*/
+
+    ssrcErr = 0;
+
+    if( pC->pReaderAudioStream->m_nbChannels == 1 )
+    {
+        /* Mono input: resample into a temporary buffer, then downmix the
+         * resampler's 2-sample-wide output back to mono below */
+        tempBuffOut =
+            (short *)M4OSA_32bitAlignedMalloc((pC->iSsrcNbSamplOut * sizeof(short) * 2
+            * ((*pC).InputFileProperties).uiNbChannels),
+            M4VSS3GPP,(M4OSA_Char *) "tempBuffOut");
+
+        /* Bail out on allocation failure (memset/resample would crash) */
+        if( M4OSA_NULL == tempBuffOut )
+        {
+            M4OSA_TRACE1_0(
+                "M4MCS_intAudioTranscoding(): unable to allocate tempBuffOut");
+            return M4ERR_ALLOC;
+        }
+
+        memset((void *)tempBuffOut, 0,(pC->iSsrcNbSamplOut * sizeof(short) * 2
+            * ((*pC).InputFileProperties).uiNbChannels));
+
+        LVAudioresample_LowQuality((short *)tempBuffOut, (short *)pSsrcInput,
+            pC->iSsrcNbSamplOut, pC->pLVAudioResampler);
+    }
+    else
+    {
+        memset((void *)pC->pSsrcBufferOut, 0, (pC->iSsrcNbSamplOut * sizeof(short)
+            * ((*pC).InputFileProperties).uiNbChannels));
+
+        LVAudioresample_LowQuality((short *)pC->pSsrcBufferOut,
+            (short *)pSsrcInput, pC->iSsrcNbSamplOut, pC->pLVAudioResampler);
+    }
+
+    if( pC->pReaderAudioStream->m_nbChannels == 1 )
+    {
+        From2iToMono_16((short *)tempBuffOut, (short *)pC->pSsrcBufferOut,
+            (short)pC->iSsrcNbSamplOut);
+        free(tempBuffOut);
+    }
+
+
+    if( 0 != ssrcErr )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioTranscoding: SSRC_Process returns 0x%x, \
+            returning M4MCS_ERR_AUDIO_CONVERSION_FAILED",
+            ssrcErr);
+        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
+    }
+
+    pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
+
+    /* ______________________ */
+    /*|                      |*/
+    /*| PREPARE INPUT BUFFER |*/
+    /*|______________________|*/
+
+m4mcs_intaudiotranscoding_prepare_input_buffer:
+
+    /* Set the flag for channel conversion requirement */
+    if( ( pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+        && (pC->pReaderAudioStream->m_nbChannels == 2) )
+    {
+        needChannelConversion = 1;
+        uiChannelConvertorCoeff = 4;
+    }
+    else if( ( pC->AudioEncParams.ChannelNum == M4ENCODER_kStereo)
+        && (pC->pReaderAudioStream->m_nbChannels == 1) )
+    {
+        needChannelConversion = 2;
+        uiChannelConvertorCoeff = 1;
+    }
+    else
+    {
+        needChannelConversion = 0;
+        uiChannelConvertorCoeff = 2;
+    }
+
+    /* Compute number of bytes remaining in the Ssrc buffer */
+    uiSsrcOutSize = pC->iSsrcNbSamplOut * sizeof(short)
+        * pC->pReaderAudioStream->m_nbChannels;
+    uiBytesSsrc =
+        ( pC->pSsrcBufferOut + uiSsrcOutSize) - pC->pPosInSsrcBufferOut;
+
+    /* Check if the ssrc buffer is full */
+    if( pC->pPosInSsrcBufferOut == pC->pSsrcBufferOut )
+    {
+        uiSsrc2Encoder_NbBytes =
+            pC->audioEncoderGranularity * uiChannelConvertorCoeff / 2;
+
+        /* Check if we can feed directly the encoder with the ssrc out buffer */
+        if( ( pC->pPosInAudioEncoderBuffer == M4OSA_NULL)
+            && (uiBytesSsrc >= uiSsrc2Encoder_NbBytes) )
+        {
+            /* update position in ssrc out buffer after encoding */
+            if( uiBytesSsrc == uiSsrc2Encoder_NbBytes )
+                pC->pPosInSsrcBufferOut = M4OSA_NULL;
+            else
+                pC->pPosInSsrcBufferOut += uiSsrc2Encoder_NbBytes;
+
+            /* mark the encoder buffer ready (= full) */
+            pC->pPosInAudioEncoderBuffer =
+                pC->pAudioEncoderBuffer + pC->audioEncoderGranularity;
+
+            if( needChannelConversion > 0 )
+            {
+                /* channel convertor writes directly into encoder buffer */
+                pEncoderInput = pC->pAudioEncoderBuffer;
+
+                pChannelConvertorInput = pC->pSsrcBufferOut;
+                pChannelConvertorOutput = pC->pAudioEncoderBuffer;
+                uiChannelConvertorNbSamples =
+                    uiSsrc2Encoder_NbBytes / sizeof(short);
+
+                goto m4mcs_intaudiotranscoding_channel_convertor;
+            }
+            else
+            {
+                /* encode directly from ssrc out buffer */
+                pEncoderInput = pC->pSsrcBufferOut;
+
+                goto m4mcs_intaudiotranscoding_encode_and_write;
+            }
+        }
+    }
+
+    /**
+    * Compute remaining space in encoder buffer in */
+    if( pC->pPosInAudioEncoderBuffer == M4OSA_NULL )
+    {
+        pC->pPosInAudioEncoderBuffer = pC->pAudioEncoderBuffer;
+    }
+
+    uiEncoderInRoom = ( pC->pAudioEncoderBuffer + pC->audioEncoderGranularity)
+        - pC->pPosInAudioEncoderBuffer;
+    pEncoderInput = pC->pAudioEncoderBuffer;
+
+    /**
+    * Nb of bytes copied is the minimum between nb of bytes remaining in
+    * decoder out buffer and space remaining in ssrc in buffer */
+    uiSsrc2Encoder_NbBytes =
+        (( uiEncoderInRoom * uiChannelConvertorCoeff / 2) < uiBytesSsrc)
+        ? (uiEncoderInRoom * uiChannelConvertorCoeff / 2) : uiBytesSsrc;
+
+    if( needChannelConversion > 0 )
+    {
+        /* channel convertor writes directly into encoder buffer */
+        pChannelConvertorInput = pC->pPosInSsrcBufferOut;
+        pChannelConvertorOutput = pC->pPosInAudioEncoderBuffer;
+        uiChannelConvertorNbSamples = uiSsrc2Encoder_NbBytes / sizeof(short);
+    }
+    else
+    {
+        /* copy from the ssrc out buffer into the encoder in buffer */
+        memcpy((void *)pC->pPosInAudioEncoderBuffer, (void *)pC->pPosInSsrcBufferOut,
+            uiSsrc2Encoder_NbBytes);
+    }
+
+    /* Update position in ssrc out buffer after encoding */
+    pC->pPosInSsrcBufferOut += uiSsrc2Encoder_NbBytes;
+
+    /* Update the position in the encoder in buffer */
+    pC->pPosInAudioEncoderBuffer +=
+        uiSsrc2Encoder_NbBytes * 2 / uiChannelConvertorCoeff;
+
+    /* Check if the ssrc buffer out is empty */
+    if( ( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut)
+        == (M4OSA_Int32)uiSsrcOutSize )
+    {
+        pC->pPosInSsrcBufferOut = M4OSA_NULL;
+    }
+
+    /* go to next statement */
+    if( needChannelConversion > 0 )
+        goto m4mcs_intaudiotranscoding_channel_convertor;
+    else
+        goto m4mcs_intaudiotranscoding_encode_and_write;
+
+    /* _________________ */
+    /*|                 |*/
+    /*| STEREO <-> MONO |*/
+    /*|_________________|*/
+
+m4mcs_intaudiotranscoding_channel_convertor:
+
+    /* convert the input pcm stream to mono or to stereo */
+    switch( needChannelConversion )
+    {
+        case 1: /* stereo to mono */
+            From2iToMono_16((short *)pChannelConvertorInput,
+                (short *)pChannelConvertorOutput,
+                (short)(uiChannelConvertorNbSamples / 2));
+            break;
+
+        case 2: /* mono to stereo */
+            MonoTo2I_16((short *)pChannelConvertorInput,
+                (short *)pChannelConvertorOutput,
+                (short)uiChannelConvertorNbSamples);
+            break;
+    }
+
+    /* __________________ */
+    /*|                  |*/
+    /*| ENCODE AND WRITE |*/
+    /*|__________________|*/
+
+m4mcs_intaudiotranscoding_encode_and_write:
+
+    /* Check if the encoder in buffer is ready (= full) */
+    if( ( pC->pPosInAudioEncoderBuffer - pC->pAudioEncoderBuffer)
+        < (M4OSA_Int32)pC->audioEncoderGranularity )
+    {
+        goto m4mcs_intaudiotranscoding_end;
+    }
+
+    /* [Mono] or [Stereo interleaved] : all is in one buffer */
+    pEncInBuffer.pTableBuffer[0] = pEncoderInput;
+    pEncInBuffer.pTableBufferSize[0] = pC->audioEncoderGranularity;
+    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+    pEncInBuffer.pTableBufferSize[1] = 0;
+
+    /* Time in ms from data size, because it is PCM16 samples */
+    frameTimeDelta =
+        ( pEncInBuffer.pTableBufferSize[0] * uiChannelConvertorCoeff / 2)
+        / sizeof(short) / pC->pReaderAudioStream->m_nbChannels;
+
+    /**
+    * Prepare the writer AU */
+    err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
+        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioTranscoding(): pWriterDataFcts->pStartAU(Audio) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /*FlB 2009.03.04: apply audio effects if an effect is active*/
+    if( *pActiveEffectNumber >= 0 && *pActiveEffectNumber < pC->nbEffects )
+    {
+        if( pC->pEffects[*pActiveEffectNumber].ExtAudioEffectFct != M4OSA_NULL )
+        {
+            M4MCS_ExternalProgress pProgress;
+            M4OSA_UInt32 tempProgress = 0;
+            pProgress.uiClipTime = (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS;
+
+            pProgress.uiOutputTime = ( pC->WriterAudioAU.CTS * 1000)
+                / pC->WriterAudioStream.timeScale;
+            tempProgress = ( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
+                - pC->pEffects[*pActiveEffectNumber].uiStartTime
+                - pC->uiBeginCutTime) * 1000;
+            pProgress.uiProgress =
+                (M4OSA_UInt32)(tempProgress / (M4OSA_UInt32)pC->pEffects[
+                *pActiveEffectNumber].uiDuration);
+
+            err = pC->pEffects[*pActiveEffectNumber].ExtAudioEffectFct(
+                pC->pEffects[*pActiveEffectNumber].pExtAudioEffectFctCtxt,
+                (M4OSA_Int16 *)pEncInBuffer.pTableBuffer[0],
+                pEncInBuffer.pTableBufferSize[0], &pProgress);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intAudioTranscoding(): ExtAudioEffectFct() returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Prepare output buffer */
+    pEncOutBuffer.pTableBuffer[0] =
+        (M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress;
+    pEncOutBuffer.pTableBufferSize[0] = 0;
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+    fwrite(pEncInBuffer.pTableBuffer[0], pEncInBuffer.pTableBufferSize[0], 1,
+        file_pcm_encoder);
+
+#endif
+
+    if( M4OSA_FALSE == pC->b_isRawWriter )
+    {
+        /* This allow to write PCM data to file and to encode AMR data,
+        when output file is not RAW */
+        if( pC->pOutputPCMfile != M4OSA_NULL )
+        {
+            pC->pOsaFileWritPtr->writeData(pC->pOutputPCMfile,
+                pEncInBuffer.pTableBuffer[0], pEncInBuffer.pTableBufferSize[0]);
+        }
+
+        /**
+        * Encode the PCM audio */
+        err = pC->pAudioEncoderGlobalFcts->pFctStep(pC->pAudioEncCtxt,
+            &pEncInBuffer, &pEncOutBuffer);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioTranscoding(): pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                err);
+            return err;
+        }
+
+        /* update data consumed into encoder buffer in after encoding (empty) */
+        pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
+
+        /**
+        * Set AU cts and size */
+        pC->WriterAudioAU.size =
+            pEncOutBuffer.
+            pTableBufferSize[0]; /**< Get the size of encoded data */
+        pC->WriterAudioAU.CTS += frameTimeDelta;
+
+        /**
+        * Update duration of the encoded AU */
+        pC->m_audioAUDuration =
+            ( frameTimeDelta * 1000) / pC->WriterAudioStream.timeScale;
+
+        /**
+        * Write the encoded AU to the output file */
+        pC->uiAudioAUCount++;
+        err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
+            M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioTranscoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
+                err);
+            return err;
+        }
+    }
+    else
+    {
+        /* update data consumed into encoder buffer in after encoding (empty) */
+        pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
+
+        pC->WriterAudioAU.dataAddress =
+            (M4OSA_MemAddr32)
+            pEncoderInput; /* will be converted back to u8* in file write */
+        pC->WriterAudioAU.size = pC->audioEncoderGranularity;
+        pC->uiAudioAUCount++;
+
+        err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
+            M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioTranscoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /* _______________ */
+    /*|               |*/
+    /*| ONE PASS DONE |*/
+    /*|_______________|*/
+
+m4mcs_intaudiotranscoding_end:
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intAudioTranscoding(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intReallocTemporaryAU(M4OSA_MemAddr8* addr, M4OSA_UInt32 newSize)
+ * @brief   Grows a temporary AU buffer to newSize. Used only with the 3GP
+ *          constant-memory reader, whose max AU size can be re-evaluated
+ *          while reading. Previous content is NOT preserved: callers refill
+ *          the buffer immediately after the call.
+ * @return  M4NO_ERROR: No error
+ * @return  M4ERR_ALLOC: the new buffer could not be allocated
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intReallocTemporaryAU( M4OSA_MemAddr8 *addr,
+                                              M4OSA_UInt32 newSize )
+{
+    /* Nothing to do while no buffer has ever been allocated */
+    if( M4OSA_NULL == *addr )
+    {
+        return M4NO_ERROR;
+    }
+
+    /* Drop the old buffer and allocate a fresh one of the requested size */
+    free(*addr);
+    *addr = (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(newSize, M4MCS,
+        (M4OSA_Char *)"Reallocation of temporary AU buffer");
+
+    return ( M4OSA_NULL == *addr ) ? M4ERR_ALLOC : M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intVideoNullEncoding(M4MCS_InternalContext* pC)
+ * @author Alexis Vapillon (NXP Software Vision)
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intVideoNullEncoding( M4MCS_InternalContext *pC )
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ /* Duration of the AU (find the next AU duration
+ * to obtain a more precise video end cut)
+ */
+ M4OSA_UInt32 videoAUDuration = 0;
+
+ M4OSA_MemAddr8 WritebufferAdd = M4OSA_NULL;
+ M4OSA_Int32 lastdecodedCTS = 0;
+ M4_AccessUnit lReaderVideoAU; /**< Read video access unit */
+
+ if( pC->novideo )
+ return M4NO_ERROR;
+
+ /* H.264 Trimming */
+ if( ( ( pC->bH264Trim == M4OSA_TRUE)
+ && (pC->uiVideoAUCount < pC->m_pInstance->clip_sps.num_ref_frames)
+ && (pC->uiBeginCutTime > 0))
+ || (( pC->uiVideoAUCount == 0) && (pC->uiBeginCutTime > 0)) )
+ {
+ err = M4MCS_intVideoTranscoding(pC);
+ return err;
+ }
+
+
+ if((pC->bLastDecodedFrameCTS == M4OSA_FALSE) && (pC->uiBeginCutTime > 0))
+ {
+ // StageFright encoder does prefetch, the one frame we requested will not be written until
+ // the encoder is closed, so do it now rather than in MCS_close
+ if( ( M4NO_ERROR != err)
+ || (M4MCS_kEncoderRunning != pC->encoderState) )
+ {
+ M4OSA_TRACE1_2(
+ "!!! M4MCS_intVideoNullEncoding ERROR : M4MCS_intVideoTranscoding "
+ "returns 0x%X w/ encState=%d", err, pC->encoderState);
+
+ return err;
+ }
+
+ /* Stop and close the encoder now to flush the frame (prefetch) */
+ if( pC->pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+ {
+ err = pC->pVideoEncoderGlobalFcts->pFctStop(pC->pViEncCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "!!! M4MCS_intVideoNullEncoding ERROR : encoder stop returns 0x%X",
+ err);
+ return err;
+ }
+ }
+ pC->encoderState = M4MCS_kEncoderStopped;
+ err = pC->pVideoEncoderGlobalFcts->pFctClose(pC->pViEncCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "!!! M4MCS_intVideoNullEncoding ERROR : encoder close returns 0x%X",
+ err);
+ return err;
+ }
+ pC->encoderState = M4MCS_kEncoderClosed;
+ }
+
+
+ if ((pC->EncodingVideoFormat == M4ENCODER_kNULL)
+ && (pC->bLastDecodedFrameCTS == M4OSA_FALSE)
+ && (pC->uiBeginCutTime > 0)) {
+
+ pC->bLastDecodedFrameCTS = M4OSA_TRUE;
+ err = pC->m_pVideoDecoder->m_pFctGetOption(pC->pViDecCtxt,
+ M4DECODER_kOptionID_AVCLastDecodedFrameCTS, &lastdecodedCTS);
+
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding: m_pVideoDecoder->m_pFctGetOption returns 0x%x!",
+ err);
+ return err;
+ }
+ /* Do not need video decoder any more, need to destroy it. Otherwise it
+ * will call reader function which will cause frame lost during triming,
+ * since the 3gp reader is shared between MCS and decoder.*/
+ if (M4OSA_NULL != pC->pViDecCtxt) {
+ err = pC->m_pVideoDecoder->m_pFctDestroy(pC->pViDecCtxt);
+ pC->pViDecCtxt = M4OSA_NULL;
+
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding: decoder pFctDestroy returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream, &lastdecodedCTS);
+
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding: m_pFctJump(V) returns 0x%x!",
+ err);
+ return err;
+ }
+
+
+ /* Initializes an access Unit */
+
+ err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream, &lReaderVideoAU);
+
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding:m_pReader->m_pFctFillAuStruct(video)\
+ returns 0x%x", err);
+ return err;
+ }
+
+ err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream, &lReaderVideoAU);
+
+ if (M4WAR_NO_MORE_AU == err) {
+ M4OSA_TRACE2_0(
+ "M4MCS_intVideoNullEncoding():\
+ m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
+ /* The audio transcoding is finished */
+ pC->VideoState = M4MCS_kStreamState_FINISHED;
+ return err;
+ }
+ else if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding():\
+ m_pReaderDataIt->m_pFctGetNextAu(video) returns 0x%x",
+ err);
+ return err;
+ }
+
+ M4OSA_TRACE1_1(
+ "### [TS_CHECK] M4MCS_intVideoNullEncoding video AU CTS: %d ",
+ lReaderVideoAU.m_CTS);
+
+
+ }
+
+
+ pC->bLastDecodedFrameCTS = M4OSA_TRUE;
+
+
+ /* Find the next AU duration to obtain a more precise video end cut*/
+ /**
+ * Initializes a new AU if needed */
+
+ if (pC->ReaderVideoAU1.m_structSize == 0) {
+ /**
+ * Initializes an access Unit */
+ err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream,
+ &pC->ReaderVideoAU1);
+
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1(
+ "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+ err);
+ return err;
+ }
+
+ pC->m_pDataVideoAddress1 =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ReaderVideoAU1.m_maxsize, M4MCS,
+ (M4OSA_Char *)"Temporary video AU1 buffer");
+
+ if (pC->m_pDataVideoAddress1 == M4OSA_NULL) {
+ M4OSA_TRACE1_0("M4MCS_intVideoNullEncoding(): allocation error");
+ return M4ERR_ALLOC;
+ }
+
+ err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream,
+ &pC->ReaderVideoAU1);
+
+ if( M4WAR_NO_MORE_AU == err )
+ {
+ M4OSA_TRACE2_0(
+ "M4MCS_intVideoNullEncoding():\
+ m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
+ /* The audio transcoding is finished */
+ pC->VideoState = M4MCS_kStreamState_FINISHED;
+ return err;
+ }
+ else if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding(): m_pReaderDataIt->m_pFctGetNextAu(video)\
+ returns 0x%x", err);
+ return err;
+ }
+
+ if( pC->ReaderVideoAU1.m_maxsize
+ > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
+ {
+ /* Constant memory reader case, we need to reallocate the temporary buffers */
+ M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+ *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU1.m_maxsize);
+ /* pC->m_pDataVideoAddress1
+ and pC->m_pDataVideoAddress2 must be reallocated at the same time */
+ /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize take maximum value.
+ Then the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
+ m_basicProperties.m_maxAUSize)" is never true */
+ /* and the size of the second buffer is never changed. */
+ M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+ *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU1.m_maxsize);
+ /* pC->m_pDataVideoAddress1 and
+ pC->m_pDataVideoAddress2 must be reallocated at the same time */
+ /* Update stream properties */
+ pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
+ pC->ReaderVideoAU1.m_maxsize;
+ }
+ memcpy((void *)pC->m_pDataVideoAddress1,
+ (void *)pC->ReaderVideoAU1.m_dataAddress,
+ pC->ReaderVideoAU1.m_size);
+ }
+
+ if( pC->ReaderVideoAU2.m_structSize == 0 )
+ {
+ /**
+ * Initializes an access Unit */
+ err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream,
+ &pC->ReaderVideoAU2);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+ err);
+ return err;
+ }
+ pC->m_pDataVideoAddress2 =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ReaderVideoAU2.m_maxsize, M4MCS,
+ (M4OSA_Char *)"Temporary video AU buffer");
+
+ if( pC->m_pDataVideoAddress2 == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("M4MCS_intVideoNullEncoding(): allocation error");
+ return M4ERR_ALLOC;
+ }
+ }
+ /**
+ * Read the next video AU in the input file */
+ if( pC->ReaderVideoAU2.m_CTS > pC->ReaderVideoAU1.m_CTS )
+ {
+ memcpy((void *) &pC->ReaderVideoAU,
+ (void *) &pC->ReaderVideoAU2, sizeof(M4_AccessUnit));
+ err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream,
+ &pC->ReaderVideoAU1);
+
+ if( pC->ReaderVideoAU1.m_maxsize
+ > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
+ {
+ /* Constant memory reader case, we need to reallocate the temporary buffers */
+ M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+ *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU1.m_maxsize);
+ /* pC->m_pDataVideoAddress1 and
+ pC->m_pDataVideoAddress2 must be reallocated at the same time */
+ /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize take maximum value.
+ Then the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
+ m_basicProperties.m_maxAUSize)" is never true */
+ /* and the size of the second buffer is never changed. */
+ M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+ *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU1.m_maxsize);
+ /* pC->m_pDataVideoAddress1 and
+ pC->m_pDataVideoAddress2 must be reallocated at the same time */
+ /* Update stream properties */
+ pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
+ pC->ReaderVideoAU1.m_maxsize;
+ }
+ memcpy((void *)pC->m_pDataVideoAddress1,
+ (void *)pC->ReaderVideoAU1.m_dataAddress,
+ pC->ReaderVideoAU1.m_size);
+ videoAUDuration = pC->ReaderVideoAU1.m_CTS - pC->ReaderVideoAU2.m_CTS;
+ pC->ReaderVideoAU.m_dataAddress = pC->m_pDataVideoAddress2;
+ }
+ else
+ {
+ memcpy((void *) &pC->ReaderVideoAU,
+ (void *) &pC->ReaderVideoAU1, sizeof(M4_AccessUnit));
+ err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderVideoStream,
+ &pC->ReaderVideoAU2);
+
+ if( pC->ReaderVideoAU2.m_maxsize
+ > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
+ {
+ /* Constant memory reader case, we need to reallocate the temporary buffers */
+ M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+ *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU2.m_maxsize);
+ /* pC->m_pDataVideoAddress1 and
+ pC->m_pDataVideoAddress2 must be reallocated at the same time */
+ /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize take maximum value.
+ Then the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
+ m_basicProperties.m_maxAUSize)" is never true */
+ /* and the size of the second buffer is never changed. */
+ M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+ *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU2.m_maxsize);
+ /* pC->m_pDataVideoAddress1 and
+ pC->m_pDataVideoAddress2 must be reallocated at the same time */
+ /* Update stream properties */
+ pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
+ pC->ReaderVideoAU2.m_maxsize;
+ }
+ memcpy((void *)pC->m_pDataVideoAddress2,
+ (void *)pC->ReaderVideoAU2.m_dataAddress,
+ pC->ReaderVideoAU2.m_size);
+ videoAUDuration = pC->ReaderVideoAU2.m_CTS - pC->ReaderVideoAU1.m_CTS;
+ pC->ReaderVideoAU.m_dataAddress = pC->m_pDataVideoAddress1;
+ }
+
+ if( M4WAR_NO_MORE_AU == err )
+ {
+ M4OSA_TRACE2_0(
+ "M4MCS_intVideoNullEncoding():\
+ m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
+ /* The video transcoding is finished */
+ pC->VideoState = M4MCS_kStreamState_FINISHED;
+ return err;
+ }
+ else if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding(): m_pReaderDataIt->m_pFctGetNextAu(Video) returns 0x%x",
+ err);
+ return err;
+ }
+ else
+ {
+ /**
+ * Prepare the writer AU */
+ err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
+ M4MCS_WRITER_VIDEO_STREAM_ID, &pC->WriterVideoAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding(): pWriterDataFcts->pStartAU(Video) returns 0x%x",
+ err);
+ return err;
+ }
+ /**
+ * Copy video data from reader AU to writer AU */
+ M4OSA_TRACE3_1(
+ "M4MCS_intVideoNullEncoding(): Copying video AU: size=%d",
+ pC->ReaderVideoAU.m_size);
+ /* + CRLV6775 -H.264 Trimming */
+ if( M4OSA_TRUE == pC->bH264Trim )
+ {
+ if( pC->H264MCSTempBufferSize
+ < (pC->ReaderVideoAU.m_size + 2048) )
+ {
+ pC->H264MCSTempBufferSize =
+ (pC->ReaderVideoAU.m_size + 2048);
+
+ if( pC->H264MCSTempBuffer != M4OSA_NULL )
+ {
+ free(pC->H264MCSTempBuffer);
+ }
+ pC->H264MCSTempBuffer =
+ (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(pC->H264MCSTempBufferSize,
+ M4MCS, (M4OSA_Char *)"pC->H264MCSTempBuffer");
+
+ if( pC->H264MCSTempBuffer == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "M4MCS_intVideoNullEncoding(): allocation error");
+ return M4ERR_ALLOC;
+ }
+ }
+
+ pC->H264MCSTempBufferDataSize = pC->H264MCSTempBufferSize;
+
+ err = H264MCS_ProcessNALU(pC->m_pInstance,
+ (M4OSA_UInt8 *)pC->ReaderVideoAU.m_dataAddress,
+ pC->ReaderVideoAU.m_size, pC->H264MCSTempBuffer,
+ (M4OSA_Int32 *)&pC->H264MCSTempBufferDataSize);
+
+ if( pC->m_pInstance->is_done == 1 )
+ {
+ M4MCS_convetFromByteStreamtoNALStream(
+ (M4OSA_UInt8 *)pC->ReaderVideoAU.m_dataAddress ,
+ pC->ReaderVideoAU.m_size);
+
+ memcpy((void *)pC->WriterVideoAU.dataAddress,
+ (void *)(pC->ReaderVideoAU.m_dataAddress + 4),
+ pC->ReaderVideoAU.m_size - 4);
+ pC->WriterVideoAU.size = pC->ReaderVideoAU.m_size - 4;
+ WritebufferAdd =
+ (M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress;
+ }
+ else
+ {
+ memcpy((void *)pC->WriterVideoAU.dataAddress,
+ (void *)(pC->H264MCSTempBuffer + 4),
+ pC->H264MCSTempBufferDataSize - 4);
+ pC->WriterVideoAU.size = pC->H264MCSTempBufferDataSize - 4;
+ WritebufferAdd =
+ (M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress;
+ }
+ }
+ /* H.264 Trimming */
+ else
+ {
+ memcpy((void *)pC->WriterVideoAU.dataAddress,
+ (void *)pC->ReaderVideoAU.m_dataAddress,
+ pC->ReaderVideoAU.m_size);
+ pC->WriterVideoAU.size = pC->ReaderVideoAU.m_size;
+ }
+ /**
+ * Convert CTS unit from milliseconds to timescale */
+ pC->WriterVideoAU.CTS =
+ (M4OSA_Time)((( pC->ReaderVideoAU.m_CTS - pC->dViDecStartingCts)
+ * (pC->WriterVideoStream.timeScale / 1000.0)));
+ pC->WriterVideoAU.nbFrag = 0;
+ pC->WriterVideoAU.attribute = pC->ReaderVideoAU.m_attribute;
+
+ M4OSA_TRACE3_1("M4MCS_intVideoNullEncoding(): video AU: CTS=%d ms",
+ pC->WriterVideoAU.CTS);
+
+ /**
+ * Write it to the output file */
+ pC->uiVideoAUCount++;
+ err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
+ M4MCS_WRITER_VIDEO_STREAM_ID, &pC->WriterVideoAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_intVideoNullEncoding(): pWriterDataFcts->pProcessAU(Video) returns 0x%x",
+ err);
+ return err;
+ }
+ /* + CRLV6775 -H.264 Trimming */
+ if( M4OSA_TRUE == pC->bH264Trim )
+ {
+ if( pC->m_pInstance->is_done == 1 )
+ {
+ memcpy((void *)(WritebufferAdd - 4),
+ (void *)(pC->ReaderVideoAU.m_dataAddress), 4);
+ }
+ else
+ {
+ memcpy((void *)(WritebufferAdd - 4),
+ (void *)(pC->H264MCSTempBuffer), 4);
+ }
+ } /* H.264 Trimming */
+ }
+ /**
+ * Check for end cut. */
+ /* Bug fix 11/12/2008: We absolutely want to have less or same video duration ->
+ (2*videoAUDuration) to have a more precise end cut*/
+ if( pC->ReaderVideoAU.m_CTS + (2 *videoAUDuration) > pC->uiEndCutTime )
+ {
+ pC->VideoState = M4MCS_kStreamState_FINISHED;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4MCS_intVideoNullEncoding(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intVideoTranscoding(M4MCS_InternalContext* pC)
+ * @author Alexis Vapillon (NXP Software Vision)
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
static M4OSA_ERR M4MCS_intVideoTranscoding( M4MCS_InternalContext *pC )
{
    /* Decode one video frame at the current CTS and feed it to the encoder;
     * detects the end cut and asks the encoder for a last-frame flush there. */
    M4OSA_ERR err = M4NO_ERROR;
    M4_MediaTime mtTranscodedTime = 0.0; /**< CTS handed to the decoder (updated in place) */
    M4ENCODER_FrameMode FrameMode;       /**< Normal frame, or last-frame flush request */
    M4OSA_Int32 derive = 0;              /**< Overshoot past the end cut, in ms */

    /**
     * Get video CTS to decode */
    mtTranscodedTime = pC->dViDecCurrentCts;
    FrameMode = M4ENCODER_kNormalFrame;

    /**
     * Decode video */
    M4OSA_TRACE3_1(
        "M4MCS_intVideoTranscoding(): Calling m_pVideoDecoder->m_pFctDecode(%.2f)",
        mtTranscodedTime);
    pC->isRenderDup = M4OSA_FALSE;
    err = pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &mtTranscodedTime,
        M4OSA_FALSE, 0);

    if( M4WAR_NO_MORE_AU == err )
    {
        FrameMode =
            M4ENCODER_kLastFrame; /**< We will give this value to the encoder to
                                  ask for the end of the encoding */
        pC->VideoState = M4MCS_kStreamState_FINISHED;
    }
    else if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
    {
        /* Decoder produced no new frame: flag the duplicate so the encode
         * step reuses the previously rendered picture */
        M4OSA_TRACE2_0("Decoding output the same frame as before 3");
        pC->isRenderDup = M4OSA_TRUE;
    }
    else if( M4NO_ERROR != err )
    {
        M4OSA_TRACE1_1(
            "M4MCS_intVideoTranscoding(): m_pVideoDecoder->m_pFctDecode returns 0x%x!",
            err);
        return err;
    }

    /**
     * Check for end cut.
     * We must check here if the end cut is reached, because in that case we must
     * call the last encode step (-> bLastFrame set to true) */
    if( ( pC->dViDecCurrentCts + pC->dCtsIncrement ) >= (pC->uiEndCutTime
        + M4MCS_ABS(pC->dViDecStartingCts - pC->uiBeginCutTime)) )
    {
        FrameMode =
            M4ENCODER_kLastFrame; /**< We will give this value to the encoder to
                                  ask for the end of the encoding */
        pC->VideoState = M4MCS_kStreamState_FINISHED;
        /* How far past the end cut the next frame would land; half of it is
         * subtracted from the last frame's duration below */
        derive = (M4OSA_Int32)(( pC->dViDecCurrentCts + pC->dCtsIncrement + 0.5)
            - (pC->uiEndCutTime
            + M4MCS_ABS(pC->dViDecStartingCts - pC->uiBeginCutTime)));
    }

    /* Update starting CTS to have a more precise value (
    the begin cut is not a real CTS)*/
    if( pC->uiVideoAUCount == 0 )
    {
        pC->dViDecStartingCts = mtTranscodedTime;
        pC->dViDecCurrentCts = pC->dViDecStartingCts;
    }

    /**
     * Encode video */
    M4OSA_TRACE3_1(
        "M4MCS_intVideoTranscoding(): Calling pVideoEncoderGlobalFcts->pFctEncode with videoCts\
        = %.2f",pC->ReaderVideoAU.m_CTS);
    pC->uiVideoAUCount++;
    /* update the given duration (the begin cut is not a real CTS)*/
    err = pC->pVideoEncoderGlobalFcts->pFctEncode(pC->pViEncCtxt, M4OSA_NULL,
        (pC->dViDecCurrentCts - pC->dViDecStartingCts - (derive >> 1)),
        FrameMode);

    return err;
}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intGetInputClipProperties(M4MCS_InternalContext* pContext)
+ * @author Dounya Manai (NXP Software Vision)
+ * @brief Retrieve the properties of the audio and video streams from the input file.
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
static M4OSA_ERR M4MCS_intGetInputClipProperties( M4MCS_InternalContext *pC )
{
    /* Fill pC->InputFileProperties from the reader's audio/video stream
     * descriptors: stream types, bitrates, sizes, durations, profile/level
     * and AAC extensions. */
    M4DECODER_MPEG4_DecoderConfigInfo DecConfInfo;
    M4READER_3GP_H263Properties H263prop; /* NOTE(review): appears unused in this function */
    M4OSA_ERR err;
    M4OSA_UInt32 videoBitrate;
    M4DECODER_VideoSize videoSize;
    M4_AACType iAacType = 0;

    /**
     * Check input parameters */
    M4OSA_DEBUG_IF2(M4OSA_NULL == pC, M4ERR_PARAMETER,
        "M4MCS_intGetInputClipProperties: pC is M4OSA_NULL");

    /**
     * Reset common characteristics */
    pC->InputFileProperties.bAnalysed = M4OSA_FALSE;
    pC->InputFileProperties.FileType = 0;
    pC->InputFileProperties.Version[0] = M4VIDEOEDITING_VERSION_MAJOR;
    pC->InputFileProperties.Version[1] = M4VIDEOEDITING_VERSION_MINOR;
    pC->InputFileProperties.Version[2] = M4VIDEOEDITING_VERSION_REVISION;
    pC->InputFileProperties.uiClipDuration = 0;

    memset((void *) &pC->InputFileProperties.ftyp,
        0, sizeof(M4VIDEOEDITING_FtypBox));

    /**
     * Reset video characteristics */
    pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
    pC->InputFileProperties.uiClipVideoDuration = 0;
    pC->InputFileProperties.uiVideoBitrate = 0;
    pC->InputFileProperties.uiVideoMaxAuSize = 0;
    pC->InputFileProperties.uiVideoWidth = 0;
    pC->InputFileProperties.uiVideoHeight = 0;
    pC->InputFileProperties.uiVideoTimeScale = 0;
    pC->InputFileProperties.fAverageFrameRate = 0.0;
    pC->InputFileProperties.uiVideoLevel =
        M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
    pC->InputFileProperties.uiVideoProfile =
        M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
    pC->InputFileProperties.bMPEG4dataPartition = M4OSA_FALSE;
    pC->InputFileProperties.bMPEG4rvlc = M4OSA_FALSE;
    pC->InputFileProperties.bMPEG4resynchMarker = M4OSA_FALSE;

    /**
     * Reset audio characteristics */
    pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
    pC->InputFileProperties.uiClipAudioDuration = 0;
    pC->InputFileProperties.uiAudioBitrate = 0;
    pC->InputFileProperties.uiAudioMaxAuSize = 0;
    pC->InputFileProperties.uiNbChannels = 0;
    pC->InputFileProperties.uiSamplingFrequency = 0;
    pC->InputFileProperties.uiExtendedSamplingFrequency = 0;
    pC->InputFileProperties.uiDecodedPcmSize = 0;

    /* Reset compatibility chart (not used in MCS) */
    pC->InputFileProperties.bVideoIsEditable = M4OSA_FALSE;
    pC->InputFileProperties.bAudioIsEditable = M4OSA_FALSE;
    pC->InputFileProperties.bVideoIsCompatibleWithMasterClip = M4OSA_FALSE;
    pC->InputFileProperties.bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;

    /**
     * Video stream properties */
    if( M4OSA_NULL != pC->pReaderVideoStream )
    {
        /* Map the reader's stream type onto the video-editing enum */
        switch( pC->pReaderVideoStream->m_basicProperties.m_streamType )
        {
            case M4DA_StreamTypeVideoMpeg4:
                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kMPEG4;
                break;

            case M4DA_StreamTypeVideoH263:
                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kH263;
                break;

            case M4DA_StreamTypeVideoMpeg4Avc:
                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kH264;
                break;

            case M4DA_StreamTypeUnknown:
            default:
                pC->InputFileProperties.VideoStreamType =
                    M4VIDEOEDITING_kUnsupportedVideo;
                break;
        }

        /* if bitrate not available retrieve an estimation of the overall bitrate */
        pC->InputFileProperties.uiVideoBitrate =
            pC->pReaderVideoStream->m_basicProperties.m_averageBitRate;

        if( 0 == pC->InputFileProperties.uiVideoBitrate )
        {
            pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
                M4READER_kOptionID_Bitrate, &videoBitrate);

            if( M4OSA_NULL != pC->pReaderAudioStream )
            {
                /* we get the overall bitrate, substract the audio bitrate if any */
                videoBitrate -=
                    pC->pReaderAudioStream->m_basicProperties.m_averageBitRate;
            }
            pC->InputFileProperties.uiVideoBitrate = videoBitrate;
        }

        /**
         * Retrieve the Profile & Level */
        if( ( M4VIDEOEDITING_kH263 != pC->InputFileProperties.VideoStreamType)
            && (M4VIDEOEDITING_kH264
            != pC->InputFileProperties.VideoStreamType) )
        {
            /* Use the DSI parsing function from the external video shell decoder.
            See the comments in M4VSS3GPP_ClipAnalysis.c, it's pretty much the
            same issue. */

            err = M4DECODER_EXTERNAL_ParseVideoDSI(pC->pReaderVideoStream->
                m_basicProperties.m_pDecoderSpecificInfo,
                pC->pReaderVideoStream->
                m_basicProperties.m_decoderSpecificInfoSize,
                &DecConfInfo, &videoSize);

            if( M4NO_ERROR != err )
            {
                M4OSA_TRACE1_1(
                    "M4MCS_intGetInputClipProperties():\
                    M4DECODER_EXTERNAL_ParseVideoDSI returns 0x%08X",
                    err);
                return err;
            }

            pC->pReaderVideoStream->m_videoWidth = videoSize.m_uiWidth;
            pC->pReaderVideoStream->m_videoHeight = videoSize.m_uiHeight;
            pC->InputFileProperties.uiVideoTimeScale = DecConfInfo.uiTimeScale;
            pC->InputFileProperties.bMPEG4dataPartition =
                DecConfInfo.bDataPartition;
            pC->InputFileProperties.bMPEG4rvlc = DecConfInfo.bUseOfRVLC;
            pC->InputFileProperties.bMPEG4resynchMarker =
                DecConfInfo.uiUseOfResynchMarker;

            err = getMPEG4ProfileAndLevel(DecConfInfo.uiProfile,
                &(pC->InputFileProperties.uiVideoProfile),
                &(pC->InputFileProperties.uiVideoLevel));
            if ( M4NO_ERROR != err ) {
                M4OSA_TRACE1_1("M4MCS_intGetInputClipProperties():\
                    getMPEG4ProfileAndLevel returns 0x%08X", err);
                return err;
            }
        }
        else if( M4VIDEOEDITING_kH263 ==
            pC->InputFileProperties.VideoStreamType ) {

            err = getH263ProfileAndLevel(pC->pReaderVideoStream->
                m_basicProperties.m_pDecoderSpecificInfo,
                pC->pReaderVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
                &(pC->InputFileProperties.uiVideoProfile),
                &(pC->InputFileProperties.uiVideoLevel));
            if ( M4NO_ERROR != err ) {
                M4OSA_TRACE1_1("M4MCS_intGetInputClipProperties():\
                    getH263ProfileAndLevel returns 0x%08X", err);
                return err;
            }
            /* For h263 set default timescale : 30000:1001 */
            pC->InputFileProperties.uiVideoTimeScale = 30000;
        }
        else if ( M4VIDEOEDITING_kH264 ==
            pC->InputFileProperties.VideoStreamType ) {

            /* Same default timescale as H.263 */
            pC->InputFileProperties.uiVideoTimeScale = 30000;
            err = getAVCProfileAndLevel(pC->pReaderVideoStream->
                m_basicProperties.m_pDecoderSpecificInfo,
                pC->pReaderVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
                &(pC->InputFileProperties.uiVideoProfile),
                &(pC->InputFileProperties.uiVideoLevel));
            if ( M4NO_ERROR != err ) {
                M4OSA_TRACE1_1("M4MCS_intGetInputClipProperties():\
                    getAVCProfileAndLevel returns 0x%08X", err);
                return err;
            }
        }

        /* Here because width x height is correct only after dsi parsing
        (done in create decoder) */
        pC->InputFileProperties.uiVideoHeight =
            pC->pReaderVideoStream->m_videoHeight;
        pC->InputFileProperties.uiVideoWidth =
            pC->pReaderVideoStream->m_videoWidth;
        pC->InputFileProperties.uiClipVideoDuration =
            (M4OSA_UInt32)pC->pReaderVideoStream->m_basicProperties.m_duration;
        pC->InputFileProperties.fAverageFrameRate =
            pC->pReaderVideoStream->m_averageFrameRate;
        pC->InputFileProperties.uiVideoMaxAuSize =
            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize;
        pC->InputFileProperties.videoRotationDegrees =
            pC->pReaderVideoStream->videoRotationDegrees;
    }
    else
    {
        /* No video stream: distinguish "unsupported codec seen" from "no video" */
        if( M4OSA_TRUE == pC->bUnsupportedVideoFound )
        {
            pC->InputFileProperties.VideoStreamType =
                M4VIDEOEDITING_kUnsupportedVideo;
        }
        else
        {
            pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
        }
    }

    /**
     * Audio stream properties */
    if( M4OSA_NULL != pC->pReaderAudioStream )
    {
        switch( pC->pReaderAudioStream->m_basicProperties.m_streamType )
        {
            case M4DA_StreamTypeAudioAmrNarrowBand:
                pC->InputFileProperties.AudioStreamType =
                    M4VIDEOEDITING_kAMR_NB;
                break;

            case M4DA_StreamTypeAudioAac:
                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kAAC;
                break;

            case M4DA_StreamTypeAudioMp3:
                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kMP3;
                break;

            case M4DA_StreamTypeAudioEvrc:
                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kEVRC;
                break;

            case M4DA_StreamTypeUnknown:
            default:
                pC->InputFileProperties.AudioStreamType =
                    M4VIDEOEDITING_kUnsupportedAudio;
                break;
        }

        /* Create the audio decoder if not already done, to query AAC details */
        if( ( M4OSA_NULL != pC->m_pAudioDecoder)
            && (M4OSA_NULL == pC->pAudioDecCtxt) )
        {
            M4OSA_TRACE3_1(
                "M4MCS_intGetInputClipProperties: calling CreateAudioDecoder, userData= 0x%x",
                pC->m_pCurrentAudioDecoderUserData);

            if( M4OSA_FALSE == pC->bExtOMXAudDecoder ) {
                err = M4MCS_intCheckAndGetCodecProperties(pC);
            }
            else
            {
                err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
                    &pC->pAudioDecCtxt, pC->pReaderAudioStream,
                    pC->m_pCurrentAudioDecoderUserData);

                if( M4NO_ERROR == err )
                {
                    /* AAC properties*/
                    //get from Reader; temporary, till Audio decoder shell API available to
                    //get the AAC properties
                    pC->AacProperties.aNumChan =
                        pC->pReaderAudioStream->m_nbChannels;
                    pC->AacProperties.aSampFreq =
                        pC->pReaderAudioStream->m_samplingFrequency;

                    err = pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(
                        pC->pAudioDecCtxt, M4AD_kOptionID_StreamType,
                        (M4OSA_DataOption) &iAacType);

                    if( M4NO_ERROR != err )
                    {
                        M4OSA_TRACE1_1(
                            "M4MCS_intGetInputClipProperties:\
                            m_pAudioDecoder->m_pFctGetOptionAudioDec returns err 0x%x",
                            err);
                        iAacType = M4_kAAC; //set to default
                        err = M4NO_ERROR;
                    }
                    else
                    {
                        M4OSA_TRACE3_1(
                            "M4MCS_intGetInputClipProperties:\
                            m_pAudioDecoder->m_pFctGetOptionAudioDec returns streamType %d",
                            iAacType);
                    }

                    /* Record SBR/PS presence depending on the AAC flavour */
                    switch( iAacType )
                    {
                        case M4_kAAC:
                            pC->AacProperties.aSBRPresent = 0;
                            pC->AacProperties.aPSPresent = 0;
                            break;

                        case M4_kAACplus:
                            pC->AacProperties.aSBRPresent = 1;
                            pC->AacProperties.aPSPresent = 0;
                            pC->AacProperties.aExtensionSampFreq =
                                pC->pReaderAudioStream->
                                m_samplingFrequency; //TODO
                            break;

                        case M4_keAACplus:
                            pC->AacProperties.aSBRPresent = 1;
                            pC->AacProperties.aPSPresent = 1;
                            pC->AacProperties.aExtensionSampFreq =
                                pC->pReaderAudioStream->
                                m_samplingFrequency; //TODO
                            break;
                        case M4_kUnknown:
                            break;
                        default:
                            break;
                    }
                    M4OSA_TRACE3_2(
                        "M4MCS_intGetInputClipProperties: AAC NBChans=%d, SamplFreq=%d",
                        pC->AacProperties.aNumChan,
                        pC->AacProperties.aSampFreq);
                }
            }

            if( M4NO_ERROR != err )
            {
                M4OSA_TRACE1_1(
                    "M4MCS_intGetInputClipProperties:\
                    m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
                    err);
                return err;
            }
        }

        //EVRC
        if( pC->pReaderAudioStream->m_basicProperties.m_streamType
            == M4DA_StreamTypeAudioEvrc )
        {
            /* decoder not implemented yet, provide some default values for the null encoding */
            pC->pReaderAudioStream->m_nbChannels = 1;
            pC->pReaderAudioStream->m_samplingFrequency = 8000;
        }

        /**
         * Bugfix P4ME00001128: With some IMTC files, the AMR bit rate is 0 kbps according
         the GetProperties function */
        if( 0 == pC->pReaderAudioStream->m_basicProperties.m_averageBitRate )
        {
            if( M4VIDEOEDITING_kAMR_NB
                == pC->InputFileProperties.AudioStreamType )
            {
                /**
                 * Better returning a guessed 12.2 kbps value than a sure-to-be-false
                 0 kbps value! */
                pC->InputFileProperties.uiAudioBitrate =
                    M4VIDEOEDITING_k12_2_KBPS;
            }
            else if( M4VIDEOEDITING_kEVRC
                == pC->InputFileProperties.AudioStreamType )
            {
                /**
                 * Better returning a guessed 8.5 kbps value than a sure-to-be-false
                 0 kbps value! */
                pC->InputFileProperties.uiAudioBitrate =
                    M4VIDEOEDITING_k9_2_KBPS;
            }
            else
            {
                M4OSA_UInt32 FileBitrate;

                /* Can happen also for aac, in this case we calculate an approximative */
                /* value from global bitrate and video bitrate */
                err = pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
                    M4READER_kOptionID_Bitrate,
                    (M4OSA_DataOption) &FileBitrate);

                if( M4NO_ERROR != err )
                {
                    M4OSA_TRACE1_1(
                        "M4MCS_intGetInputClipProperties: M4READER_kOptionID_Bitrate returns 0x%x",
                        err);
                    return err;
                }
                pC->InputFileProperties.uiAudioBitrate =
                    FileBitrate
                    - pC->
                    InputFileProperties.
                    uiVideoBitrate /* normally setted to 0, if no video */;
            }
        }
        else
        {
            pC->InputFileProperties.uiAudioBitrate =
                pC->pReaderAudioStream->m_basicProperties.m_averageBitRate;
        }

        pC->InputFileProperties.uiNbChannels =
            pC->pReaderAudioStream->m_nbChannels;
        pC->InputFileProperties.uiSamplingFrequency =
            pC->pReaderAudioStream->m_samplingFrequency;
        pC->InputFileProperties.uiClipAudioDuration =
            (M4OSA_UInt32)pC->pReaderAudioStream->m_basicProperties.m_duration;
        pC->InputFileProperties.uiAudioMaxAuSize =
            pC->pReaderAudioStream->m_basicProperties.m_maxAUSize;

        /* Bug: with aac, value is 0 until decoder start() is called */
        pC->InputFileProperties.uiDecodedPcmSize =
            pC->pReaderAudioStream->m_byteFrameLength
            * pC->pReaderAudioStream->m_byteSampleSize
            * pC->pReaderAudioStream->m_nbChannels;

        /* New aac properties */
        if( M4DA_StreamTypeAudioAac
            == pC->pReaderAudioStream->m_basicProperties.m_streamType )
        {
            pC->InputFileProperties.uiNbChannels = pC->AacProperties.aNumChan;
            pC->InputFileProperties.uiSamplingFrequency =
                pC->AacProperties.aSampFreq;

            if( pC->AacProperties.aSBRPresent )
            {
                pC->InputFileProperties.AudioStreamType =
                    M4VIDEOEDITING_kAACplus;
                pC->InputFileProperties.uiExtendedSamplingFrequency =
                    pC->AacProperties.aExtensionSampFreq;
            }

            if( pC->AacProperties.aPSPresent )
            {
                pC->InputFileProperties.AudioStreamType =
                    M4VIDEOEDITING_keAACplus;
            }
        }
    }
    else
    {
        /* No audio stream: distinguish "unsupported codec seen" from "no audio" */
        if( M4OSA_TRUE == pC->bUnsupportedAudioFound )
        {
            pC->InputFileProperties.AudioStreamType =
                M4VIDEOEDITING_kUnsupportedAudio;
        }
        else
        {
            pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
        }
    }

    /* Get 'ftyp' atom */
    /* NOTE(review): err is intentionally not checked here — presumably the
     * ftyp box is optional; confirm against the reader implementation */
    err = pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
        M4READER_kOptionID_3gpFtypBox, &pC->InputFileProperties.ftyp);

    /* Analysis is successful */
    /* Clip duration is the longer of the two stream durations */
    if( pC->InputFileProperties.uiClipVideoDuration
        > pC->InputFileProperties.uiClipAudioDuration )
        pC->InputFileProperties.uiClipDuration =
            pC->InputFileProperties.uiClipVideoDuration;
    else
        pC->InputFileProperties.uiClipDuration =
            pC->InputFileProperties.uiClipAudioDuration;

    pC->InputFileProperties.FileType = pC->InputFileType;
    pC->InputFileProperties.bAnalysed = M4OSA_TRUE;

    return M4NO_ERROR;
}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB(M4OSA_MemAddr8 pAudioFrame)
+ * @brief Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
+ * @note
+ * @param pCpAudioFrame (IN) AMRNB frame
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB( M4OSA_MemAddr8 pAudioFrame )
+{
+ M4OSA_UInt32 frameSize = 0;
+ M4OSA_UInt32 frameType = ( ( *pAudioFrame) &(0xF << 3)) >> 3;
+
+ switch( frameType )
+ {
+ case 0:
+ frameSize = 95;
+ break; /* 4750 bps */
+
+ case 1:
+ frameSize = 103;
+ break; /* 5150 bps */
+
+ case 2:
+ frameSize = 118;
+ break; /* 5900 bps */
+
+ case 3:
+ frameSize = 134;
+ break; /* 6700 bps */
+
+ case 4:
+ frameSize = 148;
+ break; /* 7400 bps */
+
+ case 5:
+ frameSize = 159;
+ break; /* 7950 bps */
+
+ case 6:
+ frameSize = 204;
+ break; /* 10200 bps */
+
+ case 7:
+ frameSize = 244;
+ break; /* 12000 bps */
+
+ case 8:
+ frameSize = 39;
+ break; /* SID (Silence) */
+
+ case 15:
+ frameSize = 0;
+ break; /* No data */
+
+ default:
+ M4OSA_TRACE3_0(
+ "M4MCS_intGetFrameSize_AMRNB(): Corrupted AMR frame! returning 0.");
+ return 0;
+ }
+
+ return (1 + (( frameSize + 7) / 8));
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC(M4OSA_MemAddr8 pAudioFrame)
+ * @brief Return the length, in bytes, of the EVRC frame contained in the given buffer
+ * @note
+ * 0 1 2 3
+ * +-+-+-+-+
+ * |fr type| RFC 3558
+ * +-+-+-+-+
+ *
+ * Frame Type: 4 bits
+ * The frame type indicates the type of the corresponding codec data
+ * frame in the RTP packet.
+ *
+ * For EVRC and SMV codecs, the frame type values and size of the
+ * associated codec data frame are described in the table below:
+ *
+ * Value Rate Total codec data frame size (in octets)
+ * ---------------------------------------------------------
+ * 0 Blank 0 (0 bit)
+ * 1 1/8 2 (16 bits)
+ * 2 1/4 5 (40 bits; not valid for EVRC)
+ * 3 1/2 10 (80 bits)
+ * 4 1 22 (171 bits; 5 padded at end with zeros)
+ * 5 Erasure 0 (SHOULD NOT be transmitted by sender)
+ *
+ * @param pCpAudioFrame (IN) EVRC frame
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC( M4OSA_MemAddr8 pAudioFrame )
+{
+ /* Frame payload size in BITS for each EVRC frame type (RFC 3558),
+ * identical to the values of the original switch. */
+ static const M4OSA_UInt32 auFrameSizeInBits[6] =
+ {
+ 0, /* 0: blank */
+ 16, /* 1: 1/8 rate */
+ 40, /* 2: 1/4 rate */
+ 80, /* 3: 1/2 rate */
+ 171, /* 4: full rate */
+ 0 /* 5: erasure */
+ };
+
+ /* The frame type is carried in the low nibble of the first byte. */
+ M4OSA_UInt32 frameType = ( *pAudioFrame) &0x0F;
+
+ if( frameType > 5 )
+ {
+ M4OSA_TRACE3_0(
+ "M4MCS_intGetFrameSize_EVRC(): Corrupted EVRC frame! returning 0.");
+ return 0;
+ }
+
+ /* Bits rounded up to bytes, plus one byte for the frame-type header. */
+ return (1 + (( auFrameSizeInBits[frameType] + 7) / 8));
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCheckMaxFileSize(M4MCS_Context pContext)
+ * @brief Check if max file size is greater enough to encode a file with the
+ * current selected bitrates and duration.
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR
+ * @return M4MCS_ERR_MAXFILESIZE_TOO_SMALL
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intCheckMaxFileSize( M4MCS_Context pContext )
+{
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+ M4OSA_UInt32 duration;
+ M4OSA_UInt32 audiobitrate;
+ M4OSA_UInt32 videobitrate;
+
+ /* A max file size of zero means "free file size": nothing to check. */
+ if( 0 == pC->uiMaxFileSize )
+ {
+ return M4NO_ERROR;
+ }
+
+ /* Output duration: bounded by the end cut time when one is set,
+ * otherwise by the end of the input clip. */
+ duration = ( 0 == pC->uiEndCutTime )
+ ? ( pC->InputFileProperties.uiClipDuration - pC->uiBeginCutTime )
+ : ( pC->uiEndCutTime - pC->uiBeginCutTime );
+
+ /* Audio bitrate used for the size estimation. */
+ if( pC->noaudio )
+ {
+ audiobitrate = 0;
+ }
+ else if( M4ENCODER_kAudioNULL == pC->AudioEncParams.Format )
+ {
+ /* Null encoding: the input audio stream is kept as-is. */
+ audiobitrate = pC->InputFileProperties.uiAudioBitrate;
+ }
+ else if( M4VIDEOEDITING_kUndefinedBitrate != pC->uiAudioBitrate )
+ {
+ audiobitrate = pC->uiAudioBitrate;
+ }
+ else if( M4ENCODER_kAMRNB == pC->AudioEncParams.Format )
+ {
+ audiobitrate = M4VIDEOEDITING_k12_2_KBPS;
+ }
+ else /* AAC and MP3 */
+ {
+ audiobitrate = ( M4ENCODER_kMono == pC->AudioEncParams.ChannelNum )
+ ? M4VIDEOEDITING_k16_KBPS : M4VIDEOEDITING_k32_KBPS;
+ }
+
+ /* Video bitrate used for the size estimation. */
+ if( pC->novideo )
+ {
+ videobitrate = 0;
+ }
+ else if( M4ENCODER_kNULL == pC->EncodingVideoFormat )
+ {
+ videobitrate = pC->InputFileProperties.uiVideoBitrate;
+ }
+ else if( M4VIDEOEDITING_kUndefinedBitrate == pC->uiVideoBitrate )
+ {
+ videobitrate = M4VIDEOEDITING_k16_KBPS;
+ }
+ else
+ {
+ videobitrate = pC->uiVideoBitrate;
+ }
+
+ /* Estimated size = moov overhead ratio * total bitrate * duration,
+ * with the duration in milliseconds (hence the / 8000.0 to convert
+ * bits into bytes). */
+ if( (M4OSA_UInt32)pC->uiMaxFileSize
+ < (M4OSA_UInt32)(M4MCS_MOOV_OVER_FILESIZE_RATIO
+ * (audiobitrate + videobitrate) * (duration / 8000.0)) )
+ {
+ return M4MCS_ERR_MAXFILESIZE_TOO_SMALL;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4VIDEOEDITING_Bitrate M4MCS_intGetNearestBitrate(M4OSA_UInt32 freebitrate, M4OSA_Int8 mode)
+ * @brief Returns the closest bitrate value from the enum list of type M4VIDEOEDITING_Bitrate
+ * @param freebitrate: unsigned int value
+ * @param mode: -1:previous,0:current,1:next
+ * @return bitrate value in enum list M4VIDEOEDITING_Bitrate
+ ******************************************************************************
+ */
+static M4VIDEOEDITING_Bitrate
+M4MCS_intGetNearestBitrate( M4OSA_Int32 freebitrate, M4OSA_Int8 mode )
+{
+ M4OSA_Int32 bitarray [] =
+ {
+ 0, M4VIDEOEDITING_k16_KBPS, M4VIDEOEDITING_k24_KBPS,
+ M4VIDEOEDITING_k32_KBPS, M4VIDEOEDITING_k48_KBPS,
+ M4VIDEOEDITING_k64_KBPS, M4VIDEOEDITING_k96_KBPS,
+ M4VIDEOEDITING_k128_KBPS, M4VIDEOEDITING_k192_KBPS,
+ M4VIDEOEDITING_k256_KBPS, M4VIDEOEDITING_k288_KBPS,
+ M4VIDEOEDITING_k384_KBPS, M4VIDEOEDITING_k512_KBPS,
+ M4VIDEOEDITING_k800_KBPS, M4VIDEOEDITING_k2_MBPS,
+ M4VIDEOEDITING_k5_MBPS,
+ M4VIDEOEDITING_k8_MBPS, /*+ New Encoder bitrates */
+ M4OSA_INT32_MAX
+ };
+
+ /* Number of selectable entries, excluding the M4OSA_INT32_MAX sentinel.
+ * Derived from the array itself: the previous hard-coded value (14) was
+ * not updated when the k800_KBPS..k8_MBPS entries were appended, so the
+ * "next" mode returned M4OSA_INT32_MAX instead of the highest bitrates. */
+ const M4OSA_UInt32 nbbitrates =
+ ( sizeof(bitarray) / sizeof(bitarray[0]) ) - 1;
+ M4OSA_UInt32 i;
+
+ /* Find the first entry strictly greater than freebitrate. The index
+ * bound prevents reading past the array when freebitrate equals
+ * M4OSA_INT32_MAX. */
+ for ( i = 0; ( i < nbbitrates ) && ( freebitrate >= bitarray[i] ); i++ );
+
+ switch( mode )
+ {
+ case -1: /* previous: entry two steps below the insertion point */
+ if( i <= 2 )
+ return 0;
+ else
+ return bitarray[i - 2];
+ break;
+
+ case 0: /* current: closest entry not above freebitrate */
+ if( i <= 1 )
+ return 0;
+ else
+ return bitarray[i - 1];
+ break;
+
+ case 1: /* next: closest entry above freebitrate */
+ if( i >= nbbitrates )
+ return M4OSA_INT32_MAX;
+ else
+ return bitarray[i];
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders(M4MCS_InternalContext* pC);
+ * @brief Free all resources allocated by M4MCS_open()
+ * @param pContext (IN) MCS context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders( M4MCS_InternalContext *pC )
+{
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4OSA_TRACE2_1("M4MCS_intCleanUp_ReadersDecoders called with pC=0x%x", pC);
+
+ /* Errors from the destroy/close calls below are traced but never
+ * propagated: cleanup must run to completion to avoid leaks. */
+ /**/
+ /* ----- Free video decoder stuff, if needed ----- */
+
+ if( M4OSA_NULL != pC->pViDecCtxt )
+ {
+ err = pC->m_pVideoDecoder->m_pFctDestroy(pC->pViDecCtxt);
+ pC->pViDecCtxt = M4OSA_NULL;
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_cleanUp: m_pVideoDecoder->pFctDestroy returns 0x%x",
+ err);
+ /**< don't return, we still have stuff to free */
+ }
+ }
+
+ /* ----- Free the audio decoder stuff ----- */
+
+ if( M4OSA_NULL != pC->pAudioDecCtxt )
+ {
+ err = pC->m_pAudioDecoder->m_pFctDestroyAudioDec(pC->pAudioDecCtxt);
+ pC->pAudioDecCtxt = M4OSA_NULL;
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_cleanUp: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+ err);
+ /**< don't return, we still have stuff to free */
+ }
+ }
+
+ /* Release the buffer that received the audio decoder output. */
+ if( M4OSA_NULL != pC->AudioDecBufferOut.m_dataAddress )
+ {
+ free(pC->AudioDecBufferOut.m_dataAddress);
+ pC->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+ }
+
+ /* ----- Free reader stuff, if needed ----- */
+ // We cannot free the reader before decoders because the decoders may read
+ // from the reader (in another thread) before being stopped.
+
+ if( M4OSA_NULL != pC->
+ pReaderContext ) /**< may be M4OSA_NULL if M4MCS_open was not called */
+ {
+ err = pC->m_pReader->m_pFctClose(pC->pReaderContext);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1("M4MCS_cleanUp: m_pReader->m_pFctClose returns 0x%x",
+ err);
+ /**< don't return, we still have stuff to free */
+ }
+
+ err = pC->m_pReader->m_pFctDestroy(pC->pReaderContext);
+ pC->pReaderContext = M4OSA_NULL;
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4MCS_cleanUp: m_pReader->m_pFctDestroy returns 0x%x", err);
+ /**< don't return, we still have stuff to free */
+ }
+ }
+
+ /* Release internal working buffers, if they were allocated. */
+ if( pC->m_pDataAddress1 != M4OSA_NULL )
+ {
+ free(pC->m_pDataAddress1);
+ pC->m_pDataAddress1 = M4OSA_NULL;
+ }
+
+ if( pC->m_pDataAddress2 != M4OSA_NULL )
+ {
+ free(pC->m_pDataAddress2);
+ pC->m_pDataAddress2 = M4OSA_NULL;
+ }
+ /*Bug fix 11/12/2008 (to obtain more precise video end cut)*/
+ if( pC->m_pDataVideoAddress1 != M4OSA_NULL )
+ {
+ free(pC->m_pDataVideoAddress1);
+ pC->m_pDataVideoAddress1 = M4OSA_NULL;
+ }
+
+ if( pC->m_pDataVideoAddress2 != M4OSA_NULL )
+ {
+ free(pC->m_pDataVideoAddress2);
+ pC->m_pDataVideoAddress2 = M4OSA_NULL;
+ }
+
+ return M4NO_ERROR;
+}
+
+
+/**
+
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+ * M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+ * @brief Set the MCS input and output files. It is the same as M4MCS_open
+ * without the M4MCS_WITH_FAST_OPEN flag. It is used in VideoArtist.
+ * @note It opens the input file, but the output file is not created yet.
+ * @param pContext (IN) MCS context
+ * @param pFileIn (IN) Input file to transcode (The type of this parameter
+ * (URL, pipe...) depends on the OSAL implementation).
+ * @param mediaType (IN) Container type (.3gp,.amr, ...) of input file.
+ * @param pFileOut (IN) Output file to create (The type of this parameter
+ * (URL, pipe...) depends on the OSAL implementation).
+ * @param pTempFile (IN) Temporary file for the constant memory writer to store
+ * metadata ("moov.bin").
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: MCS is not in an appropriate state for this function to be called
+ * @return M4ERR_ALLOC: There is no more available memory
+ * @return M4ERR_FILE_NOT_FOUND: The input file has not been found
+ * @return M4MCS_ERR_INVALID_INPUT_FILE: The input file is not a valid file, or is corrupted
+ * @return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM: The input file contains no
+ * supported audio or video stream
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+ M4VIDEOEDITING_FileType InputFileType,
+ M4OSA_Void* pFileOut, M4OSA_Void* pTempFile)
+{
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext*)(pContext);
+ M4OSA_ERR err;
+
+ M4READER_MediaFamily mediaFamily;
+ M4_StreamHandler* pStreamHandler;
+
+ M4OSA_TRACE2_3("M4MCS_open_normalMode called with pContext=0x%x, pFileIn=0x%x,\
+ pFileOut=0x%x", pContext, pFileIn, pFileOut);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4MCS_open_normalMode: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pFileIn) , M4ERR_PARAMETER,
+ "M4MCS_open_normalMode: pFileIn is M4OSA_NULL");
+
+ /* Still-picture input is not supported by this entry point. */
+ if ((InputFileType == M4VIDEOEDITING_kFileType_JPG)
+ ||(InputFileType == M4VIDEOEDITING_kFileType_PNG)
+ ||(InputFileType == M4VIDEOEDITING_kFileType_GIF)
+ ||(InputFileType == M4VIDEOEDITING_kFileType_BMP))
+ {
+ M4OSA_TRACE1_0("M4MCS_open_normalMode: Still picture is not\
+ supported with this function");
+ return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
+ }
+
+ /**
+ * Check state automaton */
+ if (M4MCS_kState_CREATED != pC->State)
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode(): Wrong State (%d), returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ }
+
+ /* Copy function input parameters into our context */
+ pC->pInputFile = pFileIn;
+ pC->InputFileType = InputFileType;
+ pC->pOutputFile = pFileOut;
+ pC->pTemporaryFile = pTempFile;
+
+ /***********************************/
+ /* Open input file with the reader */
+ /***********************************/
+
+ err = M4MCS_setCurrentReader(pContext, pC->InputFileType);
+ M4ERR_CHECK_RETURN(err);
+
+ /**
+ * Reset reader related variables */
+ pC->VideoState = M4MCS_kStreamState_NOSTREAM;
+ pC->AudioState = M4MCS_kStreamState_NOSTREAM;
+ pC->pReaderVideoStream = M4OSA_NULL;
+ pC->pReaderAudioStream = M4OSA_NULL;
+
+ /*******************************************************/
+ /* Initializes the reader shell and open the data file */
+ /*******************************************************/
+ err = pC->m_pReader->m_pFctCreate(&pC->pReaderContext);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctCreate returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Link the reader interface to the reader context */
+ pC->m_pReaderDataIt->m_readerContext = pC->pReaderContext;
+
+ /**
+ * Set the reader shell file access functions */
+ err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
+ M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+ (M4OSA_DataOption)pC->pOsaFileReadPtr);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctSetOption returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Open the input file */
+ err = pC->m_pReader->m_pFctOpen(pC->pReaderContext, pC->pInputFile);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_UInt32 uiDummy, uiCoreId;
+ M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctOpen returns 0x%x", err);
+
+ if (err == ((M4OSA_UInt32)M4ERR_UNSUPPORTED_MEDIA_TYPE)) {
+ M4OSA_TRACE1_0("M4MCS_open_normalMode(): returning M4MCS_ERR_FILE_DRM_PROTECTED");
+ return M4MCS_ERR_FILE_DRM_PROTECTED;
+ } else {
+ /**
+ * If the error is from the core reader, we change it to a public VXS error */
+ M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
+ if (M4MP4_READER == uiCoreId)
+ {
+ M4OSA_TRACE1_0("M4MCS_open_normalMode(): returning M4MCS_ERR_INVALID_INPUT_FILE");
+ return M4MCS_ERR_INVALID_INPUT_FILE;
+ }
+ }
+ return err;
+ }
+
+ /**
+ * Get the streams from the input file */
+ while (M4NO_ERROR == err)
+ {
+ err = pC->m_pReader->m_pFctGetNextStream(pC->pReaderContext, &mediaFamily,
+ &pStreamHandler);
+
+ /**
+ * In case we found a BIFS stream or something else...*/
+ if((err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
+ || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)))
+ {
+ err = M4NO_ERROR;
+ continue;
+ }
+
+ if (M4NO_ERROR == err) /**< One stream found */
+ {
+ /**
+ * Found the first video stream */
+ if ((M4READER_kMediaFamilyVideo == mediaFamily) \
+ && (M4OSA_NULL == pC->pReaderVideoStream))
+ {
+ if ((M4DA_StreamTypeVideoH263==pStreamHandler->m_streamType) ||
+ (M4DA_StreamTypeVideoMpeg4==pStreamHandler->m_streamType)
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+ ||(M4DA_StreamTypeVideoMpeg4Avc==pStreamHandler->m_streamType))
+#else
+ ||((M4DA_StreamTypeVideoMpeg4Avc==pStreamHandler->m_streamType)
+ &&(pC->m_pVideoDecoderItTable[M4DECODER_kVideoTypeAVC] != M4OSA_NULL)))
+#endif
+ {
+ M4OSA_TRACE3_0("M4MCS_open_normalMode():\
+ Found a H263 or MPEG-4 video stream in input 3gpp clip");
+
+ /**
+ * Keep pointer to the video stream */
+ pC->pReaderVideoStream = (M4_VideoStreamHandler*)pStreamHandler;
+ pC->bUnsupportedVideoFound = M4OSA_FALSE;
+ pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+ /**
+ * Init our video stream state variable */
+ pC->VideoState = M4MCS_kStreamState_STARTED;
+
+ /**
+ * Reset the stream reader */
+ err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+ (M4_StreamHandler*)pC->pReaderVideoStream);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+ m_pReader->m_pFctReset(video) returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Initializes an access Unit */
+ err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext, pStreamHandler,
+ &pC->ReaderVideoAU);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+ m_pReader->m_pFctFillAuStruct(video) returns 0x%x", err);
+ return err;
+ }
+ }
+ else /**< Not H263 or MPEG-4 (H264, etc.) */
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+ Found an unsupported video stream (0x%x) in input 3gpp clip",
+ pStreamHandler->m_streamType);
+
+ pC->bUnsupportedVideoFound = M4OSA_TRUE;
+ pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+ }
+ }
+ /**
+ * Found the first audio stream */
+ else if ((M4READER_kMediaFamilyAudio == mediaFamily)
+ && (M4OSA_NULL == pC->pReaderAudioStream))
+ {
+ if ((M4DA_StreamTypeAudioAmrNarrowBand==pStreamHandler->m_streamType) ||
+ (M4DA_StreamTypeAudioAac==pStreamHandler->m_streamType) ||
+ (M4DA_StreamTypeAudioMp3==pStreamHandler->m_streamType) ||
+ (M4DA_StreamTypeAudioEvrc==pStreamHandler->m_streamType) )
+ {
+ M4OSA_TRACE3_0("M4MCS_open_normalMode(): Found an AMR-NB, AAC \
+ or MP3 audio stream in input clip");
+
+ /**
+ * Keep pointer to the audio stream */
+ pC->pReaderAudioStream = (M4_AudioStreamHandler*)pStreamHandler;
+ pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+ pC->bUnsupportedAudioFound = M4OSA_FALSE;
+
+ /**
+ * Init our audio stream state variable */
+ pC->AudioState = M4MCS_kStreamState_STARTED;
+
+ /**
+ * Reset the stream reader */
+ err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+ (M4_StreamHandler*)pC->pReaderAudioStream);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+ m_pReader->m_pFctReset(audio) returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Initializes an access Unit */
+ err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext, pStreamHandler,
+ &pC->ReaderAudioAU);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode(): \
+ m_pReader->m_pFctFillAuStruct(audio) returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Output max AU size is equal to input max AU size (this value
+ * will be changed if there is audio transcoding) */
+ pC->uiAudioMaxAuSize = pStreamHandler->m_maxAUSize;
+
+ }
+ else
+ {
+ /**< Not AMR-NB, AAC, MP3 nor EVRC (AMR-WB, WAV...) */
+ M4OSA_TRACE1_1("M4MCS_open_normalMode(): Found an unsupported audio stream\
+ (0x%x) in input 3gpp clip", pStreamHandler->m_streamType);
+
+ pC->bUnsupportedAudioFound = M4OSA_TRUE;
+ pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+ }
+ }
+ }
+ } /**< end of while (M4NO_ERROR == err) */
+
+ /**
+ * Check we found at least one supported stream */
+ if((M4OSA_NULL == pC->pReaderVideoStream) && (M4OSA_NULL == pC->pReaderAudioStream))
+ {
+ M4OSA_TRACE1_0("M4MCS_open_normalMode(): returning \
+ M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM");
+ return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
+ }
+
+#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
+ if(pC->VideoState == M4MCS_kStreamState_STARTED)
+ {
+ err = M4MCS_setCurrentVideoDecoder(pContext,
+ pC->pReaderVideoStream->m_basicProperties.m_streamType);
+ M4ERR_CHECK_RETURN(err);
+ }
+#endif
+
+ if(pC->AudioState == M4MCS_kStreamState_STARTED)
+ {
+ //EVRC
+ /* NOTE(review): pStreamHandler points at the LAST stream returned by
+ * m_pFctGetNextStream, which is not necessarily the audio stream;
+ * pC->pReaderAudioStream->m_basicProperties.m_streamType looks like
+ * the intended operand — confirm before changing. */
+ if(M4DA_StreamTypeAudioEvrc != pStreamHandler->m_streamType)
+ /* decoder not supported yet, but allow to do null encoding */
+ {
+ err = M4MCS_setCurrentAudioDecoder(pContext,
+ pC->pReaderAudioStream->m_basicProperties.m_streamType);
+ M4ERR_CHECK_RETURN(err);
+ }
+ }
+
+ /**
+ * Get the audio and video stream properties */
+ err = M4MCS_intGetInputClipProperties(pC);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+ M4MCS_intGetInputClipProperties returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Set the begin cut decoding increment according to the input frame rate */
+ if (0. != pC->InputFileProperties.fAverageFrameRate) /**< sanity check */
+ {
+ pC->iVideoBeginDecIncr = (M4OSA_Int32)(3000. \
+ / pC->InputFileProperties.fAverageFrameRate); /**< about 3 frames */
+ }
+ else
+ {
+ pC->iVideoBeginDecIncr = 200; /**< default value: 200 milliseconds (3 frames @ 15fps)*/
+ }
+
+ /**
+ * Update state automaton */
+ pC->State = M4MCS_kState_OPENED;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4MCS_open_normalMode(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCheckAndGetCodecProperties(M4MCS_InternalContext *pC)
+ * @brief Decode the first audio frame of the clip to retrieve the actual
+ * codec properties (channel count, sampling frequency), then reset
+ * the reader so normal processing restarts from the beginning.
+ * @param pC (IN/OUT) MCS internal context
+ * @return M4NO_ERROR, or any error returned by the decoder/reader
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_intCheckAndGetCodecProperties(
+ M4MCS_InternalContext *pC) {
+
+ M4OSA_ERR err = M4NO_ERROR;
+ M4AD_Buffer outputBuffer;
+ uint32_t optionValue =0;
+
+ /* Put the local buffer in a well-defined state: if no output buffer is
+ * allocated below (m_bufferSize == 0), the free() at the end would
+ * otherwise operate on an uninitialized pointer (undefined behavior). */
+ outputBuffer.m_dataAddress = M4OSA_NULL;
+ outputBuffer.m_bufferSize = 0;
+
+ M4OSA_TRACE3_0("M4MCS_intCheckAndGetCodecProperties :start");
+
+ // Decode first audio frame from clip to get properties from codec
+
+ if (M4DA_StreamTypeAudioAac ==
+ pC->pReaderAudioStream->m_basicProperties.m_streamType) {
+
+ err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
+ &pC->pAudioDecCtxt,
+ pC->pReaderAudioStream, &(pC->AacProperties));
+ } else {
+ err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
+ &pC->pAudioDecCtxt,
+ pC->pReaderAudioStream,
+ pC->m_pCurrentAudioDecoderUserData);
+ }
+ if (M4NO_ERROR != err) {
+
+ M4OSA_TRACE1_1(
+ "M4MCS_intCheckAndGetCodecProperties: m_pFctCreateAudioDec \
+ returns 0x%x", err);
+ return err;
+ }
+
+ pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
+ M4AD_kOptionID_3gpReaderInterface, (M4OSA_DataOption) pC->m_pReaderDataIt);
+
+ pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
+ M4AD_kOptionID_AudioAU, (M4OSA_DataOption) &pC->ReaderAudioAU);
+
+ if( pC->m_pAudioDecoder->m_pFctStartAudioDec != M4OSA_NULL ) {
+
+ err = pC->m_pAudioDecoder->m_pFctStartAudioDec(pC->pAudioDecCtxt);
+ if( M4NO_ERROR != err ) {
+
+ M4OSA_TRACE1_1(
+ "M4MCS_intCheckAndGetCodecProperties: m_pFctStartAudioDec \
+ returns 0x%x", err);
+ return err;
+ }
+ }
+
+ /**
+ * Allocate output buffer for the audio decoder */
+ outputBuffer.m_bufferSize =
+ pC->pReaderAudioStream->m_byteFrameLength
+ * pC->pReaderAudioStream->m_byteSampleSize
+ * pC->pReaderAudioStream->m_nbChannels;
+
+ if( outputBuffer.m_bufferSize > 0 ) {
+
+ outputBuffer.m_dataAddress =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(outputBuffer.m_bufferSize \
+ *sizeof(short), M4MCS, (M4OSA_Char *)"outputBuffer.m_bufferSize");
+
+ if( M4OSA_NULL == outputBuffer.m_dataAddress ) {
+
+ M4OSA_TRACE1_0(
+ "M4MCS_intCheckAndGetCodecProperties():\
+ unable to allocate outputBuffer.m_dataAddress, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ }
+
+ err = pC->m_pAudioDecoder->m_pFctStepAudioDec(pC->pAudioDecCtxt,
+ M4OSA_NULL, &outputBuffer, M4OSA_FALSE);
+
+ if ( err == M4WAR_INFO_FORMAT_CHANGE ) {
+
+ // Get the properties from codec node
+ pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(pC->pAudioDecCtxt,
+ M4AD_kOptionID_AudioNbChannels, (M4OSA_DataOption) &optionValue);
+
+ // Reset Reader structure value also
+ pC->pReaderAudioStream->m_nbChannels = optionValue;
+
+ pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(pC->pAudioDecCtxt,
+ M4AD_kOptionID_AudioSampFrequency, (M4OSA_DataOption) &optionValue);
+
+ // Reset Reader structure value also
+ pC->pReaderAudioStream->m_samplingFrequency = optionValue;
+
+ if (M4DA_StreamTypeAudioAac ==
+ pC->pReaderAudioStream->m_basicProperties.m_streamType) {
+
+ pC->AacProperties.aNumChan =
+ pC->pReaderAudioStream->m_nbChannels;
+ pC->AacProperties.aSampFreq =
+ pC->pReaderAudioStream->m_samplingFrequency;
+
+ }
+
+ } else if( err != M4NO_ERROR) {
+ M4OSA_TRACE1_1("M4MCS_intCheckAndGetCodecProperties:\
+ m_pFctStepAudioDec returns err = 0x%x", err);
+ }
+
+ /* Safe even when no buffer was allocated: m_dataAddress is NULL then. */
+ free(outputBuffer.m_dataAddress);
+
+ // Reset the stream reader
+ err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+ (M4_StreamHandler *)pC->pReaderAudioStream);
+
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1("M4MCS_intCheckAndGetCodecProperties\
+ Error in reseting reader: 0x%x", err);
+ }
+
+ return err;
+
+}
+
+M4OSA_ERR M4MCS_intLimitBitratePerCodecProfileLevel(
+ M4ENCODER_AdvancedParams* EncParams) {
+
+ /* Clamp the configured video bitrate to the maximum allowed by the
+ * selected encoder format's profile/level combination. */
+ M4OSA_ERR err = M4NO_ERROR;
+
+ if (M4ENCODER_kH263 == EncParams->Format) {
+ EncParams->Bitrate = M4MCS_intLimitBitrateForH263Enc(
+ EncParams->videoProfile,
+ EncParams->videoLevel, EncParams->Bitrate);
+ } else if (M4ENCODER_kMPEG4 == EncParams->Format) {
+ EncParams->Bitrate = M4MCS_intLimitBitrateForMpeg4Enc(
+ EncParams->videoProfile,
+ EncParams->videoLevel, EncParams->Bitrate);
+ } else if (M4ENCODER_kH264 == EncParams->Format) {
+ EncParams->Bitrate = M4MCS_intLimitBitrateForH264Enc(
+ EncParams->videoProfile,
+ EncParams->videoLevel, EncParams->Bitrate);
+ } else {
+ M4OSA_TRACE1_1("M4MCS_intLimitBitratePerCodecProfileLevel: \
+ Wrong enc format %d", EncParams->Format);
+ err = M4ERR_PARAMETER;
+ }
+
+ return err;
+
+}
+
+M4OSA_Int32 M4MCS_intLimitBitrateForH264Enc(M4OSA_Int32 profile,
+ M4OSA_Int32 level, M4OSA_Int32 bitrate) {
+
+ /* Maximum allowed video bitrate (bit/s) per AVC level, grouped by
+ * profile family. A level absent from the selected table leaves the
+ * bitrate unchanged. */
+ typedef struct {
+ M4OSA_Int32 level;
+ M4OSA_Int32 maxBitrate;
+ } LevelCap;
+
+ /* Baseline and Main profiles share the same caps. */
+ static const LevelCap baselineMainCaps[] = {
+ { OMX_VIDEO_AVCLevel1, 64000 },
+ { OMX_VIDEO_AVCLevel1b, 128000 },
+ { OMX_VIDEO_AVCLevel11, 192000 },
+ { OMX_VIDEO_AVCLevel12, 384000 },
+ { OMX_VIDEO_AVCLevel13, 768000 },
+ { OMX_VIDEO_AVCLevel2, 2000000 },
+ { OMX_VIDEO_AVCLevel21, 4000000 },
+ { OMX_VIDEO_AVCLevel22, 4000000 },
+ { OMX_VIDEO_AVCLevel3, 10000000 },
+ { OMX_VIDEO_AVCLevel31, 14000000 },
+ { OMX_VIDEO_AVCLevel32, 20000000 },
+ { OMX_VIDEO_AVCLevel4, 20000000 },
+ { OMX_VIDEO_AVCLevel41, 50000000 },
+ { OMX_VIDEO_AVCLevel42, 50000000 },
+ { OMX_VIDEO_AVCLevel5, 135000000 },
+ { OMX_VIDEO_AVCLevel51, 240000000 }
+ };
+
+ /* High profile caps are 1.25x the Baseline/Main values. */
+ static const LevelCap highCaps[] = {
+ { OMX_VIDEO_AVCLevel1, 80000 },
+ { OMX_VIDEO_AVCLevel1b, 160000 },
+ { OMX_VIDEO_AVCLevel11, 240000 },
+ { OMX_VIDEO_AVCLevel12, 480000 },
+ { OMX_VIDEO_AVCLevel13, 960000 },
+ { OMX_VIDEO_AVCLevel2, 2500000 },
+ { OMX_VIDEO_AVCLevel21, 5000000 },
+ { OMX_VIDEO_AVCLevel22, 5000000 },
+ { OMX_VIDEO_AVCLevel3, 12500000 },
+ { OMX_VIDEO_AVCLevel31, 17500000 },
+ { OMX_VIDEO_AVCLevel32, 25000000 },
+ { OMX_VIDEO_AVCLevel4, 25000000 },
+ { OMX_VIDEO_AVCLevel41, 62500000 },
+ { OMX_VIDEO_AVCLevel42, 62500000 },
+ { OMX_VIDEO_AVCLevel5, 168750000 },
+ { OMX_VIDEO_AVCLevel51, 300000000 }
+ };
+
+ const LevelCap *pCaps = M4OSA_NULL;
+ M4OSA_UInt32 nbCaps = 0;
+ M4OSA_UInt32 i;
+
+ switch (profile) {
+ case OMX_VIDEO_AVCProfileBaseline:
+ case OMX_VIDEO_AVCProfileMain:
+ pCaps = baselineMainCaps;
+ nbCaps = sizeof(baselineMainCaps) / sizeof(baselineMainCaps[0]);
+ break;
+
+ case OMX_VIDEO_AVCProfileHigh:
+ pCaps = highCaps;
+ nbCaps = sizeof(highCaps) / sizeof(highCaps[0]);
+ break;
+
+ default:
+ /* We do not handle any other AVC profile for now:
+ * return the input bitrate. */
+ break;
+ }
+
+ for (i = 0; i < nbCaps; i++) {
+ if (level == pCaps[i].level) {
+ return (bitrate > pCaps[i].maxBitrate)
+ ? pCaps[i].maxBitrate : bitrate;
+ }
+ }
+
+ /* Unknown profile or level: no limitation is applied. */
+ return bitrate;
+}
+
+M4OSA_Int32 M4MCS_intLimitBitrateForMpeg4Enc(M4OSA_Int32 profile,
+ M4OSA_Int32 level, M4OSA_Int32 bitrate) {
+
+ /* The cap defaults to the requested bitrate (i.e. no limitation);
+ * only the Simple profile levels below constrain it. */
+ M4OSA_Int32 maxBitrate = bitrate;
+
+ if (OMX_VIDEO_MPEG4ProfileSimple == profile) {
+ if (OMX_VIDEO_MPEG4Level0 == level) {
+ maxBitrate = 64000;
+ } else if (OMX_VIDEO_MPEG4Level0b == level) {
+ maxBitrate = 128000;
+ } else if (OMX_VIDEO_MPEG4Level1 == level) {
+ maxBitrate = 64000;
+ } else if (OMX_VIDEO_MPEG4Level2 == level) {
+ maxBitrate = 128000;
+ } else if (OMX_VIDEO_MPEG4Level3 == level) {
+ maxBitrate = 384000;
+ }
+ }
+
+ return (bitrate > maxBitrate) ? maxBitrate : bitrate;
+}
+
+M4OSA_Int32 M4MCS_intLimitBitrateForH263Enc(M4OSA_Int32 profile,
+ M4OSA_Int32 level, M4OSA_Int32 bitrate) {
+
+ /* The cap defaults to the requested bitrate (i.e. no limitation);
+ * only the Baseline profile levels below constrain it. */
+ M4OSA_Int32 maxBitrate = bitrate;
+
+ if (OMX_VIDEO_H263ProfileBaseline == profile) {
+ if (OMX_VIDEO_H263Level10 == level) {
+ maxBitrate = 64000;
+ } else if (OMX_VIDEO_H263Level20 == level) {
+ maxBitrate = 128000;
+ } else if (OMX_VIDEO_H263Level30 == level) {
+ maxBitrate = 384000;
+ }
+ }
+
+ return (bitrate > maxBitrate) ? maxBitrate : bitrate;
+}
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c b/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c
new file mode 100755
index 0000000..488de68
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file M4MCS_AudioEffects.c
+ * @brief MCS audio effects implementation
+ * @note This file implements the MCS audio fade-in and fade-out effects
+ *************************************************************************
+ **/
+
+/****************/
+/*** Includes ***/
+/****************/
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h" /**< OSAL debug management */
+
+/* Our headers */
+#include "M4MCS_API.h"
+#include "M4MCS_ErrorCodes.h"
+#include "M4MCS_InternalTypes.h"
+#include "M4MCS_InternalConfig.h"
+#include "M4MCS_InternalFunctions.h"
+
+/* Common headers (for aac) */
+#include "M4_Common.h"
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pContext)
+ * @brief Check if an effect has to be applied currently
+ * @note It is called by the stepEncoding function
+ * @param pContext (IN) MCS internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pC)
+{
+ /* Default: no audio effect is active for the current AU. */
+ pC->pActiveEffectNumber = -1;
+
+ /* Effects only apply to AUs that fall inside the cut window. */
+ if( (pC->ReaderAudioAU.m_CTS > pC->uiBeginCutTime)
+ && (pC->ReaderAudioAU.m_CTS < pC->uiEndCutTime) )
+ {
+ /* AU time expressed relative to the output clip start (rounded). */
+ M4OSA_UInt32 outputRelatedTime =
+ (M4OSA_UInt32)(pC->ReaderAudioAU.m_CTS - pC->uiBeginCutTime + 0.5);
+ M4OSA_UInt8 uiEffectIndex;
+
+ /* Pick the first effect whose [start, start+duration) window
+ * contains the current output time. */
+ for( uiEffectIndex = 0; uiEffectIndex < pC->nbEffects; uiEffectIndex++ )
+ {
+ if( (outputRelatedTime >=
+ (M4OSA_UInt32)(pC->pEffects[uiEffectIndex].uiStartTime))
+ && (outputRelatedTime <
+ (M4OSA_UInt32)(pC->pEffects[uiEffectIndex].uiStartTime
+ + pC->pEffects[uiEffectIndex].uiDuration)) )
+ {
+ pC->pActiveEffectNumber = uiEffectIndex;
+ break;
+ }
+ }
+ }
+
+ return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn()
+ * @brief Apply audio effect FadeIn to pPCMdata
+ * @param pC (IN/OUT) Internal edit context
+ * @param pPCMdata (IN/OUT) Input and Output PCM audio data
+ * @param uiPCMsize (IN) Size of pPCMdata
+ * @param pProgress (IN) Effect progress
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn( M4OSA_Void *pFunctionContext,
+ M4OSA_Int16 *pPCMdata,
+ M4OSA_UInt32 uiPCMsize,
+ M4MCS_ExternalProgress *pProgress)
+{
+ M4OSA_Int32 i32sample;
+ M4OSA_UInt32 uiNbSamples;
+
+ /* Clamp the progress indicator to its nominal 0..1000 range. */
+ if( pProgress->uiProgress > 1000 )
+ {
+ pProgress->uiProgress = 1000;
+ }
+
+ /* uiPCMsize is in bytes; each sample is a 16-bit value. */
+ uiNbSamples = uiPCMsize >> 1;
+
+ /* Linear ramp-up: scale every sample by progress/1000. */
+ for( ; uiNbSamples > 0; uiNbSamples-- )
+ {
+ i32sample = *pPCMdata;
+ i32sample *= pProgress->uiProgress;
+ i32sample /= 1000;
+ *pPCMdata++ = (M4OSA_Int16)i32sample;
+ }
+
+ M4OSA_TRACE3_0("M4MCS_editAudioEffectFct_FadeIn: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut()
+ * @brief Apply audio effect FadeOut to pPCMdata
+ * @param pC (IN/OUT) Internal edit context
+ * @param pPCMdata (IN/OUT) Input and Output PCM audio data
+ * @param uiPCMsize (IN) Size of pPCMdata
+ * @param pProgress (IN) Effect progress
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut( M4OSA_Void *pFunctionContext,
+ M4OSA_Int16 *pPCMdata,
+ M4OSA_UInt32 uiPCMsize,
+ M4MCS_ExternalProgress *pProgress)
+{
+ M4OSA_Int32 i32sample;
+ M4OSA_UInt32 uiNbSamples;
+
+ /* Clamp the progress indicator to its nominal 0..1000 range. */
+ if( pProgress->uiProgress > 1000 )
+ {
+ pProgress->uiProgress = 1000;
+ }
+ /* Invert the progress so the gain ramps DOWN as progress advances. */
+ pProgress->uiProgress = 1000 - pProgress->uiProgress;
+
+ /* uiPCMsize is in bytes; each sample is a 16-bit value. */
+ uiNbSamples = uiPCMsize >> 1;
+
+ /* Linear ramp-down: scale every sample by (inverted) progress/1000. */
+ for( ; uiNbSamples > 0; uiNbSamples-- )
+ {
+ i32sample = *pPCMdata;
+ i32sample *= pProgress->uiProgress;
+ i32sample /= 1000;
+ *pPCMdata++ = (M4OSA_Int16)i32sample;
+ }
+
+ M4OSA_TRACE3_0("M4MCS_editAudioEffectFct_FadeOut: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c b/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c
new file mode 100755
index 0000000..554492b
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4MCS_Codecs.c
+ * @brief MCS implementation
+ * @note This file contains all functions related to audio/video
+ * codec manipulations.
+ ************************************************************************
+ */
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+#include "NXPSW_CompilerSwitches.h"
+#include "M4OSA_Debug.h" /* Include for OSAL debug services */
+#include "M4MCS_InternalTypes.h" /* Internal types of the MCS */
+
+
+#ifdef M4MCS_SUPPORT_VIDEC_3GP
+#include "M4_MPEG4VI_VideoHandler.h" /*needed for renderer error codes*/
+#endif
+
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_clearInterfaceTables()
+ * @brief Clear encoders, decoders, reader and writers interfaces tables
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: The context is null
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_clearInterfaceTables(M4MCS_Context pContext)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4OSA_UInt8 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ /* Reset shortcut pointers so later double-registration can be detected */
+ pC->pWriterGlobalFcts = M4OSA_NULL;
+ pC->pWriterDataFcts = M4OSA_NULL;
+ pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+ pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+
+ pC->pCurrentVideoEncoderExternalAPI = M4OSA_NULL;
+ pC->pCurrentVideoEncoderUserData = M4OSA_NULL;
+
+ for (i = 0; i < M4WRITER_kType_NB; i++ )
+ {
+ pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+ pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+ }
+
+ for (i = 0; i < M4ENCODER_kVideo_NB; i++ )
+ {
+ pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+ pC->pVideoEncoderExternalAPITable[i] = M4OSA_NULL;
+ pC->pVideoEncoderUserDataTable[i] = M4OSA_NULL;
+ }
+
+ for (i = 0; i < M4ENCODER_kAudio_NB; i++ )
+ {
+ pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+ pC->pAudioEncoderFlag[i] = M4OSA_FALSE;
+ pC->pAudioEncoderUserDataTable[i] = M4OSA_NULL;
+ }
+
+ /* Reset reader shortcuts so later double-registration can be detected */
+ pC->m_pReader = M4OSA_NULL;
+ pC->m_pReaderDataIt = M4OSA_NULL;
+ pC->m_uiNbRegisteredReaders = 0;
+
+ for (i = 0; i < M4READER_kMediaType_NB; i++ )
+ {
+ pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+ pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+ }
+
+ pC->m_pVideoDecoder = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+ pC->m_pCurrentVideoDecoderUserData = M4OSA_NULL;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+ pC->m_uiNbRegisteredVideoDec = 0;
+ for (i = 0; i < M4DECODER_kVideoType_NB; i++ )
+ {
+ pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+ pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+ }
+
+ pC->m_pAudioDecoder = M4OSA_NULL;
+ for (i = 0; i < M4AD_kType_NB; i++ )
+ {
+ pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+ pC->m_pAudioDecoderFlagTable[i] = M4OSA_FALSE;
+ pC->m_pAudioDecoderUserDataTable[i] = M4OSA_NULL;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerWriter()
+ * @brief This function will register a specific file format writer.
+ * @note According to the Mediatype, this function will store in the internal context
+ * the writer context.
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext,pWtrGlobalInterface or
+ * pWtrDataInterface is M4OSA_NULL
+ * (debug only), or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerWriter(M4MCS_Context pContext, M4WRITER_OutputFileType MediaType,
+ M4WRITER_GlobalInterface* pWtrGlobalInterface,
+ M4WRITER_DataInterface* pWtrDataInterface)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
+ "MCS: context is M4OSA_NULL in M4MCS_registerWriter");
+ M4OSA_DEBUG_IF2((pWtrGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
+ "pWtrGlobalInterface is M4OSA_NULL in M4MCS_registerWriter");
+ M4OSA_DEBUG_IF2((pWtrDataInterface == M4OSA_NULL),M4ERR_PARAMETER,
+ "pWtrDataInterface is M4OSA_NULL in M4MCS_registerWriter");
+
+ M4OSA_TRACE3_3("MCS: M4MCS_registerWriter called with pContext=0x%x,\
+ pWtrGlobalInterface=0x%x, pWtrDataInterface=0x%x", pC,pWtrGlobalInterface,
+ pWtrDataInterface);
+
+ if((MediaType == M4WRITER_kUnknown) || (MediaType >= M4WRITER_kType_NB))
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+ return M4ERR_PARAMETER;
+ }
+
+ if (pC->WriterInterface[MediaType].pGlobalFcts != M4OSA_NULL)
+ {
+ /* a writer corresponding to this media type has already been registered !*/
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "This media type has already been registered");
+ return M4ERR_PARAMETER;
+ }
+
+ /*
+ * Store the writer's global and data function tables for this media type */
+ pC->WriterInterface[MediaType].pGlobalFcts = pWtrGlobalInterface;
+ pC->WriterInterface[MediaType].pDataFcts = pWtrDataInterface;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerVideoEncoder()
+ * @brief This function will register a specific video encoder.
+ * @note According to the Mediatype, this function will store in the internal context
+ * the encoder context.
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
+ * or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerVideoEncoder (
+ M4MCS_Context pContext,
+ M4ENCODER_Format MediaType,
+ M4ENCODER_GlobalInterface *pEncGlobalInterface)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
+ "MCS: context is M4OSA_NULL in M4MCS_registerVideoEncoder");
+ M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
+ "pEncGlobalInterface is M4OSA_NULL in M4MCS_registerVideoEncoder");
+
+ M4OSA_TRACE3_2("MCS: M4MCS_registerVideoEncoder called with pContext=0x%x,\
+ pEncGlobalInterface=0x%x", pC, pEncGlobalInterface);
+
+ if (MediaType >= M4ENCODER_kVideo_NB)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid video encoder type");
+ return M4ERR_PARAMETER;
+ }
+
+ if (pC->pVideoEncoderInterface[MediaType] != M4OSA_NULL)
+ {
+ /* can be legitimate, in cases where we have one version that can use external encoders
+ but which still has the built-in one to be able to work without an external encoder; in
+ this case the new encoder simply replaces the old one (i.e. we unregister it first). */
+ free(pC->pVideoEncoderInterface[MediaType]);
+ pC->pVideoEncoderInterface[MediaType] = M4OSA_NULL;
+ }
+
+ /*
+ * Save encoder interface in context */
+ pC->pVideoEncoderInterface[MediaType] = pEncGlobalInterface;
+ /* The actual userData and external API will be set by the registration function in the case
+ of an external encoder (add it as a parameter to this function in the long run?) */
+ pC->pVideoEncoderUserDataTable[MediaType] = M4OSA_NULL;
+ pC->pVideoEncoderExternalAPITable[MediaType] = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerAudioEncoder()
+ * @brief This function will register a specific audio encoder.
+ * @note According to the Mediatype, this function will store in the internal context
+ * the encoder context.
+ * @param pContext: (IN) Execution context.
+ * @param mediaType: (IN) The media type.
+ * @param pEncGlobalInterface: (IN) the encoder interface functions.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext or pEncGlobalInterface is
+ * M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerAudioEncoder(
+ M4MCS_Context pContext,
+ M4ENCODER_AudioFormat MediaType,
+ M4ENCODER_AudioGlobalInterface *pEncGlobalInterface)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
+ "MCS: context is M4OSA_NULL in M4MCS_registerAudioEncoder");
+ M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
+ "pEncGlobalInterface is M4OSA_NULL in M4MCS_registerAudioEncoder");
+
+ M4OSA_TRACE3_2("MCS: M4MCS_registerAudioEncoder called with pContext=0x%x,\
+ pEncGlobalInterface=0x%x", pC, pEncGlobalInterface);
+
+ if (MediaType >= M4ENCODER_kAudio_NB)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid audio encoder type");
+ return M4ERR_PARAMETER;
+ }
+
+ if(M4OSA_NULL != pC->pAudioEncoderInterface[MediaType])
+ {
+ free(pC->pAudioEncoderInterface[MediaType]);
+ pC->pAudioEncoderInterface[MediaType] = M4OSA_NULL;
+
+ if(M4OSA_NULL != pC->pAudioEncoderUserDataTable[MediaType])
+ {
+ free(pC->pAudioEncoderUserDataTable[MediaType]);
+ pC->pAudioEncoderUserDataTable[MediaType] = M4OSA_NULL;
+ }
+ }
+
+ /*
+ * Save encoder interface in context */
+ pC->pAudioEncoderInterface[MediaType] = pEncGlobalInterface;
+ pC->pAudioEncoderFlag[MediaType] = M4OSA_FALSE; /* internal encoder */
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_registerReader()
+ * @brief Register a reader (global + data interfaces) for a media type.
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_registerReader(
+ M4MCS_Context pContext,
+ M4READER_MediaType mediaType,
+ M4READER_GlobalInterface *pRdrGlobalInterface,
+ M4READER_DataInterface *pRdrDataInterface)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrGlobalInterface),
+ M4ERR_PARAMETER, "M4MCS_registerReader: invalid pointer on global interface");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrDataInterface),
+ M4ERR_PARAMETER, "M4MCS_registerReader: invalid pointer on data interface");
+
+ if (mediaType == M4READER_kMediaTypeUnknown || mediaType >= M4READER_kMediaType_NB)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+ return M4ERR_PARAMETER;
+ }
+
+ if (pC->m_pReaderGlobalItTable[mediaType] != M4OSA_NULL)
+ {
+ /* a reader corresponding to this media type has already been registered !*/
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "This media type has already been registered");
+ return M4ERR_PARAMETER;
+ }
+
+ pC->m_pReaderGlobalItTable[mediaType] = pRdrGlobalInterface;
+ pC->m_pReaderDataItTable[mediaType] = pRdrDataInterface;
+
+ pC->m_uiNbRegisteredReaders++;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_registerVideoDecoder()
+ * @brief Register video decoder
+ * @param pContext (IN/OUT) MCS context.
+ * @param decoderType (IN) Decoder type
+ * @param pDecoderInterface (IN) Decoder interface.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only), or the decoder
+ * type is invalid
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_registerVideoDecoder(
+ M4MCS_Context pContext,
+ M4DECODER_VideoType decoderType,
+ M4DECODER_VideoInterface *pDecoderInterface)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+ "M4MCS_registerVideoDecoder: invalid pointer on decoder interface");
+
+ if (decoderType >= M4DECODER_kVideoType_NB)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid video decoder type");
+ return M4ERR_PARAMETER;
+ }
+
+ if (pC->m_pVideoDecoderItTable[decoderType] != M4OSA_NULL)
+ {
+#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
+ /* a decoder corresponding to this media type has already been registered !*/
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Decoder has already been registered");
+ return M4ERR_PARAMETER;
+#else /* external decoders are possible */
+ /* can be legitimate, in cases where we have one version that can use external decoders
+ but which still has the built-in one to be able to work without an external decoder; in
+ this case the new decoder simply replaces the old one (i.e. we unregister it first). */
+ free(pC->m_pVideoDecoderItTable[decoderType]);
+ pC->m_pVideoDecoderItTable[decoderType] = M4OSA_NULL;
+ /* also release any user data attached to the previous decoder */
+ if (pC->m_pVideoDecoderUserDataTable[decoderType] != M4OSA_NULL)
+ {
+ free(pC->m_pVideoDecoderUserDataTable[decoderType]);
+ pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+ }
+#endif /* are external decoders possible? */
+ }
+
+ pC->m_pVideoDecoderItTable[decoderType] = pDecoderInterface;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+ pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+ /* The actual userData will be set by the registration function in the case
+ of an external decoder (add it as a parameter to this function in the long run?) */
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+ pC->m_uiNbRegisteredVideoDec++;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_registerAudioDecoder()
+ * @brief Register audio decoder
+ * @note This function is used internally by the MCS to
+ * register audio decoders,
+ * @param context (IN/OUT) MCS context.
+ * @param decoderType (IN) Audio decoder type
+ * @param pDecoderInterface (IN) Audio decoder interface.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null, or the decoder type is invalid(in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_registerAudioDecoder(
+ M4MCS_Context pContext,
+ M4AD_Type decoderType,
+ M4AD_Interface *pDecoderInterface)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+ "M4MCS_registerAudioDecoder: invalid pointer on decoder interface");
+
+ if (decoderType >= M4AD_kType_NB)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid audio decoder type");
+ return M4ERR_PARAMETER;
+ }
+
+ if(M4OSA_NULL != pC->m_pAudioDecoderItTable[decoderType])
+ {
+ free(pC->m_pAudioDecoderItTable[decoderType]);
+ pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
+
+ if(M4OSA_NULL != pC->m_pAudioDecoderUserDataTable[decoderType])
+ {
+ free(pC->m_pAudioDecoderUserDataTable[decoderType]);
+ pC->m_pAudioDecoderUserDataTable[decoderType] = M4OSA_NULL;
+ }
+ }
+ pC->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
+ pC->m_pAudioDecoderFlagTable[decoderType] = M4OSA_FALSE; /* internal decoder */
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_unRegisterAllWriters()
+ * @brief Unregister and free all registered writer interfaces
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_unRegisterAllWriters(M4MCS_Context pContext)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4OSA_Int32 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ for (i = 0; i < M4WRITER_kType_NB; i++)
+ {
+ if (pC->WriterInterface[i].pGlobalFcts != M4OSA_NULL)
+ {
+ free(pC->WriterInterface[i].pGlobalFcts );
+ pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+ }
+ if (pC->WriterInterface[i].pDataFcts != M4OSA_NULL)
+ {
+ free(pC->WriterInterface[i].pDataFcts );
+ pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+ }
+ }
+
+ pC->pWriterGlobalFcts = M4OSA_NULL;
+ pC->pWriterDataFcts = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_unRegisterAllEncoders()
+ * @brief Unregister and free all encoders (external audio ones excepted)
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_unRegisterAllEncoders(M4MCS_Context pContext)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4OSA_Int32 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ for (i = 0; i < M4ENCODER_kVideo_NB; i++)
+ {
+ if (pC->pVideoEncoderInterface[i] != M4OSA_NULL)
+ {
+ free(pC->pVideoEncoderInterface[i] );
+ pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+ }
+ }
+
+ for (i = 0; i < M4ENCODER_kAudio_NB; i++)
+ {
+ if (pC->pAudioEncoderInterface[i] != M4OSA_NULL)
+ {
+ /*Don't free external audio encoders interfaces (flag set at registration)*/
+ if (M4OSA_FALSE == pC->pAudioEncoderFlag[i])
+ {
+ free(pC->pAudioEncoderInterface[i] );
+ }
+ pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+ }
+ }
+
+ pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+ pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_unRegisterAllReaders()
+ * @brief Unregister and free all registered reader interfaces
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_unRegisterAllReaders(M4MCS_Context pContext)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4OSA_Int32 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ for (i = 0; i < M4READER_kMediaType_NB; i++)
+ {
+ if (pC->m_pReaderGlobalItTable[i] != M4OSA_NULL)
+ {
+ free(pC->m_pReaderGlobalItTable[i] );
+ pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+ }
+ if (pC->m_pReaderDataItTable[i] != M4OSA_NULL)
+ {
+ free(pC->m_pReaderDataItTable[i] );
+ pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+ }
+ }
+
+ pC->m_uiNbRegisteredReaders = 0;
+ pC->m_pReader = M4OSA_NULL;
+ pC->m_pReaderDataIt = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_unRegisterAllDecoders()
+ * @brief Unregister and free all decoders (external audio ones excepted)
+ * @param pContext (IN/OUT) MCS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_unRegisterAllDecoders(M4MCS_Context pContext)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4OSA_Int32 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ for (i = 0; i < M4DECODER_kVideoType_NB; i++)
+ {
+ if (pC->m_pVideoDecoderItTable[i] != M4OSA_NULL)
+ {
+ free(pC->m_pVideoDecoderItTable[i] );
+ pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+ }
+ }
+
+ for (i = 0; i < M4AD_kType_NB; i++)
+ {
+ if (pC->m_pAudioDecoderItTable[i] != M4OSA_NULL)
+ {
+ /*Don't free external audio decoders interfaces (flag set at registration)*/
+ if (M4OSA_FALSE == pC->m_pAudioDecoderFlagTable[i])
+ {
+ free(pC->m_pAudioDecoderItTable[i] );
+ }
+ pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+ }
+ }
+
+ pC->m_uiNbRegisteredVideoDec = 0;
+ pC->m_pVideoDecoder = M4OSA_NULL;
+
+ pC->m_pAudioDecoder = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_setCurrentWriter()
+ * @brief Set current writer
+ * @param pContext (IN/OUT) MCS context.
+ * @param mediaType (IN) Output file type (3GPP/MP4/M4V, AMR, MP3, PCM).
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentWriter( M4MCS_Context pContext,
+ M4VIDEOEDITING_FileType mediaType)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4WRITER_OutputFileType writerType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ switch (mediaType)
+ {
+ case M4VIDEOEDITING_kFileType_3GPP:
+ case M4VIDEOEDITING_kFileType_MP4:
+ case M4VIDEOEDITING_kFileType_M4V:
+ writerType = M4WRITER_k3GPP;
+ break;
+ case M4VIDEOEDITING_kFileType_AMR:
+ writerType = M4WRITER_kAMR;
+ break;
+ case M4VIDEOEDITING_kFileType_MP3:
+ writerType = M4WRITER_kMP3;
+ break;
+ case M4VIDEOEDITING_kFileType_PCM:
+ pC->b_isRawWriter = M4OSA_TRUE; /* PCM output is written raw */
+ writerType = M4WRITER_kPCM;
+ break;
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Writer type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ pC->pWriterGlobalFcts = pC->WriterInterface[writerType].pGlobalFcts;
+ pC->pWriterDataFcts = pC->WriterInterface[writerType].pDataFcts;
+
+ if (pC->pWriterGlobalFcts == M4OSA_NULL || pC->pWriterDataFcts == M4OSA_NULL)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Writer type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ pC->pWriterDataFcts->pWriterContext = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_setCurrentVideoEncoder()
+ * @brief Set a video encoder
+ * @param pContext (IN/OUT) MCS context.
+ * @param mediaType (IN) Encoder type (H264 needs M4VSS_SUPPORT_ENCODER_AVC)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentVideoEncoder(
+ M4MCS_Context pContext,
+ M4VIDEOEDITING_VideoFormat mediaType)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4ENCODER_Format encoderType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ switch (mediaType)
+ {
+ case M4VIDEOEDITING_kH263:
+ encoderType = M4ENCODER_kH263;
+ break;
+ case M4VIDEOEDITING_kMPEG4:
+ encoderType = M4ENCODER_kMPEG4;
+ break;
+ case M4VIDEOEDITING_kH264:
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+ encoderType = M4ENCODER_kH264;
+ break;
+#endif
+ /* without AVC support, H264 deliberately falls through to default */
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Video encoder type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ pC->pVideoEncoderGlobalFcts = pC->pVideoEncoderInterface[encoderType];
+ pC->pCurrentVideoEncoderExternalAPI = pC->pVideoEncoderExternalAPITable[encoderType];
+ pC->pCurrentVideoEncoderUserData = pC->pVideoEncoderUserDataTable[encoderType];
+
+ if (pC->pVideoEncoderGlobalFcts == M4OSA_NULL)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Video encoder type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_setCurrentAudioEncoder()
+ * @brief Set an audio encoder
+ * @param pContext (IN/OUT) MCS context.
+ * @param mediaType (IN) Encoder type (AMR-NB, AAC or MP3)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentAudioEncoder(
+ M4MCS_Context pContext,
+ M4VIDEOEDITING_AudioFormat mediaType)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4ENCODER_AudioFormat encoderType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ switch (mediaType)
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ encoderType = M4ENCODER_kAMRNB;
+ break;
+ case M4VIDEOEDITING_kAAC:
+ encoderType = M4ENCODER_kAAC;
+ break;
+ case M4VIDEOEDITING_kMP3:
+ encoderType = M4ENCODER_kMP3;
+ break;
+//EVRC support disabled
+// case M4VIDEOEDITING_kEVRC:
+// encoderType = M4ENCODER_kEVRC;
+// break;
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Audio encoder type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ pC->pAudioEncoderGlobalFcts = pC->pAudioEncoderInterface[encoderType];
+ pC->pCurrentAudioEncoderUserData = pC->pAudioEncoderUserDataTable[encoderType];
+
+ if (pC->pAudioEncoderGlobalFcts == M4OSA_NULL)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Audio encoder type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_setCurrentReader()
+ * @brief Set current reader
+ * @param pContext (IN/OUT) MCS context.
+ * @param mediaType (IN) Input file type (3GPP/MP4/M4V, AMR or MP3).
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentReader( M4MCS_Context pContext,
+ M4VIDEOEDITING_FileType mediaType)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4READER_MediaType readerType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ switch (mediaType)
+ {
+ case M4VIDEOEDITING_kFileType_3GPP:
+ case M4VIDEOEDITING_kFileType_MP4:
+ case M4VIDEOEDITING_kFileType_M4V:
+ readerType = M4READER_kMediaType3GPP;
+ break;
+ case M4VIDEOEDITING_kFileType_AMR:
+ readerType = M4READER_kMediaTypeAMR;
+ break;
+ case M4VIDEOEDITING_kFileType_MP3:
+ readerType = M4READER_kMediaTypeMP3;
+ break;
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Reader type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ pC->m_pReader = pC->m_pReaderGlobalItTable[readerType];
+ pC->m_pReaderDataIt = pC->m_pReaderDataItTable[readerType];
+
+ if (pC->m_pReader == M4OSA_NULL || pC->m_pReaderDataIt == M4OSA_NULL)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Reader type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_setCurrentVideoDecoder()
+ * @brief Set a video decoder
+ * @param pContext (IN/OUT) MCS context.
+ * @param mediaType (IN) Input video stream type (MPEG4/H263 or AVC)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentVideoDecoder( M4MCS_Context pContext,
+ M4_StreamType mediaType)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4DECODER_VideoType decoderType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ switch (mediaType)
+ {
+ case M4DA_StreamTypeVideoMpeg4:
+ case M4DA_StreamTypeVideoH263:
+ decoderType = M4DECODER_kVideoTypeMPEG4;
+ break;
+ case M4DA_StreamTypeVideoMpeg4Avc:
+ decoderType = M4DECODER_kVideoTypeAVC;
+ break;
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Video decoder type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ pC->m_pVideoDecoder = pC->m_pVideoDecoderItTable[decoderType];
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+ pC->m_pCurrentVideoDecoderUserData =
+ pC->m_pVideoDecoderUserDataTable[decoderType];
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+ if (pC->m_pVideoDecoder == M4OSA_NULL)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Video decoder type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_setCurrentAudioDecoder()
+ * @brief Set an audio decoder
+ * @param pContext (IN/OUT) MCS context.
+ * @param mediaType (IN) Input audio stream type (AMR-NB, AAC or MP3)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_setCurrentAudioDecoder( M4MCS_Context pContext,
+ M4_StreamType mediaType)
+{
+ M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+ M4AD_Type decoderType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+ switch (mediaType)
+ {
+ case M4DA_StreamTypeAudioAmrNarrowBand:
+ decoderType = M4AD_kTypeAMRNB;
+ break;
+ case M4DA_StreamTypeAudioAac:
+ case M4DA_StreamTypeAudioAacADTS:
+ case M4DA_StreamTypeAudioAacADIF:
+ decoderType = M4AD_kTypeAAC;
+ break;
+ case M4DA_StreamTypeAudioMp3:
+ decoderType = M4AD_kTypeMP3;
+ break;
+//EVRC support disabled
+// case M4DA_StreamTypeAudioEvrc:
+// decoderType = M4AD_kTypeEVRC;
+// break;
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Audio decoder type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ pC->m_pAudioDecoder = pC->m_pAudioDecoderItTable[decoderType];
+ pC->m_pCurrentAudioDecoderUserData =
+ pC->m_pAudioDecoderUserDataTable[decoderType];
+
+ if (pC->m_pAudioDecoder == M4OSA_NULL)
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+ "Audio decoder type not supported");
+ return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+ }
+
+ return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c b/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c
new file mode 100755
index 0000000..631ca87
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4MCS_MediaAndCodecSubscription.c
+ * @brief Media readers and codecs subscription
+ * @note This file implements the subscription of supported media
+ * readers and decoders for the MCS. Potential support can
+ * be activated or de-activated
+ * using compilation flags set in the projects settings.
+ ************************************************************************
+ */
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+#include "NXPSW_CompilerSwitches.h"
+
+
+#include "M4OSA_Debug.h"
+#include "M4MCS_InternalTypes.h" /**< Include for MCS specific types */
+#include "M4MCS_InternalFunctions.h" /**< Registration module */
+
+/* _______________________ */
+/*| |*/
+/*| reader subscription |*/
+/*|_______________________|*/
+
+/* Reader registration : at least one reader must be defined */
+#ifndef M4VSS_SUPPORT_READER_3GP
+#ifndef M4VSS_SUPPORT_READER_AMR
+#ifndef M4VSS_SUPPORT_READER_MP3
+#error "no reader registered"
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+#endif /* M4VSS_SUPPORT_READER_AMR */
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+/* Include files for each reader to subscribe */
+#ifdef M4VSS_SUPPORT_READER_3GP
+#include "VideoEditor3gpReader.h"
+#endif
+
+#ifdef M4VSS_SUPPORT_READER_AMR
+#include "M4READER_Amr.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_MP3
+#include "VideoEditorMp3Reader.h"
+#endif
+
+/* ______________________________ */
+/*| |*/
+/*| video decoder subscription |*/
+/*|______________________________|*/
+
+#include "VideoEditorAudioDecoder.h"
+#include "VideoEditorVideoDecoder.h"
+
+
+
+/* _______________________ */
+/*| |*/
+/*| writer subscription |*/
+/*|_______________________|*/
+
+/* Writer registration : at least one writer must be defined */
+#ifndef M4VSS_SUPPORT_WRITER_AMR
+#ifndef M4VSS_SUPPORT_WRITER_3GPP
+#ifndef M4VSS_SUPPORT_WRITER_PCM
+#ifndef M4VSS_SUPPORT_WRITER_MP3
+#error "no writer registered"
+#endif /* M4VSS_SUPPORT_WRITER_MP3 */
+#endif /* M4VSS_SUPPORT_WRITER_PCM */
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+#endif /* M4VSS_SUPPORT_WRITER_AMR */
+
+/* Include files for each writer to subscribe */
+#ifdef M4VSS_SUPPORT_WRITER_AMR
+extern M4OSA_ERR M4WRITER_AMR_getInterfaces( M4WRITER_OutputFileType* Type,
+ M4WRITER_GlobalInterface** SrcGlobalInterface,
+ M4WRITER_DataInterface** SrcDataInterface);
+#endif
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+extern M4OSA_ERR M4WRITER_3GP_getInterfaces( M4WRITER_OutputFileType* Type,
+ M4WRITER_GlobalInterface** SrcGlobalInterface,
+ M4WRITER_DataInterface** SrcDataInterface);
+#endif
+#ifdef M4VSS_SUPPORT_WRITER_PCM
+extern M4OSA_ERR M4WRITER_PCM_getInterfaces( M4WRITER_OutputFileType* Type,
+ M4WRITER_GlobalInterface** SrcGlobalInterface,
+ M4WRITER_DataInterface** SrcDataInterface);
+#endif
+#ifdef M4VSS_SUPPORT_WRITER_MP3
+extern M4OSA_ERR M4WRITER_MP3_getInterfaces( M4WRITER_OutputFileType* Type,
+ M4WRITER_GlobalInterface** SrcGlobalInterface,
+ M4WRITER_DataInterface** SrcDataInterface);
+#endif
+
+/* ______________________________ */
+/*| |*/
+/*| video encoder subscription |*/
+/*|______________________________|*/
+#include "VideoEditorAudioEncoder.h"
+#include "VideoEditorVideoEncoder.h"
+
+
+/* Include files for each video encoder to subscribe */
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+//#include "M4MP4E_interface.h"
+#endif
+
+
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) \
+ if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext);
+ * @brief This function registers the readers, decoders, writers and encoders
+ * in the MCS.
+ * @note
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext is NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext)
+{
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4READER_MediaType readerMediaType;
+ M4READER_GlobalInterface* pReaderGlobalInterface;
+ M4READER_DataInterface* pReaderDataInterface;
+
+ M4WRITER_OutputFileType writerMediaType;
+ M4WRITER_GlobalInterface* pWriterGlobalInterface;
+ M4WRITER_DataInterface* pWriterDataInterface;
+
+ M4AD_Type audioDecoderType;
+ M4ENCODER_AudioFormat audioCodecType;
+ M4ENCODER_AudioGlobalInterface* pAudioCodecInterface;
+ M4AD_Interface* pAudioDecoderInterface;
+
+ M4DECODER_VideoType videoDecoderType;
+ M4ENCODER_Format videoCodecType;
+ M4ENCODER_GlobalInterface* pVideoCodecInterface;
+ M4DECODER_VideoInterface* pVideoDecoderInterface;
+
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
+
+ /* _______________________ */
+ /*| |*/
+ /*| reader subscription |*/
+ /*|_______________________|*/
+
+ /* --- 3GP --- */
+
+#ifdef M4VSS_SUPPORT_READER_3GP
+ err = VideoEditor3gpReader_getInterface(&readerMediaType,
+ &pReaderGlobalInterface,
+ &pReaderDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4READER_3GP interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerReader( pContext, readerMediaType,
+ pReaderGlobalInterface,
+ pReaderDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register 3GP reader");
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+ /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_READER_AMR
+ err = M4READER_AMR_getInterfaces( &readerMediaType,
+ &pReaderGlobalInterface,
+ &pReaderDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4READER_AMR interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerReader( pContext, readerMediaType,
+ pReaderGlobalInterface,
+ pReaderDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register AMR reader");
+#endif /* M4VSS_SUPPORT_READER_AMR */
+
+ /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_READER_MP3
+
+ err = VideoEditorMp3Reader_getInterface(&readerMediaType,
+ &pReaderGlobalInterface,
+ &pReaderDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4READER_MP3 interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerReader( pContext, readerMediaType,
+ pReaderGlobalInterface,
+ pReaderDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register MP3 reader");
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+
+ /* ______________________________ */
+ /*| |*/
+ /*| video decoder subscription |*/
+ /*|______________________________|*/
+
+ /* --- MPEG4 & H263 --- */
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+
+ err = VideoEditorVideoDecoder_getInterface_MPEG4( &videoDecoderType,
+ (M4OSA_Void *)&pVideoDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4DECODER_MPEG4 interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerVideoDecoder( pContext, videoDecoderType,
+ pVideoDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register MPEG4 decoder");
+#endif /* M4VSS_SUPPORT_VIDEC_3GP */
+
+
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+
+ err = VideoEditorVideoDecoder_getInterface_H264( &videoDecoderType,
+ (M4OSA_Void *)&pVideoDecoderInterface);
+
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4DECODER_AVC interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerVideoDecoder( pContext, videoDecoderType,
+ pVideoDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register AVC decoder");
+#endif /* M4VSS_SUPPORT_VIDEO_AVC */
+
+
+ /* ______________________________ */
+ /*| |*/
+ /*| audio decoder subscription |*/
+ /*|______________________________|*/
+
+ /* --- AMRNB --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
+ err = VideoEditorAudioDecoder_getInterface_AMRNB(&audioDecoderType,
+ &pAudioDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4AD PHILIPS AMRNB interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerAudioDecoder( pContext, audioDecoderType,
+ pAudioDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register PHILIPS AMRNB decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AMRNB */
+
+ /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AAC
+
+ err = VideoEditorAudioDecoder_getInterface_AAC(&audioDecoderType,
+ &pAudioDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4AD PHILIPS AAC interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerAudioDecoder( pContext, audioDecoderType,
+ pAudioDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register PHILIPS AAC decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AAC */
+
+ /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_MP3
+
+ err = VideoEditorAudioDecoder_getInterface_MP3(&audioDecoderType,
+ &pAudioDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4AD PHILIPS MP3 interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerAudioDecoder( pContext, audioDecoderType,
+ pAudioDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register PHILIPS MP3 decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_MP3 */
+
+ /* --- EVRC --- */
+
+
+ /* _______________________ */
+ /*| |*/
+ /*| writer subscription |*/
+ /*|_______________________|*/
+
+ /* --- PCM --- */
+
+
+ /* --- 3GPP --- */
+
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+ /* retrieves the 3GPP writer media type and pointer to functions*/
+ err = M4WRITER_3GP_getInterfaces( &writerMediaType,
+ &pWriterGlobalInterface,
+ &pWriterDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4WRITER_3GP interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerWriter( pContext, writerMediaType,
+ pWriterGlobalInterface,
+ pWriterDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register 3GPP writer");
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+
+
+ /* ______________________________ */
+ /*| |*/
+ /*| video encoder subscription |*/
+ /*|______________________________|*/
+
+ /* --- MPEG4 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+ /* retrieves the MPEG4 encoder type and pointer to functions*/
+ err = VideoEditorVideoEncoder_getInterface_MPEG4(&videoCodecType,
+ &pVideoCodecInterface,
+ M4ENCODER_OPEN_ADVANCED);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4MP4E_MPEG4 interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerVideoEncoder( pContext, videoCodecType,
+ pVideoCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register video MPEG4 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+ /* --- H263 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+ /* retrieves the H263 encoder type and pointer to functions*/
+ err = VideoEditorVideoEncoder_getInterface_H263(&videoCodecType,
+ &pVideoCodecInterface,
+ M4ENCODER_OPEN_ADVANCED);
+
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4MP4E_H263 interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerVideoEncoder( pContext, videoCodecType,
+ pVideoCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register video H263 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+ /* retrieves the H264 (AVC) encoder type and pointer to functions*/
+ err = VideoEditorVideoEncoder_getInterface_H264(&videoCodecType,
+ &pVideoCodecInterface,
+ M4ENCODER_OPEN_ADVANCED);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4H264E interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register video H264 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AVC */
+
+ /* ______________________________ */
+ /*| |*/
+ /*| audio encoder subscription |*/
+ /*|______________________________|*/
+
+ /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AMR
+ /* retrieves the AMR encoder type and pointer to functions*/
+ err = VideoEditorAudioEncoder_getInterface_AMRNB(&audioCodecType,
+ &pAudioCodecInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4AMR interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerAudioEncoder( pContext, audioCodecType,
+ pAudioCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register audio AMR encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AMR */
+
+ /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AAC
+ /* retrieves the AAC encoder type and pointer to functions*/
+ err = VideoEditorAudioEncoder_getInterface_AAC(&audioCodecType,
+ &pAudioCodecInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4AAC interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerAudioEncoder( pContext, audioCodecType,
+ pAudioCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register audio AAC encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AAC */
+
+
+
+ /* --- MP3 --- */
+#ifdef M4VSS_SUPPORT_ENCODER_MP3
+ /* retrieves the MP3 encoder type and pointer to functions*/
+ err = VideoEditorAudioEncoder_getInterface_MP3(&audioCodecType,
+ &pAudioCodecInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4MP3E interface allocation error");
+ return err;
+ }
+ err = M4MCS_registerAudioEncoder( pContext, audioCodecType,
+ pAudioCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4MCS_subscribeMediaAndCodec: can't register audio MP3 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MP3 */
+
+ return err;
+}
+
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c b/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c
new file mode 100755
index 0000000..749f68e
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file M4MCS_VideoPreProcessing.c
+ * @brief MCS implementation
+ * @note This file implements the encoder callback of the MCS.
+ *************************************************************************
+ **/
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+/* OSAL headers */
+#include "M4OSA_Memory.h" /* OSAL memory management */
+#include "M4OSA_Debug.h" /* OSAL debug management */
+
+
+/* Core headers */
+#include "M4MCS_InternalTypes.h"
+#include "M4MCS_ErrorCodes.h"
+
+/**
+ * Video preprocessing interface definition */
+#include "M4VPP_API.h"
+
+/**
+ * Video filters */
+#include "M4VIFI_FiltersAPI.h" /**< for M4VIFI_ResizeBilinearYUV420toYUV420() */
+
+#ifndef M4MCS_AUDIOONLY
+#include "M4AIR_API.h"
+#endif /*M4MCS_AUDIOONLY*/
+/**/
+
+
+
+
+/*
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ * M4VIFI_ImagePlane* pPlaneOut)
+ * @brief Do the video rendering and the resize (if needed)
+ * @note It is called by the video encoder
+ * @param pContext (IN) VPP context, which actually is the MCS internal context in our case
+ * @param pPlaneIn (IN) Contains the image
+ * @param pPlaneOut (IN/OUT) Pointer to an array of 3 planes that will contain the output
+ * YUV420 image
+ * @return M4NO_ERROR: No error
+ * @return M4MCS_ERR_VIDEO_DECODE_ERROR: the video decoding failed
+ * @return M4MCS_ERR_RESIZE_ERROR: the resizing failed
+ * @return Any error returned by an underlying module
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ M4VIFI_ImagePlane* pPlaneOut)
+{
+ M4OSA_ERR err = M4NO_ERROR;
+
+/* This part is used only if video codecs are compiled*/
+#ifndef M4MCS_AUDIOONLY
+ /**
+ * The VPP context is actually the MCS context! */
+ M4MCS_InternalContext *pC = (M4MCS_InternalContext*)(pContext);
+
+ M4_MediaTime mtCts = pC->dViDecCurrentCts;
+
+ /**
+ * When closing after an error occurred, it may happen that pReaderVideoAU->m_dataAddress has
+ * not been allocated yet. When closing in pause mode, the decoder can be null.
+ * We don't want an error to be returned because it would interrupt the close process and
+ * thus some resources would be locked. So we return M4NO_ERROR.
+ */
+ /* Initialize to black plane the output plane if the media rendering
+ is black borders */
+ if(pC->MediaRendering == M4MCS_kBlackBorders)
+ {
+ memset((void *)pPlaneOut[0].pac_data,Y_PLANE_BORDER_VALUE,
+ (pPlaneOut[0].u_height*pPlaneOut[0].u_stride));
+ memset((void *)pPlaneOut[1].pac_data,U_PLANE_BORDER_VALUE,
+ (pPlaneOut[1].u_height*pPlaneOut[1].u_stride));
+ memset((void *)pPlaneOut[2].pac_data,V_PLANE_BORDER_VALUE,
+ (pPlaneOut[2].u_height*pPlaneOut[2].u_stride));
+ }
+ else if ((M4OSA_NULL == pC->ReaderVideoAU.m_dataAddress) ||
+ (M4OSA_NULL == pC->pViDecCtxt))
+ {
+ /**
+ * We must fill the input of the encoder with a dummy image, because
+ * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */
+ memset((void *)pPlaneOut[0].pac_data,0,
+ pPlaneOut[0].u_stride * pPlaneOut[0].u_height);
+ memset((void *)pPlaneOut[1].pac_data,0,
+ pPlaneOut[1].u_stride * pPlaneOut[1].u_height);
+ memset((void *)pPlaneOut[2].pac_data,0,
+ pPlaneOut[2].u_stride * pPlaneOut[2].u_height);
+
+ M4OSA_TRACE1_0("M4MCS_intApplyVPP: pReaderVideoAU->m_dataAddress is M4OSA_NULL,\
+ returning M4NO_ERROR");
+ return M4NO_ERROR;
+ }
+
+ if(pC->isRenderDup == M4OSA_FALSE)
+ {
+ /**
+ * m_pPreResizeFrame different than M4OSA_NULL means that resizing is needed */
+ if (M4OSA_NULL != pC->pPreResizeFrame)
+ {
+ /** FB 2008/10/20:
+ Used for cropping and black borders*/
+ M4AIR_Params Params;
+
+ M4OSA_TRACE3_0("M4MCS_intApplyVPP: Need to resize");
+ err = pC->m_pVideoDecoder->m_pFctRender(pC->pViDecCtxt, &mtCts,
+ pC->pPreResizeFrame, M4OSA_TRUE);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_intApplyVPP: m_pFctRender returns 0x%x!", err);
+ return err;
+ }
+
+ if(pC->MediaRendering == M4MCS_kResizing)
+ {
+ /*
+ * Call the resize filter. From the intermediate frame to the encoder
+ * image plane
+ */
+ err = M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL,
+ pC->pPreResizeFrame, pPlaneOut);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_intApplyVPP: M4ViFilResizeBilinearYUV420toYUV420\
+ returns 0x%x!", err);
+ return err;
+ }
+ }
+ else
+ {
+ M4VIFI_ImagePlane pImagePlanesTemp[3];
+ M4VIFI_ImagePlane* pPlaneTemp;
+ M4OSA_UInt8* pOutPlaneY = pPlaneOut[0].pac_data +
+ pPlaneOut[0].u_topleft;
+ M4OSA_UInt8* pOutPlaneU = pPlaneOut[1].pac_data +
+ pPlaneOut[1].u_topleft;
+ M4OSA_UInt8* pOutPlaneV = pPlaneOut[2].pac_data +
+ pPlaneOut[2].u_topleft;
+ M4OSA_UInt8* pInPlaneY = M4OSA_NULL;
+ M4OSA_UInt8* pInPlaneU = M4OSA_NULL;
+ M4OSA_UInt8* pInPlaneV = M4OSA_NULL;
+ M4OSA_UInt32 i = 0;
+
+ /*FB 2008/10/20: to keep media aspect ratio*/
+ /*Initialize AIR Params*/
+ Params.m_inputCoord.m_x = 0;
+ Params.m_inputCoord.m_y = 0;
+ Params.m_inputSize.m_height = pC->pPreResizeFrame->u_height;
+ Params.m_inputSize.m_width = pC->pPreResizeFrame->u_width;
+ Params.m_outputSize.m_width = pPlaneOut->u_width;
+ Params.m_outputSize.m_height = pPlaneOut->u_height;
+ Params.m_bOutputStripe = M4OSA_FALSE;
+ Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+
+ /**
+ Media rendering: Black borders*/
+ if(pC->MediaRendering == M4MCS_kBlackBorders)
+ {
+ pImagePlanesTemp[0].u_width = pPlaneOut[0].u_width;
+ pImagePlanesTemp[0].u_height = pPlaneOut[0].u_height;
+ pImagePlanesTemp[0].u_stride = pPlaneOut[0].u_width;
+ pImagePlanesTemp[0].u_topleft = 0;
+
+ pImagePlanesTemp[1].u_width = pPlaneOut[1].u_width;
+ pImagePlanesTemp[1].u_height = pPlaneOut[1].u_height;
+ pImagePlanesTemp[1].u_stride = pPlaneOut[1].u_width;
+ pImagePlanesTemp[1].u_topleft = 0;
+
+ pImagePlanesTemp[2].u_width = pPlaneOut[2].u_width;
+ pImagePlanesTemp[2].u_height = pPlaneOut[2].u_height;
+ pImagePlanesTemp[2].u_stride = pPlaneOut[2].u_width;
+ pImagePlanesTemp[2].u_topleft = 0;
+
+ /* Allocates plan in local image plane structure */
+ pImagePlanesTemp[0].pac_data =
+ (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[0]\
+ .u_width * pImagePlanesTemp[0].u_height, M4VS,
+ (M4OSA_Char *)"M4xVSS_PictureCallbackFct: temporary plane bufferY") ;
+ if(pImagePlanesTemp[0].pac_data == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
+ return M4ERR_ALLOC;
+ }
+ pImagePlanesTemp[1].pac_data =
+ (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[1]\
+ .u_width * pImagePlanesTemp[1].u_height, M4VS,
+ (M4OSA_Char *)"M4xVSS_PictureCallbackFct: temporary plane bufferU") ;
+ if(pImagePlanesTemp[1].pac_data == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
+ return M4ERR_ALLOC;
+ }
+ pImagePlanesTemp[2].pac_data =
+ (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(pImagePlanesTemp[2]\
+ .u_width * pImagePlanesTemp[2].u_height,
+ M4VS,(M4OSA_Char *)"M4xVSS_PictureCallbackFct: temporary plane bufferV") ;
+ if(pImagePlanesTemp[2].pac_data == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
+ return M4ERR_ALLOC;
+ }
+
+ pInPlaneY = pImagePlanesTemp[0].pac_data ;
+ pInPlaneU = pImagePlanesTemp[1].pac_data ;
+ pInPlaneV = pImagePlanesTemp[2].pac_data ;
+
+ memset((void *)pImagePlanesTemp[0].pac_data,Y_PLANE_BORDER_VALUE,
+ (pImagePlanesTemp[0].u_height*pImagePlanesTemp[0].u_stride));
+ memset((void *)pImagePlanesTemp[1].pac_data,U_PLANE_BORDER_VALUE,
+ (pImagePlanesTemp[1].u_height*pImagePlanesTemp[1].u_stride));
+ memset((void *)pImagePlanesTemp[2].pac_data,V_PLANE_BORDER_VALUE,
+ (pImagePlanesTemp[2].u_height*pImagePlanesTemp[2].u_stride));
+
+ if((M4OSA_UInt32)((pC->pPreResizeFrame->u_height * pPlaneOut->u_width)\
+ /pC->pPreResizeFrame->u_width) <= pPlaneOut->u_height)
+ //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+ {
+ /*it is height so black borders will be on the top and on the bottom side*/
+ Params.m_outputSize.m_width = pPlaneOut->u_width;
+ Params.m_outputSize.m_height =
+ (M4OSA_UInt32)
+ ((pC->pPreResizeFrame->u_height * pPlaneOut->u_width)\
+ /pC->pPreResizeFrame->u_width);
+ /*number of lines at the top*/
+ pImagePlanesTemp[0].u_topleft =
+ (M4MCS_ABS((M4OSA_Int32)
+ (pImagePlanesTemp[0].u_height\
+ -Params.m_outputSize.m_height)>>1)) *
+ pImagePlanesTemp[0].u_stride;
+ pImagePlanesTemp[0].u_height = Params.m_outputSize.m_height;
+ pImagePlanesTemp[1].u_topleft =
+ (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_height\
+ -(Params.m_outputSize.m_height>>1)))>>1)\
+ * pImagePlanesTemp[1].u_stride;
+ pImagePlanesTemp[1].u_height = Params.m_outputSize.m_height>>1;
+ pImagePlanesTemp[2].u_topleft =
+ (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_height\
+ -(Params.m_outputSize.m_height>>1)))>>1)\
+ * pImagePlanesTemp[2].u_stride;
+ pImagePlanesTemp[2].u_height = Params.m_outputSize.m_height>>1;
+ }
+ else
+ {
+ /*it is width so black borders will be on the left and right side*/
+ Params.m_outputSize.m_height = pPlaneOut->u_height;
+ Params.m_outputSize.m_width =
+ (M4OSA_UInt32)((pC->pPreResizeFrame->u_width
+ * pPlaneOut->u_height)\
+ /pC->pPreResizeFrame->u_height);
+
+ pImagePlanesTemp[0].u_topleft =
+ (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_width-\
+ Params.m_outputSize.m_width)>>1));
+ pImagePlanesTemp[0].u_width = Params.m_outputSize.m_width;
+ pImagePlanesTemp[1].u_topleft =
+ (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_width-\
+ (Params.m_outputSize.m_width>>1)))>>1);
+ pImagePlanesTemp[1].u_width = Params.m_outputSize.m_width>>1;
+ pImagePlanesTemp[2].u_topleft =
+ (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_width-\
+ (Params.m_outputSize.m_width>>1)))>>1);
+ pImagePlanesTemp[2].u_width = Params.m_outputSize.m_width>>1;
+ }
+
+ /*Width and height have to be even*/
+ Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+ Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+ Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+ Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+ pImagePlanesTemp[0].u_width = (pImagePlanesTemp[0].u_width>>1)<<1;
+ pImagePlanesTemp[1].u_width = (pImagePlanesTemp[1].u_width>>1)<<1;
+ pImagePlanesTemp[2].u_width = (pImagePlanesTemp[2].u_width>>1)<<1;
+ pImagePlanesTemp[0].u_height = (pImagePlanesTemp[0].u_height>>1)<<1;
+ pImagePlanesTemp[1].u_height = (pImagePlanesTemp[1].u_height>>1)<<1;
+ pImagePlanesTemp[2].u_height = (pImagePlanesTemp[2].u_height>>1)<<1;
+
+ /*Check that values are coherent*/
+ if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
+ {
+ Params.m_inputSize.m_width = Params.m_outputSize.m_width;
+ }
+ else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
+ {
+ Params.m_inputSize.m_height = Params.m_outputSize.m_height;
+ }
+ pPlaneTemp = pImagePlanesTemp;
+ }
+
+ /**
+ Media rendering: Cropping*/
+ if(pC->MediaRendering == M4MCS_kCropping)
+ {
+ Params.m_outputSize.m_height = pPlaneOut->u_height;
+ Params.m_outputSize.m_width = pPlaneOut->u_width;
+ if((Params.m_outputSize.m_height * Params.m_inputSize.m_width)\
+ /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
+ {
+ /*height will be cropped*/
+ Params.m_inputSize.m_height =
+ (M4OSA_UInt32)((Params.m_outputSize.m_height \
+ * Params.m_inputSize.m_width) /
+ Params.m_outputSize.m_width);
+ Params.m_inputSize.m_height =
+ (Params.m_inputSize.m_height>>1)<<1;
+ Params.m_inputCoord.m_y =
+ (M4OSA_Int32)((M4OSA_Int32)
+ ((pC->pPreResizeFrame->u_height\
+ - Params.m_inputSize.m_height))>>1);
+ }
+ else
+ {
+ /*width will be cropped*/
+ Params.m_inputSize.m_width =
+ (M4OSA_UInt32)((Params.m_outputSize.m_width\
+ * Params.m_inputSize.m_height) /
+ Params.m_outputSize.m_height);
+ Params.m_inputSize.m_width =
+ (Params.m_inputSize.m_width>>1)<<1;
+ Params.m_inputCoord.m_x =
+ (M4OSA_Int32)((M4OSA_Int32)
+ ((pC->pPreResizeFrame->u_width\
+ - Params.m_inputSize.m_width))>>1);
+ }
+ pPlaneTemp = pPlaneOut;
+ }
+ /**
+ * Call AIR functions */
+ if(M4OSA_NULL == pC->m_air_context)
+ {
+ err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+ Error when initializing AIR: 0x%x", err);
+ return err;
+ }
+ }
+
+ err = M4AIR_configure(pC->m_air_context, &Params);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+ Error when configuring AIR: 0x%x", err);
+ M4AIR_cleanUp(pC->m_air_context);
+ return err;
+ }
+
+ err = M4AIR_get(pC->m_air_context, pC->pPreResizeFrame,
+ pPlaneTemp);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+ Error when getting AIR plane: 0x%x", err);
+ M4AIR_cleanUp(pC->m_air_context);
+ return err;
+ }
+
+ if(pC->MediaRendering == M4MCS_kBlackBorders)
+ {
+ for(i=0; i<pPlaneOut[0].u_height; i++)
+ {
+ memcpy( (void *)pOutPlaneY,
+ (void *)pInPlaneY,
+ pPlaneOut[0].u_width);
+ pInPlaneY += pPlaneOut[0].u_width;
+ pOutPlaneY += pPlaneOut[0].u_stride;
+ }
+ for(i=0; i<pPlaneOut[1].u_height; i++)
+ {
+ memcpy( (void *)pOutPlaneU,
+ (void *)pInPlaneU,
+ pPlaneOut[1].u_width);
+ pInPlaneU += pPlaneOut[1].u_width;
+ pOutPlaneU += pPlaneOut[1].u_stride;
+ }
+ for(i=0; i<pPlaneOut[2].u_height; i++)
+ {
+ memcpy( (void *)pOutPlaneV,
+ (void *)pInPlaneV,
+ pPlaneOut[2].u_width);
+ pInPlaneV += pPlaneOut[2].u_width;
+ pOutPlaneV += pPlaneOut[2].u_stride;
+ }
+
+ for(i=0; i<3; i++)
+ {
+ if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
+ {
+ free(
+ pImagePlanesTemp[i].pac_data);
+ pImagePlanesTemp[i].pac_data = M4OSA_NULL;
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ M4OSA_TRACE3_0("M4MCS_intApplyVPP: Don't need resizing");
+ err = pC->m_pVideoDecoder->m_pFctRender(pC->pViDecCtxt,
+ &mtCts, pPlaneOut,
+ M4OSA_TRUE);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4MCS_intApplyVPP: m_pFctRender returns 0x%x!", err);
+ return err;
+ }
+ }
+ pC->lastDecodedPlane = pPlaneOut;
+ }
+ else
+ {
+ /* Copy last decoded plane to output plane */
+ memcpy((void *)pPlaneOut[0].pac_data,
+ (void *)pC->lastDecodedPlane[0].pac_data,
+ (pPlaneOut[0].u_height * pPlaneOut[0].u_width));
+ memcpy((void *)pPlaneOut[1].pac_data,
+ (void *)pC->lastDecodedPlane[1].pac_data,
+ (pPlaneOut[1].u_height * pPlaneOut[1].u_width));
+ memcpy((void *)pPlaneOut[2].pac_data,
+ (void *)pC->lastDecodedPlane[2].pac_data,
+ (pPlaneOut[2].u_height * pPlaneOut[2].u_width));
+ pC->lastDecodedPlane = pPlaneOut;
+ }
+
+
+#endif /*M4MCS_AUDIOONLY*/
+ M4OSA_TRACE3_0("M4MCS_intApplyVPP: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+
diff --git a/libvideoeditor/vss/src/Android.mk b/libvideoeditor/vss/src/Android.mk
new file mode 100755
index 0000000..df6925e
--- /dev/null
+++ b/libvideoeditor/vss/src/Android.mk
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvss
+#
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_core
+
+LOCAL_SRC_FILES:= \
+ M4PTO3GPP_API.c \
+ M4PTO3GPP_VideoPreProcessing.c \
+ M4VIFI_xVSS_RGB565toYUV420.c \
+ M4xVSS_API.c \
+ M4xVSS_internal.c \
+ M4VSS3GPP_AudioMixing.c \
+ M4VSS3GPP_Clip.c \
+ M4VSS3GPP_ClipAnalysis.c \
+ M4VSS3GPP_Codecs.c \
+ M4VSS3GPP_Edit.c \
+ M4VSS3GPP_EditAudio.c \
+ M4VSS3GPP_EditVideo.c \
+ M4VSS3GPP_MediaAndCodecSubscription.c \
+ M4ChannelConverter.c \
+ M4VD_EXTERNAL_BitstreamParser.c \
+ M4AIR_API.c \
+ M4READER_Pcm.c \
+ M4PCMR_CoreReader.c \
+ M4AD_Null.c \
+ M4AMRR_CoreReader.c \
+ M4READER_Amr.c \
+ M4VD_Tools.c \
+ VideoEditorResampler.cpp \
+ M4DECODER_Null.c
+
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils libaudioutils
+
+LOCAL_STATIC_LIBRARIES := \
+ libvideoeditor_osal \
+ libvideoeditor_3gpwriter \
+ libvideoeditor_mcs \
+ libvideoeditor_videofilters \
+ libvideoeditor_stagefrightshells
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/libvideoeditor/osal/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
+ $(TOP)/frameworks/base/services/audioflinger \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/native/services/audioflinger \
+ $(TOP)/system/media/audio_effects/include \
+ $(TOP)/system/media/audio_utils/include
+
+
+LOCAL_SHARED_LIBRARIES += libdl
+
+# Additional flags passed to the linker.
+LOCAL_LDLIBS := \
+ -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar \
+ -DM4xVSS_RESERVED_MOOV_DISK_SPACEno \
+ -DDECODE_GIF_ON_SAVING
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/src/M4AD_Null.c b/libvideoeditor/vss/src/M4AD_Null.c
new file mode 100755
index 0000000..cd1ec73
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AD_Null.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file   M4AD_Null.c
+ * @brief  Implementation of the "null" audio decoder public interface
+ * @note   This file implements a "null" audio decoder, i.e. a decoder
+ *         that does nothing except passing access units through from
+ *         the reader
+*************************************************************************
+*/
+#include "M4OSA_Debug.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Debug.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4AD_Common.h"
+#include "M4AD_Null.h"
+
+#define M4AD_FORCE_16BITS
+
+/**
+ ************************************************************************
+ * NULL Audio Decoder version information
+ ************************************************************************
+*/
+/* CHANGE_VERSION_HERE */
+#define M4AD_NULL_MAJOR 1
+#define M4AD_NULL_MINOR 1
+#define M4AD_NULL_REVISION 4
+
+/**
+ ************************************************************************
+ * structure M4AD_NullContext
+ * @brief Internal null decoder context
+ ************************************************************************
+*/
+typedef struct
+{
+    /**< Pointer to the stream handler provided by the user; kept so that
+         M4AD_NULL_step can inspect m_byteSampleSize (8- vs 16-bit PCM). */
+    M4_AudioStreamHandler* m_pAudioStreamhandler;
+} M4AD_NullContext;
+
+
+/**
+ ************************************************************************
+ * Null (pass-through) audio decoder functions definition
+ ************************************************************************
+*/
+
+/**
+ ************************************************************************
+ * @brief Creates an instance of the null decoder
+ * @note Allocates the context
+ *
+ * @param pContext: (OUT) Context of the decoder
+ * @param pStreamHandler: (IN) Pointer to an audio stream description
+ * @param pUserData: (IN) Pointer to User data
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_ALLOC a memory allocation has failed
+ * @return M4ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_create( M4AD_Context* pContext,
+                            M4_AudioStreamHandler *pStreamHandler,
+                            void* pUserData)
+{
+    M4AD_NullContext* pCtx = M4OSA_NULL;
+
+    M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
+         "M4AD_NULL_create: invalid context pointer");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4AD_NULL_create: invalid pointer pStreamHandler");
+
+    /* Allocate the (tiny) decoder context. */
+    pCtx = (M4AD_NullContext*)M4OSA_32bitAlignedMalloc(sizeof(M4AD_NullContext),
+                M4DECODER_AUDIO, (M4OSA_Char *)"M4AD_NullContext");
+    if (M4OSA_NULL == pCtx)
+    {
+        M4OSA_TRACE1_0("Can not allocate null decoder context");
+        return M4ERR_ALLOC;
+    }
+
+    /* Remember the stream description and hand the context back. */
+    pCtx->m_pAudioStreamhandler = pStreamHandler;
+    *pContext = pCtx;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Destroys the instance of the null decoder
+ * @note After this call the context is invalid
+ *
+ * @param context: (IN) Context of the decoder
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_destroy(M4AD_Context context)
+{
+    M4OSA_DEBUG_IF1((context == M4OSA_NULL), M4ERR_PARAMETER, "M4AD_NULL_destroy: invalid context");
+
+    /* Nothing was allocated besides the context structure itself. */
+    free((M4AD_NullContext*)context);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Simply output the given audio data
+ * @note
+ *
+ * @param context: (IN) Context of the decoder
+ * @param pInputBuffer: (IN/OUT)Input Data buffer. It contains at least one audio frame.
+ * The size of the buffer must be updated inside the function
+ * to reflect the size of the actually decoded data.
+ * (e.g. the first frame in pInputBuffer)
+ * @param pDecodedPCMBuffer: (OUT) Output PCM buffer (decoded data).
+ * @param jumping: (IN) M4OSA_TRUE if a jump was just done, M4OSA_FALSE otherwise.
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_step(M4AD_Context context, M4AD_Buffer *pInputBuffer,
+                         M4AD_Buffer *pDecodedPCMBuffer, M4OSA_Bool jumping)
+{
+    /* Pass-through "decode": copy the access unit into the PCM buffer,
+       expanding 8-bit samples to 16 bits when M4AD_FORCE_16BITS is set.
+       The `jumping` flag is accepted for interface compatibility but unused. */
+    M4AD_NullContext* pC = (M4AD_NullContext*)context;
+
+    /*The VPS sends a zero buffer at the end*/
+    if (0 == pInputBuffer->m_bufferSize)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+#ifdef M4AD_FORCE_16BITS
+    /*if read samples are 8 bits, complete them to 16 bits*/
+    if (pC->m_pAudioStreamhandler->m_byteSampleSize == 1)
+    {
+        M4OSA_UInt32 i;
+        M4OSA_Int16  val;
+
+        /* BUGFIX: each 8-bit input sample produces TWO output bytes, so the
+           output buffer must hold twice the input size. The previous check
+           (input <= output) allowed a write past the end of the PCM buffer. */
+        if (pInputBuffer->m_bufferSize * 2 > pDecodedPCMBuffer->m_bufferSize)
+        {
+            return M4ERR_PARAMETER;
+        }
+
+        for (i = 0; i < pInputBuffer->m_bufferSize; i++)
+        {
+            /* Unsigned 8-bit PCM is biased by 128; re-center to signed.
+               NOTE(review): the 16-bit value is stored as (val>>8, val&0xff),
+               i.e. without a <<8 amplitude scale — kept as in the original;
+               confirm this matches the consumer's expectations. */
+            val = (M4OSA_Int16)((M4OSA_UInt8)(pInputBuffer->m_dataAddress[i]) - 128);
+
+            pDecodedPCMBuffer->m_dataAddress[i*2]   = (M4OSA_Int8)(val>>8);
+            pDecodedPCMBuffer->m_dataAddress[i*2+1] = (M4OSA_Int8)(val&0x00ff);
+        }
+    }
+    else
+    {
+        if (pInputBuffer->m_bufferSize > pDecodedPCMBuffer->m_bufferSize)
+        {
+            return M4ERR_PARAMETER;
+        }
+        memcpy((void *)pDecodedPCMBuffer->m_dataAddress, (void *)pInputBuffer->m_dataAddress,
+               pInputBuffer->m_bufferSize );
+    }
+#else /*M4AD_FORCE_16BITS*/
+    if (pInputBuffer->m_bufferSize > pDecodedPCMBuffer->m_bufferSize)
+    {
+        return M4ERR_PARAMETER;
+    }
+    memcpy((void *)pDecodedPCMBuffer->m_dataAddress, (void *)pInputBuffer->m_dataAddress,
+           pInputBuffer->m_bufferSize );
+#endif /*M4AD_FORCE_16BITS*/
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Gets the decoder version
+ * @note The version is given in a M4_VersionInfo structure
+ *
+ * @param pValue: (OUT) Pointer to the version structure
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER pVersionInfo pointer is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_getVersion(M4_VersionInfo* pVersionInfo)
+{
+    M4OSA_DEBUG_IF1((pVersionInfo == 0), M4ERR_PARAMETER,
+         "M4AD_NULL_getVersion: invalid pointer pVersionInfo");
+
+    /* CHANGE_VERSION_HERE */
+    pVersionInfo->m_structSize = sizeof(M4_VersionInfo);
+    pVersionInfo->m_major      = M4AD_NULL_MAJOR;    /*major version of the component*/
+    pVersionInfo->m_minor      = M4AD_NULL_MINOR;    /*minor version of the component*/
+    pVersionInfo->m_revision   = M4AD_NULL_REVISION; /*revision version of the component*/
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * getInterface function definition of the null (pass-through) decoder
+ ************************************************************************
+*/
+
+/**
+ ************************************************************************
+ * @brief Retrieves the interface implemented by the decoder
+ * @param pDecoderType : pointer on an M4AD_Type (allocated by the caller)
+ * that will be filled with the decoder type supported by
+ * this decoder
+ * @param pDecoderInterface : address of a pointer that will be set to the interface
+ * implemented by this decoder. The interface is a structure
+ * allocated by the function and must be un-allocated by the
+ * caller.
+ *
+ * @return M4NO_ERROR if OK
+ * @return M4ERR_ALLOC if allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_getInterface( M4AD_Type *pDecoderType, M4AD_Interface **pDecoderInterface)
+{
+    M4AD_Interface* pItf;
+
+    /* The interface structure is allocated here and owned by the caller. */
+    pItf = (M4AD_Interface*)M4OSA_32bitAlignedMalloc( sizeof(M4AD_Interface),
+               M4DECODER_AUDIO, (M4OSA_Char *)"M4AD_Interface" );
+    *pDecoderInterface = pItf;
+    if (M4OSA_NULL == pItf)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    /* This decoder simply passes PCM through. */
+    *pDecoderType = M4AD_kTypePCM;
+
+    /* Mandatory entry points. */
+    pItf->m_pFctCreateAudioDec     = M4AD_NULL_create;
+    pItf->m_pFctDestroyAudioDec    = M4AD_NULL_destroy;
+    pItf->m_pFctStepAudioDec       = M4AD_NULL_step;
+    pItf->m_pFctGetVersionAudioDec = M4AD_NULL_getVersion;
+    /* Optional entry points this decoder does not provide. */
+    pItf->m_pFctStartAudioDec      = M4OSA_NULL;
+    pItf->m_pFctResetAudioDec      = M4OSA_NULL;
+    pItf->m_pFctSetOptionAudioDec  = M4OSA_NULL;
+    pItf->m_pFctGetOptionAudioDec  = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4AIR_API.c b/libvideoeditor/vss/src/M4AIR_API.c
new file mode 100755
index 0000000..62897b0
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AIR_API.c
@@ -0,0 +1,968 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file M4AIR_API.c
+ * @brief Area of Interest Resizer API
+ *************************************************************************
+ */
+
+#define M4AIR_YUV420_FORMAT_SUPPORTED
+#define M4AIR_YUV420A_FORMAT_SUPPORTED
+
+/************************* COMPILATION CHECKS ***************************/
+#ifndef M4AIR_YUV420_FORMAT_SUPPORTED
+#ifndef M4AIR_BGR565_FORMAT_SUPPORTED
+#ifndef M4AIR_RGB565_FORMAT_SUPPORTED
+#ifndef M4AIR_BGR888_FORMAT_SUPPORTED
+#ifndef M4AIR_RGB888_FORMAT_SUPPORTED
+#ifndef M4AIR_JPG_FORMAT_SUPPORTED
+
+#error "Please define at least one input format for the AIR component"
+
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+/******************************* INCLUDES *******************************/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Mutex.h"
+#include "M4OSA_Memory.h"
+#include "M4VIFI_FiltersAPI.h"
+#include "M4AIR_API.h"
+
+/************************ M4AIR INTERNAL TYPES DEFINITIONS ***********************/
+
+/**
+ ******************************************************************************
+ * enum M4AIR_States
+ * @brief The following enumeration defines the internal states of the AIR.
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4AIR_kCreated,    /**< State after M4AIR_create has been called */
+    M4AIR_kConfigured  /**< State after M4AIR_configure has been called;
+                            required before M4AIR_get may run */
+}M4AIR_States;
+
+
+/**
+ ******************************************************************************
+ * struct M4AIR_InternalContext
+ * @brief The following structure is the internal context of the AIR.
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4AIR_States m_state;                /**< Internal state */
+    M4AIR_InputFormatType m_inputFormat; /**< Input format like YUV420Planar,
+                                              RGB565, JPG, etc ... */
+    M4AIR_Params m_params;               /**< Current input parameters of the processing */
+    /* The bilinear resize filter works in 16.16 fixed point; one entry per
+       plane (Y, U, V and, for YUV420A, alpha). */
+    M4OSA_UInt32 u32_x_inc[4];           /**< ratio between input and output width for YUV */
+    M4OSA_UInt32 u32_y_inc[4];           /**< ratio between input and output height for YUV */
+    M4OSA_UInt32 u32_x_accum_start[4];   /**< horizontal initial accumulator value */
+    M4OSA_UInt32 u32_y_accum_start[4];   /**< vertical initial accumulator value */
+    M4OSA_UInt32 u32_x_accum[4];         /**< save of horizontal accumulator value */
+    M4OSA_UInt32 u32_y_accum[4];         /**< save of vertical accumulator value */
+    M4OSA_UInt8* pu8_data_in[4];         /**< save of input plane pointers
+                                              in case of stripe mode */
+    M4OSA_UInt32 m_procRows;             /**< number of processed rows,
+                                              used in stripe mode only */
+    M4OSA_Bool m_bOnlyCopy;              /**< flag to know if we just perform a copy
+                                              or a bilinear interpolation */
+    M4OSA_Bool m_bFlipX;                 /**< depends on output orientation; used during
+                                              processing to revert processing order in X
+                                              coordinates */
+    M4OSA_Bool m_bFlipY;                 /**< depends on output orientation; used during
+                                              processing to revert processing order in Y
+                                              coordinates */
+    M4OSA_Bool m_bRevertXY;              /**< depends on output orientation; used during
+                                              processing to revert X and Y processing order
+                                              (+-90 degree rotation) */
+}M4AIR_InternalContext;
+
+/********************************* MACROS *******************************/
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer)\
+ if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
+
+
+/********************** M4AIR PUBLIC API IMPLEMENTATION ********************/
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
+ * @brief This function initialize an instance of the AIR.
+ * @param pContext: (IN/OUT) Address of the context to create
+ * @param inputFormat: (IN) input format type.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
+ * @return M4ERR_ALLOC: No more memory is available
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
+{
+    M4AIR_InternalContext* pCtx = M4OSA_NULL ;
+
+    /* Check that the address on the context is not NULL */
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    *pContext = M4OSA_NULL ;
+
+    /* Internal Context creation */
+    pCtx = (M4AIR_InternalContext*)M4OSA_32bitAlignedMalloc(sizeof(M4AIR_InternalContext),
+        M4AIR,(M4OSA_Char *)"AIR internal context") ;
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, pCtx) ;
+
+    /* Check if the input format is supported */
+    switch(inputFormat)
+    {
+#ifdef M4AIR_YUV420_FORMAT_SUPPORTED
+        case M4AIR_kYUV420P:
+            break ;
+#endif
+#ifdef M4AIR_YUV420A_FORMAT_SUPPORTED
+        case M4AIR_kYUV420AP:
+            break ;
+#endif
+        default:
+            /* Unsupported format: release the freshly allocated context
+               and report the failure. */
+            free(pCtx) ;
+            *pContext = M4OSA_NULL ;
+            return M4ERR_AIR_FORMAT_NOT_SUPPORTED ;
+    }
+
+    /**< Save input format and update state */
+    pCtx->m_inputFormat = inputFormat;
+    pCtx->m_state = M4AIR_kCreated;
+
+    /* Return the context to the caller */
+    *pContext = pCtx ;
+
+    return M4NO_ERROR ;
+}
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+ * @brief This function destroys an instance of the AIR component
+ * @param pContext: (IN) Context identifying the instance to destroy
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return M4ERR_STATE: Internal state is incompatible with this function call.
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+{
+    M4AIR_InternalContext* pCtx = (M4AIR_InternalContext*)pContext ;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    /* Destruction is only legal from the created or configured states. */
+    if ((pCtx->m_state != M4AIR_kCreated) && (pCtx->m_state != M4AIR_kConfigured))
+    {
+        return M4ERR_STATE;
+    }
+
+    free(pCtx) ;
+
+    return M4NO_ERROR ;
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+ * @brief This function will configure the AIR.
+ * @note It will set the input and output coordinates and sizes,
+ * and indicates if we will proceed in stripe or not.
+ * In case a M4AIR_get in stripe mode was on going, it will cancel this previous
+ * processing and reset the get process.
+ * @param pContext: (IN) Context identifying the instance
+ * @param pParams->m_bOutputStripe:(IN) Stripe mode.
+ * @param pParams->m_inputCoord: (IN) X,Y coordinates of the first valid pixel in input.
+ * @param pParams->m_inputSize: (IN) input ROI size.
+ * @param pParams->m_outputSize: (IN) output size.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+{
+    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+    M4OSA_UInt32 i,u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+    M4OSA_UInt32 nb_planes;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    /* YUV420 with alpha carries a 4th plane; plain YUV420 has 3. */
+    if(M4AIR_kYUV420AP == pC->m_inputFormat)
+    {
+        nb_planes = 4;
+    }
+    else
+    {
+        nb_planes = 3;
+    }
+
+    /**< Check state */
+    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
+    {
+        return M4ERR_STATE;
+    }
+
+    /** Save parameters */
+    pC->m_params = *pParams;
+
+    /* Check that the input AND output width/height are even.
+       BUGFIX: the original code tested the input height and width twice
+       each and never validated the output dimensions, although its own
+       comment said "input&output". */
+    if( ((pC->m_params.m_inputSize.m_height)  & 0x1) ||
+        ((pC->m_params.m_outputSize.m_height) & 0x1))
+    {
+        return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
+    }
+
+    if( ((pC->m_params.m_inputSize.m_width)  & 0x1) ||
+        ((pC->m_params.m_outputSize.m_width) & 0x1))
+    {
+        return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
+    }
+
+    if(((pC->m_params.m_inputSize.m_width) == (pC->m_params.m_outputSize.m_width))
+        &&((pC->m_params.m_inputSize.m_height) == (pC->m_params.m_outputSize.m_height)))
+    {
+        /**< No resize in this case, we will just copy input in output */
+        pC->m_bOnlyCopy = M4OSA_TRUE;
+    }
+    else
+    {
+        pC->m_bOnlyCopy = M4OSA_FALSE;
+
+        /**< Initialize internal variables used for resize filter */
+        for(i=0;i<nb_planes;i++)
+        {
+            /* Planes 0 (Y) and 3 (alpha) are full size; chroma planes are
+               half size in each direction (rounded up). */
+            u32_width_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_width:\
+                (pC->m_params.m_inputSize.m_width+1)>>1;
+            u32_height_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_height:\
+                (pC->m_params.m_inputSize.m_height+1)>>1;
+            u32_width_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_width:\
+                (pC->m_params.m_outputSize.m_width+1)>>1;
+            u32_height_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_height:\
+                (pC->m_params.m_outputSize.m_height+1)>>1;
+
+            /* Compute horizontal ratio between src and destination width,
+               as 16.16 fixed point. */
+            if (u32_width_out >= u32_width_in)
+            {
+                pC->u32_x_inc[i] = ((u32_width_in-1) * 0x10000) / (u32_width_out-1);
+            }
+            else
+            {
+                pC->u32_x_inc[i] = (u32_width_in * 0x10000) / (u32_width_out);
+            }
+
+            /* Compute vertical ratio between src and destination height,
+               as 16.16 fixed point. */
+            if (u32_height_out >= u32_height_in)
+            {
+                pC->u32_y_inc[i] = ((u32_height_in - 1) * 0x10000) / (u32_height_out-1);
+            }
+            else
+            {
+                pC->u32_y_inc[i] = (u32_height_in * 0x10000) / (u32_height_out);
+            }
+
+            /*
+            Calculate initial accumulator value : u32_y_accum_start.
+            u32_y_accum_start represents a value between 0 and 0.5.
+            */
+            if (pC->u32_y_inc[i] >= 0x10000)
+            {
+                /*
+                Keep the fractional part (integer part is the 16 high bits,
+                fractional part the 16 low bits), halved below.
+                */
+                pC->u32_y_accum_start[i] = pC->u32_y_inc[i] & 0xffff;
+
+                if (!pC->u32_y_accum_start[i])
+                {
+                    pC->u32_y_accum_start[i] = 0x10000;
+                }
+
+                pC->u32_y_accum_start[i] >>= 1;
+            }
+            else
+            {
+                pC->u32_y_accum_start[i] = 0;
+            }
+            /**< Take into account that the Y coordinate can be odd;
+                 in this case we have to add a 0.5 offset for the U and V
+                 planes as they are 2x sub-sampled vs Y */
+            if((pC->m_params.m_inputCoord.m_y&0x1)&&((i==1)||(i==2)))
+            {
+                pC->u32_y_accum_start[i] += 0x8000;
+            }
+
+            /*
+            Calculate initial accumulator value : u32_x_accum_start.
+            u32_x_accum_start represents a value between 0 and 0.5.
+            */
+            if (pC->u32_x_inc[i] >= 0x10000)
+            {
+                pC->u32_x_accum_start[i] = pC->u32_x_inc[i] & 0xffff;
+
+                if (!pC->u32_x_accum_start[i])
+                {
+                    pC->u32_x_accum_start[i] = 0x10000;
+                }
+
+                pC->u32_x_accum_start[i] >>= 1;
+            }
+            else
+            {
+                pC->u32_x_accum_start[i] = 0;
+            }
+            /**< Take into account that the X coordinate can be odd;
+                 in this case we have to add a 0.5 offset for the U and V
+                 planes as they are 2x sub-sampled vs Y */
+            if((pC->m_params.m_inputCoord.m_x&0x1)&&((i==1)||(i==2)))
+            {
+                pC->u32_x_accum_start[i] += 0x8000;
+            }
+        }
+    }
+
+    /**< Reset variable used for stripe mode */
+    pC->m_procRows = 0;
+
+    /**< Initialize vars for X/Y processing order according to orientation */
+    pC->m_bFlipX = M4OSA_FALSE;
+    pC->m_bFlipY = M4OSA_FALSE;
+    pC->m_bRevertXY = M4OSA_FALSE;
+    switch(pParams->m_outputOrientation)
+    {
+        case M4COMMON_kOrientationTopLeft:
+            break;
+        case M4COMMON_kOrientationTopRight:
+            pC->m_bFlipX = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationBottomRight:
+            pC->m_bFlipX = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationBottomLeft:
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationLeftTop:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationRightTop:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationRightBottom:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipX = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationLeftBottom:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipX = M4OSA_TRUE;
+            break;
+        default:
+            return M4ERR_PARAMETER;
+    }
+    /**< Update state */
+    pC->m_state = M4AIR_kConfigured;
+
+    return M4NO_ERROR ;
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
+ * @brief This function will provide the requested resized area of interest according to
+ * settings provided in M4AIR_configure.
+ * @note In case the input format type is JPEG, input plane(s)
+ * in pIn is not used. In normal mode, dimension specified in output plane(s) structure
+ * must be the same than the one specified in M4AIR_configure. In stripe mode, only the
+ * width will be the same, height will be taken as the stripe height (typically 16).
+ * In normal mode, this function is call once to get the full output picture.
+ * In stripe mode, it is called for each stripe till the whole picture has been
+ * retrieved,and the position of the output stripe in the output picture
+ * is internally incremented at each step.
+ * Any call to M4AIR_configure during stripe process will reset this one to the
+ * beginning of the output picture.
+ * @param pContext: (IN) Context identifying the instance
+ * @param pIn: (IN) Plane structure containing input Plane(s).
+ * @param pOut: (IN/OUT) Plane structure containing output Plane(s).
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
+{
+ M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+ M4OSA_UInt32 i,j,k,u32_x_frac,u32_y_frac,u32_x_accum,u32_y_accum,u32_shift;
+ M4OSA_UInt8 *pu8_data_in, *pu8_data_in_org, *pu8_data_in_tmp, *pu8_data_out;
+ M4OSA_UInt8 *pu8_src_top;
+ M4OSA_UInt8 *pu8_src_bottom;
+ M4OSA_UInt32 u32_temp_value;
+ M4OSA_Int32 i32_tmp_offset;
+ M4OSA_UInt32 nb_planes;
+
+
+
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+ /**< Check state */
+ if(M4AIR_kConfigured != pC->m_state)
+ {
+ return M4ERR_STATE;
+ }
+
+ if(M4AIR_kYUV420AP == pC->m_inputFormat)
+ {
+ nb_planes = 4;
+ }
+ else
+ {
+ nb_planes = 3;
+ }
+
+ /**< Loop on each Plane */
+ for(i=0;i<nb_planes;i++)
+ {
+
+ /* Set the working pointers at the beginning of the input/output data field */
+
+ u32_shift = ((i==0)||(i==3))?0:1; /**< Depend on Luma or Chroma */
+
+ if((M4OSA_FALSE == pC->m_params.m_bOutputStripe)\
+ ||((M4OSA_TRUE == pC->m_params.m_bOutputStripe)&&(0 == pC->m_procRows)))
+ {
+ /**< For input, take care about ROI */
+ pu8_data_in = pIn[i].pac_data + pIn[i].u_topleft \
+ + (pC->m_params.m_inputCoord.m_x>>u32_shift)
+ + (pC->m_params.m_inputCoord.m_y >> u32_shift) * pIn[i].u_stride;
+
+ /** Go at end of line/column in case X/Y scanning is flipped */
+ if(M4OSA_TRUE == pC->m_bFlipX)
+ {
+ pu8_data_in += ((pC->m_params.m_inputSize.m_width)>>u32_shift) -1 ;
+ }
+ if(M4OSA_TRUE == pC->m_bFlipY)
+ {
+ pu8_data_in += ((pC->m_params.m_inputSize.m_height>>u32_shift) -1)\
+ * pIn[i].u_stride;
+ }
+
+ /**< Initialize accumulators in case we are using it (bilinear interpolation) */
+ if( M4OSA_FALSE == pC->m_bOnlyCopy)
+ {
+ pC->u32_x_accum[i] = pC->u32_x_accum_start[i];
+ pC->u32_y_accum[i] = pC->u32_y_accum_start[i];
+ }
+
+ }
+ else
+ {
+ /**< In case of stripe mode for other than first stripe, we need to recover input
+ pointer from internal context */
+ pu8_data_in = pC->pu8_data_in[i];
+ }
+
+ /**< In every mode, output data are at the beginning of the output plane */
+ pu8_data_out = pOut[i].pac_data + pOut[i].u_topleft;
+
+ /**< Initialize input offset applied after each pixel */
+ if(M4OSA_FALSE == pC->m_bFlipY)
+ {
+ i32_tmp_offset = pIn[i].u_stride;
+ }
+ else
+ {
+ i32_tmp_offset = -pIn[i].u_stride;
+ }
+
+ /**< In this case, no bilinear interpolation is needed as input and output dimensions
+ are the same */
+ if( M4OSA_TRUE == pC->m_bOnlyCopy)
+ {
+ /**< No +-90° rotation */
+ if(M4OSA_FALSE == pC->m_bRevertXY)
+ {
+ /**< No flip on X abscissa */
+ if(M4OSA_FALSE == pC->m_bFlipX)
+ {
+ /**< Loop on each row */
+ for(j=0;j<pOut[i].u_height;j++)
+ {
+ /**< Copy one whole line */
+ memcpy((void *)pu8_data_out, (void *)pu8_data_in,
+ pOut[i].u_width);
+
+ /**< Update pointers */
+ pu8_data_out += pOut[i].u_stride;
+ if(M4OSA_FALSE == pC->m_bFlipY)
+ {
+ pu8_data_in += pIn[i].u_stride;
+ }
+ else
+ {
+ pu8_data_in -= pIn[i].u_stride;
+ }
+ }
+ }
+ else
+ {
+ /**< Loop on each row */
+ for(j=0;j<pOut[i].u_height;j++)
+ {
+ /**< Loop on each pixel of 1 row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+ *pu8_data_out++ = *pu8_data_in--;
+ }
+
+ /**< Update pointers */
+ pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
+
+ pu8_data_in += pOut[i].u_width + i32_tmp_offset;
+
+ }
+ }
+ }
+ /**< Here we have a +-90° rotation */
+ else
+ {
+
+ /**< Loop on each row */
+ for(j=0;j<pOut[i].u_height;j++)
+ {
+ pu8_data_in_tmp = pu8_data_in;
+
+ /**< Loop on each pixel of 1 row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+ *pu8_data_out++ = *pu8_data_in_tmp;
+
+ /**< Update input pointer in order to go to next/past line */
+ pu8_data_in_tmp += i32_tmp_offset;
+ }
+
+ /**< Update pointers */
+ pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
+ if(M4OSA_FALSE == pC->m_bFlipX)
+ {
+ pu8_data_in ++;
+ }
+ else
+ {
+ pu8_data_in --;
+ }
+ }
+ }
+ }
+ /**< Bilinear interpolation */
+ else
+ {
+
+ if(3 != i) /**< other than alpha plane */
+ {
+ /**No +-90° rotation */
+ if(M4OSA_FALSE == pC->m_bRevertXY)
+ {
+
+ /**< Loop on each row */
+ for(j=0;j<pOut[i].u_height;j++)
+ {
+ /* Vertical weight factor */
+ u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
+
+ /* Reinit horizontal weight factor */
+ u32_x_accum = pC->u32_x_accum_start[i];
+
+
+
+ if(M4OSA_TRUE == pC->m_bFlipX)
+ {
+
+ /**< Loop on each output pixel in a row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+
+ u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+ weight factor */
+
+ pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
+
+ pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+ /* Weighted combination */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+ pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[1]*(16-u32_x_frac) +
+ pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update horizontal accumulator */
+ u32_x_accum += pC->u32_x_inc[i];
+ }
+ }
+
+ else
+ {
+ /**< Loop on each output pixel in a row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+ u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+ weight factor */
+
+ pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
+
+ pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+ /* Weighted combination */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+ pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[0]*(16-u32_x_frac) +
+ pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update horizontal accumulator */
+ u32_x_accum += pC->u32_x_inc[i];
+ }
+
+ }
+
+ pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+ /* Update vertical accumulator */
+ pC->u32_y_accum[i] += pC->u32_y_inc[i];
+ if (pC->u32_y_accum[i]>>16)
+ {
+ pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
+ pC->u32_y_accum[i] &= 0xffff;
+ }
+ }
+ }
+ /** +-90° rotation */
+ else
+ {
+ pu8_data_in_org = pu8_data_in;
+
+ /**< Loop on each output row */
+ for(j=0;j<pOut[i].u_height;j++)
+ {
+ /* horizontal weight factor */
+ u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
+
+ /* Reinit accumulator */
+ u32_y_accum = pC->u32_y_accum_start[i];
+
+ if(M4OSA_TRUE == pC->m_bFlipX)
+ {
+
+ /**< Loop on each output pixel in a row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+
+ u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+
+ pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
+
+ pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+ /* Weighted combination */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+ pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[1]*(16-u32_x_frac) +
+ pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update vertical accumulator */
+ u32_y_accum += pC->u32_y_inc[i];
+ if (u32_y_accum>>16)
+ {
+ pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+ u32_y_accum &= 0xffff;
+ }
+
+ }
+ }
+ else
+ {
+ /**< Loop on each output pixel in a row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+
+ u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+ pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
+
+ pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+ /* Weighted combination */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+ pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[0]*(16-u32_x_frac) +
+ pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update vertical accumulator */
+ u32_y_accum += pC->u32_y_inc[i];
+ if (u32_y_accum>>16)
+ {
+ pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+ u32_y_accum &= 0xffff;
+ }
+ }
+ }
+ pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+ /* Update horizontal accumulator */
+ pC->u32_x_accum[i] += pC->u32_x_inc[i];
+
+ pu8_data_in = pu8_data_in_org;
+ }
+
+ }
+ }/** 3 != i */
+ else
+ {
+ /**No +-90° rotation */
+ if(M4OSA_FALSE == pC->m_bRevertXY)
+ {
+
+ /**< Loop on each row */
+ for(j=0;j<pOut[i].u_height;j++)
+ {
+ /* Vertical weight factor */
+ u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
+
+ /* Reinit horizontal weight factor */
+ u32_x_accum = pC->u32_x_accum_start[i];
+
+
+
+ if(M4OSA_TRUE == pC->m_bFlipX)
+ {
+
+ /**< Loop on each output pixel in a row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+
+ u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+ weight factor */
+
+ pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
+
+ pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+ /* Weighted combination */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+ pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[1]*(16-u32_x_frac) +
+ pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update horizontal accumulator */
+ u32_x_accum += pC->u32_x_inc[i];
+ }
+ }
+
+ else
+ {
+ /**< Loop on each output pixel in a row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+ u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+ weight factor */
+
+ pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
+
+ pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+ /* Weighted combination */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+ pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[0]*(16-u32_x_frac) +
+ pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update horizontal accumulator */
+ u32_x_accum += pC->u32_x_inc[i];
+ }
+
+ }
+
+ pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+ /* Update vertical accumulator */
+ pC->u32_y_accum[i] += pC->u32_y_inc[i];
+ if (pC->u32_y_accum[i]>>16)
+ {
+ pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
+ pC->u32_y_accum[i] &= 0xffff;
+ }
+ }
+
+ } /**< M4OSA_FALSE == pC->m_bRevertXY */
+ /** +-90° rotation */
+ else
+ {
+ pu8_data_in_org = pu8_data_in;
+
+ /**< Loop on each output row */
+ for(j=0;j<pOut[i].u_height;j++)
+ {
+ /* horizontal weight factor */
+ u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
+
+ /* Reinit accumulator */
+ u32_y_accum = pC->u32_y_accum_start[i];
+
+ if(M4OSA_TRUE == pC->m_bFlipX)
+ {
+
+ /**< Loop on each output pixel in a row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+
+ u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+
+ pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
+
+ pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+ /* Weighted combination */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+ pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[1]*(16-u32_x_frac) +
+ pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update vertical accumulator */
+ u32_y_accum += pC->u32_y_inc[i];
+ if (u32_y_accum>>16)
+ {
+ pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+ u32_y_accum &= 0xffff;
+ }
+
+ }
+ }
+ else
+ {
+ /**< Loop on each output pixel in a row */
+ for(k=0;k<pOut[i].u_width;k++)
+ {
+
+ u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+ pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
+
+ pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+ /* Weighted combination */
+ u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+ pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+ (pu8_src_bottom[0]*(16-u32_x_frac) +
+ pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+ *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+ /* Update vertical accumulator */
+ u32_y_accum += pC->u32_y_inc[i];
+ if (u32_y_accum>>16)
+ {
+ pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+ u32_y_accum &= 0xffff;
+ }
+ }
+ }
+ pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+ /* Update horizontal accumulator */
+ pC->u32_x_accum[i] += pC->u32_x_inc[i];
+
+ pu8_data_in = pu8_data_in_org;
+
+ }
+ } /**< M4OSA_TRUE == pC->m_bRevertXY */
+ }/** 3 == i */
+ }
+ /**< In case of stripe mode, save current input pointer */
+ if(M4OSA_TRUE == pC->m_params.m_bOutputStripe)
+ {
+ pC->pu8_data_in[i] = pu8_data_in;
+ }
+ }
+
+ /**< Update number of processed rows, reset it if we have finished
+ with the whole processing */
+ pC->m_procRows += pOut[0].u_height;
+ if(M4OSA_FALSE == pC->m_bRevertXY)
+ {
+ if(pC->m_params.m_outputSize.m_height <= pC->m_procRows) pC->m_procRows = 0;
+ }
+ else
+ {
+ if(pC->m_params.m_outputSize.m_width <= pC->m_procRows) pC->m_procRows = 0;
+ }
+
+ return M4NO_ERROR ;
+
+}
+
+
+
diff --git a/libvideoeditor/vss/src/M4AMRR_CoreReader.c b/libvideoeditor/vss/src/M4AMRR_CoreReader.c
new file mode 100755
index 0000000..630f9dc
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AMRR_CoreReader.c
@@ -0,0 +1,909 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4AMRR_CoreReader.c
+ * @brief Implementation of AMR parser
+ * @note This file contains the API Implementation for
+ * AMR Parser.
+ ******************************************************************************
+*/
+#include "M4AMRR_CoreReader.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+
+/**
+ ******************************************************************************
+ * Maximum bitrate per amr type
+ ******************************************************************************
+*/
+#define M4AMRR_NB_MAX_BIT_RATE 12200
+#define M4AMRR_WB_MAX_BIT_RATE 23850
+
+/**
+ ******************************************************************************
+ * AMR reader context ID
+ ******************************************************************************
+*/
+#define M4AMRR_CONTEXTID 0x414d5252
+
+/**
+ ******************************************************************************
+ * An AMR frame is 20ms
+ ******************************************************************************
+*/
+#define M4AMRR_FRAME_LENGTH 20
+
+/**
+ ******************************************************************************
+ * For the seek, the file is split into 40 segments for faster search
+ ******************************************************************************
+*/
+#define M4AMRR_NUM_SEEK_ENTRIES 40
+
+#define M4AMRR_NB_SAMPLE_FREQUENCY 8000 /**< Narrow band sampling rate */
+#define M4AMRR_WB_SAMPLE_FREQUENCY 16000 /**< Wide band sampling rate */
+
+/**
+ ******************************************************************************
+ * AMR reader version numbers
+ ******************************************************************************
+*/
+/* CHANGE_VERSION_HERE */
+#define M4AMRR_VERSION_MAJOR 1
+#define M4AMRR_VERSION_MINOR 11
+#define M4AMRR_VERSION_REVISION 3
+
+/**
+ ******************************************************************************
+ * structure M4_AMRR_Context
+ * @brief Internal AMR reader context structure
+ ******************************************************************************
+*/
typedef struct
{
    M4OSA_UInt32 m_contextId ; /* Fixed Id. (M4AMRR_CONTEXTID) to check for valid Context*/
    M4OSA_FileReadPointer* m_pOsaFilePtrFct; /* OSAL file function pointers used for all file I/O */
    M4SYS_StreamDescription* m_pStreamHandler; /* Stream Description (allocated in getNextStream, freed in closeRead) */
    M4OSA_UInt32* m_pSeekIndex; /* Seek Index Table (lazily built on the first seek) */
    M4OSA_UInt32 m_seekInterval; /* Time interval (ms) between two seek-index entries */
    M4OSA_UInt32 m_maxAuSize; /* Largest AU size (bytes) found while scanning the file */
    M4OSA_MemAddr32 m_pdataAddress; /* AU data buffer, owned by the parser (allocated in startReading) */
    M4SYS_StreamType m_streamType; /* Stores the stream type AMR NB or WB */
    M4OSA_Context m_pAMRFile; /* Handle of the opened AMR file */
    M4AMRR_State m_status; /* AMR Reader state-machine status */
    M4OSA_Int32 m_structSize; /* size of structure*/
} M4_AMRR_Context;
+
+/**
+ ******************************************************************************
+ * Parser internal functions, not usable from outside the reader context
+ ******************************************************************************
+*/
+M4OSA_UInt32 M4AMRR_getAuSize(M4OSA_UInt32 frameType, M4SYS_StreamType streamType);
+M4OSA_UInt32 M4AMRR_getBitrate(M4OSA_UInt32 frameType, M4SYS_StreamType streamType);
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4AMRR_getAuSize(M4OSA_UInt32 frameType, M4SYS_StreamType streamType)
+ * @brief Internal function to the AMR Parser, returns the AU size of the Frame
+ * @note This function takes the stream type and the frametype and returns the
+ *          frame length
+ * @param frameType(IN) : AMR frame type
+ * @param streamType(IN) : AMR stream type NB or WB
+ * @returns The frame size based on the frame type.
+ ******************************************************************************
+ */
+M4OSA_UInt32 M4AMRR_getAuSize(M4OSA_UInt32 frameType, M4SYS_StreamType streamType)
+{
+ const M4OSA_UInt32 M4AMRR_NB_AUSIZE[]={13,14,16,18,20,21,27,32,6,6,6};
+ const M4OSA_UInt32 M4AMRR_WB_AUSIZE[]={18,24,33,37,41,47,51,59,61,6};
+
+ if ( streamType == M4SYS_kAMR )
+ {
+ return M4AMRR_NB_AUSIZE[frameType];
+ }
+ else /* M4SYS_kAMR_WB */
+ {
+ return M4AMRR_WB_AUSIZE[frameType];
+ }
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4AMRR_getBitrate(M4OSA_UInt32 frameType, M4SYS_StreamType streamType)
+ * @brief Internal function to the AMR Parser, returns the Bit rate of the Frame
+ * @note This function takes the stream type and the frametype and returns the
+ * bit rate for the given frame.
+ * @param frameType(IN) : AMR frame type
+ * @param streamType(IN) : AMR stream type NB or WB
+ * @returns The frame's bit rate based on the frame type.
+ ******************************************************************************
+ */
+M4OSA_UInt32 M4AMRR_getBitrate(M4OSA_UInt32 frameType, M4SYS_StreamType streamType)
+{
+ const M4OSA_UInt32 M4AMRR_NB_BITRATE[]=
+ {4750,5150,5900,6700,7400,7950,10200,12200,12200,12200,12200};
+ const M4OSA_UInt32 M4AMRR_WB_BITRATE[]=
+ {6600,8850,12650,14250,15850,18250,19850,23050,23850,12200};
+
+ if ( streamType == M4SYS_kAMR )
+ {
+ return M4AMRR_NB_BITRATE[frameType];
+ }
+ else /* M4SYS_kAMR_WB */
+ {
+ return M4AMRR_WB_BITRATE[frameType];
+ }
+}
+
/**
 ******************************************************************************
 * M4OSA_ERR M4AMRR_openRead(M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
 *                           M4OSA_FileReadPointer* pFileFunction)
 * @brief   Opens an AMR file for reading and validates its magic header.
 * @note    Allocates and initializes the reader context, opens the file via
 *          the supplied OSAL file functions, then checks for the "#!AMR\n"
 *          (narrow band) or "#!AMR-WB\n" (wide band) signature. On failure
 *          the context is freed and *pContext is left M4OSA_NULL.
 * @param   pContext(OUT)       : Receives the new reader context
 * @param   pFileDescriptor(IN) : File to open
 * @param   pFileFunction(IN)   : OSAL file reader function pointers
 * @returns M4NO_ERROR, M4ERR_ALLOC, M4ERR_AMR_NOT_COMPLIANT, or the error
 *          returned by the file open call.
 ******************************************************************************
 */
/*********************************************************/
M4OSA_ERR M4AMRR_openRead(M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
                        M4OSA_FileReadPointer* pFileFunction)
/*********************************************************/
{
    M4_AMRR_Context* pStreamContext;
    M4OSA_FilePosition filePos;

    M4OSA_ERR err = M4ERR_FILE_NOT_FOUND ;
    M4OSA_UInt32 size ;
    M4OSA_UInt32 data ;
    M4OSA_Char *M4_Token;
    M4OSA_UInt32 *tokenPtr;

    /* Header for AMR NB: the ASCII bytes "#!AM" and "R\n" packed into words.
       NOTE(review): comparing raw file bytes against these constants assumes
       a little-endian host -- confirm if this code is ever ported. */
    M4OSA_UInt32 M4_AMR_1 = 0x4d412123;
    M4OSA_UInt32 M4_AMR_NB_2 = 0x00000a52;

    /* Header for AMR WB: the ASCII bytes "R-WB" and "\n" */
    M4OSA_UInt32 M4_AMR_WB_2 = 0x42572d52;
    M4OSA_UInt32 M4_AMR_WB_3 = 0x0000000a;
    *pContext = M4OSA_NULL ;

    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext),M4ERR_PARAMETER,"Context M4OSA_NULL");
    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileDescriptor),M4ERR_PARAMETER,"File Desc. M4OSA_NULL");

    /* Scratch buffer used to read and compare the file signature */
    M4_Token = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_MemAddr32)*3, M4AMR_READER,
        (M4OSA_Char *)("M4_Token"));
    if(M4OSA_NULL == M4_Token)
    {
        M4OSA_DEBUG_IF3((M4OSA_NULL == M4_Token),M4ERR_ALLOC,"Mem Alloc failed - M4_Token");
        return M4ERR_ALLOC ;
    }

    pStreamContext= (M4_AMRR_Context*)M4OSA_32bitAlignedMalloc(sizeof(M4_AMRR_Context), M4AMR_READER,
        (M4OSA_Char *)("pStreamContext"));
    if(M4OSA_NULL == pStreamContext)
    {
        free(M4_Token);
        *pContext = M4OSA_NULL ;
        return M4ERR_ALLOC ;
    }

    /* Initialize the context */
    pStreamContext->m_contextId = M4AMRR_CONTEXTID;
    pStreamContext->m_structSize=sizeof(M4_AMRR_Context);
    pStreamContext->m_pOsaFilePtrFct=pFileFunction ;
    pStreamContext->m_pStreamHandler = M4OSA_NULL ;
    pStreamContext->m_pAMRFile = M4OSA_NULL ;
    pStreamContext->m_status = M4AMRR_kOpening ;
    pStreamContext->m_pSeekIndex = M4OSA_NULL ;
    pStreamContext->m_seekInterval = 0;
    pStreamContext->m_maxAuSize = 0 ;
    pStreamContext->m_pdataAddress = M4OSA_NULL;
    err=pStreamContext->m_pOsaFilePtrFct->openRead(&pStreamContext->m_pAMRFile,
        (M4OSA_Char*)pFileDescriptor,M4OSA_kFileRead );
    if ( err != M4NO_ERROR )
    {
        /* M4OSA_DEBUG_IF3((err != M4NO_ERROR),err,"File open failed"); */
        free(pStreamContext);
        free(M4_Token);
        *pContext = M4OSA_NULL ;
        return err ;
    }

    pStreamContext->m_status = M4AMRR_kOpening ;

    /* Read the first 6 bytes: the whole NB signature, which is also the
       common prefix of the WB signature */
    size = 6;
    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
        (M4OSA_MemAddr8)M4_Token, &size);
    if(size != 6)
    {
        goto cleanup;
    }

    tokenPtr = (M4OSA_UInt32*)M4_Token ;
    /* Check for the first 4 bytes of the header common to WB and NB*/
    if (*tokenPtr != M4_AMR_1)
    {
        goto cleanup;
    }

    tokenPtr++;
    data = *tokenPtr & 0x0000FFFF ;
    /* Check if the next part is Narrow band header */
    if (data!= M4_AMR_NB_2)
    {
        /* Stream is AMR Wide Band: re-read bytes 4..8 and verify "-WB\n" */
        filePos = 4;
        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
            M4OSA_kFileSeekBeginning, &filePos);
        size = 5;
        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
            (M4OSA_MemAddr8)M4_Token, &size);
        if(size != 5)
            goto cleanup;
        tokenPtr=(M4OSA_UInt32*)M4_Token;
        /* Check for the Wide band header */
        if(*tokenPtr!= M4_AMR_WB_2)
            goto cleanup;
        tokenPtr++;
        data = *tokenPtr & 0x000000FF ;
        if(data!= M4_AMR_WB_3)
            goto cleanup;
        pStreamContext->m_streamType = M4SYS_kAMR_WB ;
    }
    else
    {
        /* Stream is a Narrow band stream */
        pStreamContext->m_streamType = M4SYS_kAMR ;
    }
    /* No Profile level defined */
    pStreamContext->m_status = M4AMRR_kOpened;

    free(M4_Token);
    *pContext = pStreamContext ;
    return M4NO_ERROR;

cleanup:
    /* Signature check failed or short read: release everything and report
       a non-compliant AMR file */
    if(M4OSA_NULL != pStreamContext->m_pAMRFile)
    {
        pStreamContext->m_pOsaFilePtrFct->closeRead(pStreamContext->m_pAMRFile);
    }

    free(M4_Token);
    free(pStreamContext);

    *pContext = M4OSA_NULL ;

    return (M4OSA_ERR)M4ERR_AMR_NOT_COMPLIANT;
}
+
+
/**
 ******************************************************************************
 * M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context,
 *                                M4SYS_StreamDescription* pStreamDesc)
 * @brief   Describes the single audio stream of the file.
 * @note    Fills pStreamDesc from the first frame, then scans the whole file
 *          once to count access units (duration = AU count * 20 ms) and to
 *          record the largest AU size in the context. The file position is
 *          restored to the first AU before returning. Only one stream can be
 *          delivered; a second call returns M4WAR_NO_MORE_STREAM.
 ******************************************************************************
 */
/*********************************************************/
M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc )
/*********************************************************/
{
    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
    M4OSA_Char frameHeader, frameType ;
    M4OSA_UInt32 size, auCount=0;
    M4OSA_FilePosition filePos;

    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamDesc),M4ERR_PARAMETER,"Stream Desc. M4OSA_NULL");
    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
        "Bad Context");
    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");

    if (M4OSA_NULL != pStreamContext->m_pStreamHandler)
    {
        /* The single stream has already been delivered */
        return M4WAR_NO_MORE_STREAM ;
    }

    size = 1;
    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
        (M4OSA_MemAddr8)&frameHeader, &size);

    /* XFFF FXXX -> F is the Frame type */
    frameType = ( frameHeader & 0x78 ) >> 3 ;

    if ( frameType == 15 )
    {
        /* First frame is a NO_DATA frame: cannot derive a bitrate yet */
        return M4WAR_NO_DATA_YET ;
    }

    if (( pStreamContext->m_streamType == M4SYS_kAMR ) && ( frameType > 11 ))
    {
        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
    }

    if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) && ( frameType > 9 ))
    {
        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
    }

    /* Average bit rate is assigned the bitrate of the first frame */
    pStreamDesc->averageBitrate = M4AMRR_getBitrate(frameType,pStreamContext->m_streamType);

    /* Step back over the header byte just consumed */
    filePos = -1;
    pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile, M4OSA_kFileSeekCurrent,
        &filePos);

    /* Initialize pStreamDesc */
    pStreamDesc->profileLevel = 0xFF ;
    pStreamDesc->decoderSpecificInfoSize = 0 ;
    pStreamDesc->decoderSpecificInfo = M4OSA_NULL ;
    pStreamDesc->maxBitrate = (pStreamContext->m_streamType ==
        M4SYS_kAMR )?M4AMRR_NB_MAX_BIT_RATE:M4AMRR_WB_MAX_BIT_RATE;
    pStreamDesc->profileLevel = 0xFF ;
    pStreamDesc->streamID = 1;
    pStreamDesc->streamType = pStreamContext->m_streamType;

    /* Timescale equals Sampling Frequency: NB-8000 Hz, WB-16000 Hz */
    pStreamDesc->timeScale = (pStreamContext->m_streamType == M4SYS_kAMR )?8000:16000;
    pStreamDesc->duration = M4OSA_TIME_UNKNOWN;

    pStreamContext->m_pStreamHandler =
        (M4SYS_StreamDescription*)M4OSA_32bitAlignedMalloc(sizeof(M4SYS_StreamDescription),
        M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pStreamHandler"));
    if(M4OSA_NULL == pStreamContext->m_pStreamHandler)
    {
        return M4ERR_ALLOC;
    }

    /* Copy the Stream Desc. into the Context */
    pStreamContext->m_pStreamHandler->averageBitrate = pStreamDesc->averageBitrate;
    pStreamContext->m_pStreamHandler->decoderSpecificInfo = M4OSA_NULL ;
    pStreamContext->m_pStreamHandler->decoderSpecificInfoSize = 0 ;
    pStreamContext->m_pStreamHandler->duration = M4OSA_TIME_UNKNOWN;
    pStreamContext->m_pStreamHandler->profileLevel = 0xFF ;
    pStreamContext->m_pStreamHandler->streamID = 1;
    pStreamContext->m_pStreamHandler->streamType = pStreamDesc->streamType ;
    pStreamContext->m_pStreamHandler->timeScale = pStreamDesc->timeScale ;

    /* Count the number of Access Unit in the File to get the */
    /* duration of the stream = 20 ms * number of access unit */
    while(1)
    {
        size = 1;
        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
            (M4OSA_MemAddr8)&frameHeader, &size);
        if ( size == 0)
            break ;
        frameType = (frameHeader & 0x78) >> 3 ;
        /* Get the frame size and skip so many bytes */
        if(frameType != 15){
            /* GLA 20050628: frame types above 10 would index past the AU-size
               table, so they are skipped here */
            if(frameType > 10)
                continue ;

            size = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
            if(size > pStreamContext->m_maxAuSize )
            {
                pStreamContext->m_maxAuSize = size ;
            }
            filePos = size-1;
            pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
                M4OSA_kFileSeekCurrent, &filePos);
            auCount++;
        }
    }

    /* Each Frame is 20 m Sec. */
    pStreamContext->m_pStreamHandler->duration = auCount * M4AMRR_FRAME_LENGTH ;
    pStreamDesc->duration = pStreamContext->m_pStreamHandler->duration ;

    /* Put the file pointer back at the first Access unit
       (NB magic is 6 bytes, WB magic is 9 bytes) */
    if( pStreamContext->m_streamType == M4SYS_kAMR )
    {
        filePos = 6;
        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
            M4OSA_kFileSeekBeginning, &filePos);
    }
    if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )
    {
        filePos = 9;
        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
            M4OSA_kFileSeekBeginning, &filePos);
    }
    return M4NO_ERROR ;
}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs )
+/*********************************************************/
+{
+ M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+ M4OSA_Int32 size = 0 ;
+
+ M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamIDs),M4ERR_PARAMETER,"Stream Ids. M4OSA_NULL");
+ M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+ "Bad Context");
+ M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
+
+ while( pStreamIDs[size] != 0 )
+ {
+ if( pStreamIDs[size++] != 1 )
+ {
+ return M4ERR_BAD_STREAM_ID ;
+ }
+ }
+
+ /* Allocate memory for data Address for use in NextAU() */
+ if(M4OSA_NULL == pStreamContext->m_pdataAddress)
+ {
+ size = pStreamContext->m_maxAuSize ;
+ /* dataAddress is owned by Parser, application should not delete or free it */
+ pStreamContext->m_pdataAddress =(M4OSA_MemAddr32)M4OSA_32bitAlignedMalloc(size + (4 - size % 4),
+ M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pdataAddress"));
+ if(M4OSA_NULL == pStreamContext->m_pdataAddress)
+ {
+ M4OSA_DEBUG_IF3((M4OSA_NULL == pStreamContext->m_pdataAddress),M4ERR_ALLOC,
+ "Mem Alloc failed - dataAddress");
+ return M4ERR_ALLOC;
+ }
+ }
+
+ /* Set the state of context to Reading */
+ pStreamContext->m_status = M4AMRR_kReading ;
+
+ return M4NO_ERROR ;
+}
+
+
/**
 ******************************************************************************
 * M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID,
 *                         M4SYS_AccessUnit* pAu)
 * @brief   Reads the next access unit (one 20 ms frame) from the file.
 * @note    Reads m_maxAuSize bytes into the context-owned buffer, derives
 *          the real AU size from the frame-type nibble of the first byte,
 *          then seeks back over the surplus bytes. pAu->dataAddress points
 *          into the context buffer and must not be freed by the caller.
 *          Must be followed by M4AMRR_freeAU() before the next call.
 ******************************************************************************
 */
/*********************************************************/
M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu)
/*********************************************************/
{
    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
    M4OSA_Char frameHeader ;
    M4OSA_Char frameType ;
    M4OSA_Int32 auSize;
    M4OSA_UInt32 size ;
    M4OSA_FilePosition filePos;

    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
    M4OSA_DEBUG_IF2((M4OSA_NULL == pAu),M4ERR_PARAMETER,"Access Unit . M4OSA_NULL");
    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
        "Bad Context");
    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading), M4ERR_STATE, "Invalid State");

    if ( StreamID != 1 )
    {
        return M4ERR_BAD_STREAM_ID;
    }

    /* Read a worst-case AU; the first byte is the frame header */
    size = pStreamContext->m_maxAuSize;
    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
        (M4OSA_MemAddr8)pStreamContext->m_pdataAddress, &size);
    if(size != pStreamContext->m_maxAuSize)
    {
        /* Short read: not enough bytes left for a worst-case AU */
        return M4WAR_NO_MORE_AU;
    }

    frameHeader = ((M4OSA_MemAddr8)pStreamContext->m_pdataAddress)[0];

    /* XFFF FXXX -> F is the Frame type */
    frameType = ( frameHeader & 0x78 ) >> 3 ;

    if (( pStreamContext->m_streamType == M4SYS_kAMR ) &&
        ( frameType > 11 ) && ( frameType != 15 ))
    {
        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
    }

    if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) &&
        ( frameType > 9 ) && ( frameType != 15 ))
    {
        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
    }

    /* Get the frame size: type 15 is a NO_DATA frame, header byte only */
    if(frameType == 15)
    {
        auSize = 1;
    }
    else
    {
        auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
    }

    /* Seek back over the bytes read beyond this AU */
    size -= auSize ;
    if(size != 0)
    {
        filePos = -((M4OSA_FilePosition)size);
        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
            M4OSA_kFileSeekCurrent, &filePos);
    }

    pAu->size = auSize ;

    /* even when frameType == 15 (no data frame), ARM core decoder outputs full PCM buffer */
    /*if(frameType == 15 )
    {
        pAu->CTS += 0;
    }*/
    /*else*/
    {
        pAu->CTS += M4AMRR_FRAME_LENGTH ;
    }


    pAu->DTS = pAu->CTS ;
    pAu->attribute = M4SYS_kFragAttrOk;

    pAu->stream = pStreamContext->m_pStreamHandler;
    pAu->dataAddress = pStreamContext->m_pdataAddress ;

    /* NOTE(review): the top bit of the header octet looks like it is treated
       as an end-of-data marker (it is padding in a compliant stream) --
       confirm against the storage-format spec */
    if(frameHeader & 0x80)
    {
        return M4WAR_NO_MORE_AU;
    }

    /* Change the state to implement NextAu->freeAu->NextAu FSM */
    pStreamContext->m_status = M4AMRR_kReading_nextAU ;

    return M4NO_ERROR ;
}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu)
+/*********************************************************/
+{
+ M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+ M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pAu),M4ERR_PARAMETER,"Access Unit . M4OSA_NULL");
+ M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+ "Bad Context");
+ M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading_nextAU), M4ERR_STATE,
+ "Invalid State");
+
+ if (( StreamID != 1 ) && ( StreamID != 0))
+ {
+ return M4ERR_BAD_STREAM_ID;
+ }
+
+ /* Change the state to Reading so as to allow access to next AU */
+ pStreamContext->m_status = M4AMRR_kReading ;
+
+ return M4NO_ERROR ;
+}
+
/**
 ******************************************************************************
 * M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID,
 *                       M4OSA_Time time, M4SYS_SeekAccessMode seekMode,
 *                       M4OSA_Time* pObtainCTS)
 * @brief   Positions the file on the access unit nearest the requested time.
 * @note    On first use, builds a 40-entry table of file offsets spaced
 *          m_seekInterval ms apart (one full scan of the file). A seek then
 *          jumps to the nearest table entry and walks AU by AU up to the
 *          target. *pObtainCTS receives the CTS actually reached.
 ******************************************************************************
 */
/*********************************************************/
M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
                      M4SYS_SeekAccessMode seekMode, M4OSA_Time* pObtainCTS)
/*********************************************************/
{
    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
    M4OSA_UInt32 count, prevAU, nextAU ;
    M4OSA_UInt32 size ;
    M4OSA_UInt32 auSize ;
    M4OSA_UInt32 position, partSeekTime;
    M4OSA_UInt32 auCount = 0, skipAuCount = 0 ;
    M4OSA_Char frameHeader ;
    M4OSA_Char frameType ;
    M4OSA_FilePosition filePos;
    M4OSA_Double time_double;

    /*Make explicit time cast, but take care that timescale is not used !!!*/
    M4OSA_TIME_TO_MS(time_double, time, 1000);

    *pObtainCTS = 0;

    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
        "Bad Context");
    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading) && \
        ( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
    M4OSA_DEBUG_IF1((time_double < 0),M4ERR_PARAMETER,"negative time");

    /* Coming to seek for the first time, need to build the seekIndex Table */
    if(M4OSA_NULL == pStreamContext->m_pSeekIndex)
    {
        M4OSA_Double duration_double;

        count = 0 ;
        pStreamContext->m_pSeekIndex =
            (M4OSA_UInt32*)M4OSA_32bitAlignedMalloc(M4AMRR_NUM_SEEK_ENTRIES * sizeof(M4OSA_UInt32),
            M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pSeekIndex"));

        if(M4OSA_NULL == pStreamContext->m_pSeekIndex)
        {
            M4OSA_DEBUG_IF3((M4OSA_NULL == pStreamContext->m_pSeekIndex),M4ERR_ALLOC,
                "Mem Alloc Failed - SeekIndex");
            return M4ERR_ALLOC ;
        }

        /* point to the first AU (after the 6-byte NB / 9-byte WB magic) */
        if( pStreamContext->m_streamType == M4SYS_kAMR )
        {
            filePos = 6;
        }
        else /*if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )*/
        {
            filePos = 9;
        }

        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
            M4OSA_kFileSeekBeginning, &filePos);

        /* Set the postion to begining of first AU */
        position = (pStreamContext->m_streamType != M4SYS_kAMR)?9:6;

        /*Make explicit time cast, but take care that timescale is not used !!!*/
        M4OSA_TIME_TO_MS(duration_double, pStreamContext->m_pStreamHandler->duration, 1000);

        /* Calculate the seek Interval duration based on total dutation */
        /* Interval = (duration / ENTRIES) in multiples of AU frame length */
        pStreamContext->m_seekInterval =
            (M4OSA_UInt32)(duration_double / M4AMRR_NUM_SEEK_ENTRIES) ;
        pStreamContext->m_seekInterval /= M4AMRR_FRAME_LENGTH ;
        pStreamContext->m_seekInterval *= M4AMRR_FRAME_LENGTH ;
        skipAuCount = pStreamContext->m_seekInterval / M4AMRR_FRAME_LENGTH ;

        pStreamContext->m_pSeekIndex[count++]=position;
        /* Scan the whole file, recording a file offset every skipAuCount AUs */
        while(count < M4AMRR_NUM_SEEK_ENTRIES )
        {
            size = 1;
            pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
                (M4OSA_MemAddr8)&frameHeader, &size);
            if ( size == 0)
            {
                break ;
            }
            frameType = (frameHeader & 0x78) >> 3 ;
            if(frameType != 15)
            {
                /**< bugfix Ronan Cousyn 05/04/2006: In the core reader AMR, the
                 * function M4AMRR_seek doesn't check the frameType */
                if (( pStreamContext->m_streamType == M4SYS_kAMR ) && ( frameType > 10 ))
                {
                    return M4ERR_AMR_INVALID_FRAME_TYPE;
                }
                if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) && ( frameType > 9 ))
                {
                    return M4ERR_AMR_INVALID_FRAME_TYPE;
                }
                auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
                position += auSize ;
                filePos = auSize-1;
                pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
                    M4OSA_kFileSeekCurrent, &filePos);
                auCount++;
            }
            else
            {
                /* NO_DATA frame: header byte only */
                position ++;
            }
            /* Skip the number of AU's as per interval and store in the Index table */
            if ( (skipAuCount != 0) && !(auCount % skipAuCount))
            {
                pStreamContext->m_pSeekIndex[count++] = position;
            }
        }
    }/* End of Building the seek table */

    /* Use the seek table to seek the required time in the stream */

    /* If we are seeking the begining of the file point to first AU */
    if ( seekMode == M4SYS_kBeginning )
    {
        if( pStreamContext->m_streamType == M4SYS_kAMR )
        {
            filePos = 6;
        }
        else /*if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )*/
        {
            filePos = 9;
        }
        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
            M4OSA_kFileSeekBeginning, &filePos );
        return M4NO_ERROR ;
    }

    /* Pick the seek-index entry covering the target time */
    if (0 != pStreamContext->m_seekInterval)
    {
        position = (M4OSA_UInt32)(time_double / pStreamContext->m_seekInterval);
    }
    else
    {
        /*avoid division by 0*/
        position = 0;
    }

    /* We have only 40 seek Index. */
    position=(position >= M4AMRR_NUM_SEEK_ENTRIES)?M4AMRR_NUM_SEEK_ENTRIES-1:position;

    /* SeekIndex will point to nearest Au, we need to search for the
       required time form that position */
    partSeekTime = (M4OSA_UInt32)time_double - position * pStreamContext->m_seekInterval;

    position = pStreamContext->m_pSeekIndex[position];

    if(!position)
    {
        /* Entry never filled in (offset 0 cannot be a valid AU position) */
        return M4WAR_INVALID_TIME ;
    }

    /* point the file pointer to nearest AU */
    filePos = position;
    pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile, M4OSA_kFileSeekBeginning,
        &filePos );

    if ( partSeekTime == 0)
    {
        *pObtainCTS = time;
        return M4NO_ERROR;
    }

    *pObtainCTS = (M4OSA_Time)(time_double - (M4OSA_Double)partSeekTime);

    /* Convert the residual time into a number of AUs to walk, according to
       the requested rounding mode */
    switch(seekMode)
    {
        /* Get the AU before the target time */
        case M4SYS_kPreviousRAP:
        case M4SYS_kNoRAPprevious:
            position = partSeekTime / M4AMRR_FRAME_LENGTH ;
            if ( !(partSeekTime % M4AMRR_FRAME_LENGTH) )
            {
                position -- ;
            }
            break;
        /* Get the Closest AU following the target time */
        case M4SYS_kNextRAP:
        case M4SYS_kNoRAPnext:
            position = (partSeekTime + M4AMRR_FRAME_LENGTH )/ M4AMRR_FRAME_LENGTH ;
            break;
        /* Get the closest AU to target time */
        case M4SYS_kClosestRAP:
        case M4SYS_kNoRAPclosest:
            prevAU = partSeekTime-(partSeekTime/M4AMRR_FRAME_LENGTH)*M4AMRR_FRAME_LENGTH;
            nextAU =
                ((partSeekTime+M4AMRR_FRAME_LENGTH)/M4AMRR_FRAME_LENGTH)*M4AMRR_FRAME_LENGTH -\
                partSeekTime ;
            if(prevAU < nextAU)
            {
                position = partSeekTime / M4AMRR_FRAME_LENGTH ;
            }
            else
            {
                position = (partSeekTime + M4AMRR_FRAME_LENGTH )/ M4AMRR_FRAME_LENGTH ;
            }
            break;
        case M4SYS_kBeginning:
            break;
    }

    count = 0 ;
    /* Skip the Access unit in the stream to skip the part seek time,
       to reach the required target time */
    while(count < position )
    {
        size = 1;
        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
            (M4OSA_MemAddr8)&frameHeader, &size);
        if ( size == 0)
        {
            /* If the target time is invalid, point to begining and return */
            *pObtainCTS = 0;
            filePos = pStreamContext->m_pSeekIndex[0];
            pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
                M4OSA_kFileSeekBeginning, &filePos);
            return M4WAR_INVALID_TIME ;
        }
        *pObtainCTS += M4AMRR_FRAME_LENGTH; /*Should use M4OSA_INT64_ADD !!*/
        count++;
        frameType = (frameHeader & 0x78) >> 3 ;
        if(frameType == 15)
        {
            auSize = 1 ;
        }
        else
        {
            auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
        }

        filePos = auSize-1;
        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
            M4OSA_kFileSeekCurrent, &filePos);
    }

    return M4NO_ERROR;
}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context)
+/*********************************************************/
+{
+ M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+ M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+
+ /* Close the AMR stream */
+ pStreamContext->m_pOsaFilePtrFct->closeRead(pStreamContext->m_pAMRFile);
+
+ pStreamContext->m_status=M4AMRR_kClosed ;
+
+ /* Check if AU data Address is allocated memory and free it */
+ if(M4OSA_NULL != pStreamContext->m_pdataAddress)
+ {
+ free(pStreamContext->m_pdataAddress);
+ }
+
+ /* Check if the stream handler is allocated memory */
+ if(M4OSA_NULL != pStreamContext->m_pStreamHandler)
+ {
+ free(pStreamContext->m_pStreamHandler);
+ }
+
+ /* Seek table is created only when seek is used, so check if memory is allocated */
+ if(M4OSA_NULL != pStreamContext->m_pSeekIndex)
+ {
+ free(pStreamContext->m_pSeekIndex);
+ }
+
+ /* Free the context */
+ free(pStreamContext);
+
+ return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId)
+/*********************************************************/
+{
+ M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+ M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+ M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+ "Bad Context");
+
+ if (( streamId != 1 ) && ( streamId != 0))
+ {
+ return M4ERR_BAD_STREAM_ID;
+ }
+
+ *pState = pStreamContext->m_status ;
+
+ return M4NO_ERROR ;
+}
+
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getVersion (M4_VersionInfo *pVersion)
+/*********************************************************/
+{
+ M4OSA_TRACE1_1("M4AMRR_getVersion called with pVersion: 0x%x\n", pVersion);
+ M4OSA_DEBUG_IF1(((M4OSA_UInt32) pVersion == 0),M4ERR_PARAMETER,
+ "pVersion is NULL in M4AMRR_getVersion");
+
+ pVersion->m_major = M4AMRR_VERSION_MAJOR;
+ pVersion->m_minor = M4AMRR_VERSION_MINOR;
+ pVersion->m_revision = M4AMRR_VERSION_REVISION;
+
+ return M4NO_ERROR;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getmaxAUsize(M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize)
+/*********************************************************/
+{
+ M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF1((M4OSA_NULL == Context), M4ERR_PARAMETER,
+ "M4AMRR_getmaxAUsize: Context is M4OSA_NULL");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pMaxAuSize),M4ERR_PARAMETER,
+ "M4AMRR_getmaxAUsize: pMaxAuSize is M4OSA_NULL");
+
+ *pMaxAuSize = pStreamContext->m_maxAuSize;
+
+ return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4ChannelConverter.c b/libvideoeditor/vss/src/M4ChannelConverter.c
new file mode 100755
index 0000000..fca5550
--- /dev/null
+++ b/libvideoeditor/vss/src/M4ChannelConverter.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4ChannelConverter.c
+ * @brief
+ * @note
+ ******************************************************************************
+ */
+
+void MonoTo2I_16( const short *src,
+ short *dst,
+ short n)
+{
+ /* Expands a mono 16-bit buffer of n samples into interleaved stereo
+ (dst must hold 2*n samples). The copy runs backwards from the last
+ sample, which makes in-place widening safe when dst aliases src. */
+ short ii;
+ src += n-1;
+ dst += (n*2)-1;
+
+ for (ii = n; ii != 0; ii--){
+ *dst-- = *src; /* duplicate sample into one channel */
+ *dst-- = *src--; /* ...and the other, then step back a sample */
+ }
+
+ return;
+}
+
+void From2iToMono_16( const short *src,
+ short *dst,
+ short n)
+{
+ /* Downmixes n interleaved stereo frames (2*n shorts in src) to mono:
+ dst[i] = (L + R) >> 1, summed in a 32-bit 'long' so the addition
+ cannot overflow before the averaging shift. */
+ short ii;
+ long Temp;
+ for (ii = n; ii != 0; ii--){
+ Temp = (long)*(src++);
+ Temp += (long)*(src++);
+ *(dst++) = (short)(Temp >>1);
+ }
+
+ return;
+}
+
diff --git a/libvideoeditor/vss/src/M4DECODER_Null.c b/libvideoeditor/vss/src/M4DECODER_Null.c
new file mode 100755
index 0000000..a0dad30
--- /dev/null
+++ b/libvideoeditor/vss/src/M4DECODER_Null.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file M4DECODER_Null.c
+ * @brief Implementation of the Null decoder public interface
+ * @note This file implements a "null" video decoder, i.e. a decoder
+ * that does nothing
+*************************************************************************
+*/
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4DA_Types.h"
+#include "M4DECODER_Common.h"
+#include "M4DECODER_Null.h"
+
+/**
+ ************************************************************************
+ * NULL Video Decoder version information
+ ************************************************************************
+*/
+/* CHANGE_VERSION_HERE */
+#define M4DECODER_NULL_MAJOR 1
+#define M4DECODER_NULL_MINOR 0
+#define M4DECODER_NULL_REVISION 0
+
+/**
+ ************************************************************************
+ * structure M4_VideoHandler_Context
+ * @brief Defines the internal context of a video decoder instance
+ * @note The context is allocated and freed by the video decoder
+ ************************************************************************
+*/
+/* NOTE(review): of these fields only pDecYuvData, pDecYuvWithEffect and
+ bYuvWithEffectSet are consulted by this null decoder (see setOption and
+ render below); the rest mirror the regular decoder context layout and
+ are merely initialized in create. */
+typedef struct {
+ void* m_pLibrary; // Core library identifier
+ M4OSA_Int32 m_DecoderId; // Core decoder identifier
+ M4OSA_Int32 m_RendererId; // Core renderer identifier
+ M4_VideoStreamHandler* m_pVideoStreamhandler; // Video stream description
+ M4_AccessUnit* m_pNextAccessUnitToDecode; // Access unit used to
+ // read and decode one frame
+ void* m_pUserData; // Pointer to any user data
+ M4READER_DataInterface* m_pReader; // Reader data interface
+ M4OSA_Bool m_bDoRendering; // Decides if render required
+ M4OSA_Int32 m_structSize; // Size of the structure
+
+ M4DECODER_OutputFilter* m_pVideoFilter; // Color conversion filter
+ M4VIFI_ImagePlane *pDecYuvData; // Pointer to Yuv data plane
+ M4VIFI_ImagePlane *pDecYuvWithEffect; // Pointer to Yuv plane with color effect
+ M4OSA_Bool bYuvWithEffectSet; // Original Yuv data OR Yuv with color effect
+
+} M4_VideoHandler_Context;
+
+/***********************************************************************/
+/************** M4DECODER_VideoInterface implementation ****************/
+/***********************************************************************/
+
+/**
+ ************************************************************************
+ * @brief Creates an instance of the decoder
+ * @note Allocates the context
+ *
+ * @param pContext: (OUT) Context of the decoder
+ * @param pStreamHandler: (IN) Pointer to a video stream description
+ * @param pSrcInterface: (IN) Pointer to the M4READER_DataInterface
+ * structure that must be used by the
+ * decoder to read data from the stream
+ * @param pAccessUnit (IN) Pointer to an access unit
+ * (allocated by the caller) where decoded data
+ * are stored
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_ALLOC A memory allocation has failed
+ * @return M4ERR_PARAMETER At least one input parameter is not proper
+ ************************************************************************
+*/
+M4OSA_ERR M4DECODER_NULL_create(M4OSA_Context *pContext,
+ M4_StreamHandler *pStreamHandler,
+ M4READER_GlobalInterface *pReaderGlobalInterface,
+ M4READER_DataInterface *pReaderDataInterface,
+ M4_AccessUnit* pAccessUnit,
+ M4OSA_Void* pUserData) {
+
+ M4_VideoHandler_Context* pStreamContext = M4OSA_NULL;
+
+ /* Clear the caller's handle first so it is NULL on the failure path. */
+ *pContext = M4OSA_NULL;
+ pStreamContext = (M4_VideoHandler_Context*)M4OSA_32bitAlignedMalloc (
+ sizeof(M4_VideoHandler_Context), M4DECODER_MPEG4,
+ (M4OSA_Char *)"M4_VideoHandler_Context");
+ if (pStreamContext == 0) {
+ return M4ERR_ALLOC;
+ }
+
+ /* Default-initialize every field; the stream/reader parameters are
+ ignored by this null decoder. */
+ pStreamContext->m_structSize = sizeof(M4_VideoHandler_Context);
+ pStreamContext->m_pNextAccessUnitToDecode = M4OSA_NULL;
+ pStreamContext->m_pLibrary = M4OSA_NULL;
+ pStreamContext->m_pVideoStreamhandler = M4OSA_NULL;
+ pStreamContext->m_DecoderId = -1;
+ pStreamContext->m_RendererId = -1;
+
+ pStreamContext->m_pUserData = M4OSA_NULL;
+ pStreamContext->m_bDoRendering = M4OSA_TRUE;
+ pStreamContext->m_pVideoFilter = M4OSA_NULL;
+ /* Was missing: the YUV plane pointers must start out NULL, otherwise
+ render dereferences uninitialized memory if setOption was never
+ called with M4DECODER_kOptionID_DecYuvData. */
+ pStreamContext->pDecYuvData = M4OSA_NULL;
+ pStreamContext->pDecYuvWithEffect = M4OSA_NULL;
+ pStreamContext->bYuvWithEffectSet = M4OSA_FALSE;
+
+ *pContext=pStreamContext;
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Destroy the instance of the decoder
+ * @note After this call the context is invalid
+ *
+ * @param context: (IN) Context of the decoder
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER The context is invalid
+ ************************************************************************
+*/
+M4OSA_ERR M4DECODER_NULL_destroy(M4OSA_Context pContext) {
+
+ M4_VideoHandler_Context* pStreamContext = (M4_VideoHandler_Context*)pContext;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pStreamContext),
+ M4ERR_PARAMETER, "M4DECODER_NULL_destroy: invalid context pointer");
+
+ /* The context owns no nested allocations (the YUV planes are
+ caller-supplied via setOption), so freeing the struct suffices. */
+ free(pStreamContext);
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Get an option value from the decoder
+ * @note This function allows the caller to retrieve a property value:
+ *
+ * @param context: (IN) Context of the decoder
+ * @param optionId: (IN) Indicates the option to get
+ * @param pValue: (IN/OUT) Pointer to structure or value where
+ * option is stored
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER The context is invalid (in DEBUG only)
+ * @return M4ERR_BAD_OPTION_ID When the option ID is not a valid one
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_NOT_IMPLEMENTED Function not implemented
+ ************************************************************************
+*/
+M4OSA_ERR M4DECODER_NULL_getOption(M4OSA_Context context,
+ M4OSA_OptionID optionId,
+ M4OSA_DataOption pValue) {
+
+ /* The null decoder exposes no readable options; all parameters are
+ intentionally unused. */
+ return M4ERR_NOT_IMPLEMENTED;
+}
+
+/**
+ ************************************************************************
+ * @brief Set an option value of the decoder
+ * @note Allows the caller to set a property value:
+ *
+ * @param context: (IN) Context of the decoder
+ * @param optionId: (IN) Identifier indicating the option to set
+ * @param pValue: (IN) Pointer to structure or value
+ * where option is stored
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_BAD_OPTION_ID The option ID is not a valid one
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_PARAMETER The option parameter is invalid
+ ************************************************************************
+*/
+M4OSA_ERR M4DECODER_NULL_setOption(M4OSA_Context context,
+ M4OSA_OptionID optionId,
+ M4OSA_DataOption pValue) {
+
+ M4DECODER_OutputFilter *pFilterOption;
+
+ M4_VideoHandler_Context *pStreamContext =
+ (M4_VideoHandler_Context*)context;
+
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_UInt32 height = 0;
+ M4OSA_UInt8 *p_src,*p_des;
+ M4VIFI_ImagePlane* pTempDecYuvData = M4OSA_NULL;
+
+ switch (optionId) {
+ /* Remember the caller-owned decoded YUV planes; render copies from
+ them later. Ownership stays with the caller. */
+ case M4DECODER_kOptionID_DecYuvData:
+ pStreamContext->pDecYuvData = (M4VIFI_ImagePlane *)pValue;
+ break;
+
+ case M4DECODER_kOptionID_YuvWithEffectContiguous:
+ pStreamContext->pDecYuvWithEffect = (M4VIFI_ImagePlane *)pValue;
+ break;
+
+ case M4DECODER_kOptionID_EnableYuvWithEffect:
+ /* NOTE(review): the boolean travels in the pointer VALUE itself
+ (cast, not dereference) — callers must pass the flag as pValue. */
+ pStreamContext->bYuvWithEffectSet = (M4OSA_Bool)pValue;
+ break;
+
+ case M4DECODER_kOptionID_YuvWithEffectNonContiguous:
+ /* Deep-copy the three planes (Y, U, V) row by row into the
+ previously registered contiguous buffer, honoring each plane's
+ own stride; requires pDecYuvWithEffect to have been set first. */
+ pTempDecYuvData = (M4VIFI_ImagePlane *)pValue;
+
+ p_des = pStreamContext->pDecYuvWithEffect[0].pac_data +
+ pStreamContext->pDecYuvWithEffect[0].u_topleft;
+ p_src = pTempDecYuvData[0].pac_data +
+ pTempDecYuvData[0].u_topleft;
+
+ for (height = 0; height<pStreamContext->pDecYuvWithEffect[0].u_height;
+ height++) {
+ memcpy((void *)p_des, (void *)p_src,
+ pStreamContext->pDecYuvWithEffect[0].u_width);
+
+ p_des += pStreamContext->pDecYuvWithEffect[0].u_stride;
+ p_src += pTempDecYuvData[0].u_stride;
+ }
+
+ p_des = pStreamContext->pDecYuvWithEffect[1].pac_data +
+ pStreamContext->pDecYuvWithEffect[1].u_topleft;
+ p_src = pTempDecYuvData[1].pac_data +
+ pTempDecYuvData[1].u_topleft;
+
+ for (height = 0; height<pStreamContext->pDecYuvWithEffect[1].u_height;
+ height++) {
+ memcpy((void *)p_des, (void *)p_src,
+ pStreamContext->pDecYuvWithEffect[1].u_width);
+
+ p_des += pStreamContext->pDecYuvWithEffect[1].u_stride;
+ p_src += pTempDecYuvData[1].u_stride;
+ }
+
+ p_des = pStreamContext->pDecYuvWithEffect[2].pac_data +
+ pStreamContext->pDecYuvWithEffect[2].u_topleft;
+ p_src = pTempDecYuvData[2].pac_data +
+ pTempDecYuvData[2].u_topleft;
+
+ for (height = 0; height<pStreamContext->pDecYuvWithEffect[2].u_height;
+ height++) {
+ memcpy((void *)p_des, (void *)p_src,
+ pStreamContext->pDecYuvWithEffect[2].u_width);
+
+ p_des += pStreamContext->pDecYuvWithEffect[2].u_stride;
+ p_src += pTempDecYuvData[2].u_stride;
+ }
+ break;
+
+ case M4DECODER_kOptionID_OutputFilter:
+ /* Accepted but ignored: the local is never read afterwards, so
+ no output filter is ever applied by this null decoder. */
+ pFilterOption = (M4DECODER_OutputFilter*)pValue;
+ break;
+
+ case M4DECODER_kOptionID_DeblockingFilter:
+ err = M4ERR_BAD_OPTION_ID;
+ break;
+
+ default:
+ err = M4ERR_BAD_OPTION_ID;
+ break;
+ }
+ return err;
+}
+
+/**
+ ************************************************************************
+ * @brief Decode video Access Units up to a target time
+ * @note Parse and decode the video until it can output a decoded image
+ * for which the composition time is equal or greater to the
+ * passed targeted time.
+ * The data are read from the reader data interface passed to
+ * M4DECODER_MPEG4_create.
+ *
+ * @param context: (IN) Context of the decoder
+ * @param pTime: (IN/OUT) IN: Time to decode up to (in msec)
+ * OUT:Time of the last decoded frame (in msec)
+ * @param bJump: (IN) 0 if no jump occurred just before this call
+ * 1 if a jump has just been made
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4WAR_NO_MORE_AU there is no more access unit to decode (EOS)
+ ************************************************************************
+*/
+M4OSA_ERR M4DECODER_NULL_decode(M4OSA_Context context,
+ M4_MediaTime* pTime, M4OSA_Bool bJump,
+ M4OSA_UInt32 tolerance) {
+
+ // Do nothing; input time stamp itself returned
+ // (all parameters intentionally unused: the YUV data is supplied
+ // up-front through setOption and copied out in render).
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Renders the video at the specified time.
+ * @note
+ * @param context: (IN) Context of the decoder
+ * @param pTime: (IN/OUT) IN: Time to render to (in msecs)
+ * OUT:Time of the rendered frame (in ms)
+ * @param pOutputPlane:(OUT) Output plane filled with decoded data
+ * @param bForceRender:(IN) 1 if the image must be rendered even it
+ * has been rendered already
+ * 0 if not
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_PARAMETER At least one parameter is not properly set
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_ALLOC There is no more available memory
+ * @return M4WAR_VIDEORENDERER_NO_NEW_FRAME If the frame has already been rendered
+ ************************************************************************
+*/
+M4OSA_ERR M4DECODER_NULL_render(M4OSA_Context context, M4_MediaTime* pTime,
+ M4VIFI_ImagePlane* pOutputPlane,
+ M4OSA_Bool bForceRender) {
+
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_UInt32 height;
+ M4OSA_UInt8 *p_src,*p_des;
+ M4_VideoHandler_Context* pStreamContext =
+ (M4_VideoHandler_Context*)context;
+
+ /* Copies the stored YUV420 planes — the effect version when
+ bYuvWithEffectSet is true, otherwise the plain decoded data — into
+ pOutputPlane, row by row so that differing source and destination
+ strides are handled. pTime and bForceRender are not consulted. */
+ if (pStreamContext->bYuvWithEffectSet == M4OSA_TRUE) {
+
+ p_des = pOutputPlane[0].pac_data + pOutputPlane[0].u_topleft;
+ p_src = pStreamContext->pDecYuvWithEffect[0].pac_data +
+ pStreamContext->pDecYuvWithEffect[0].u_topleft;
+
+ for (height = 0; height<pOutputPlane[0].u_height; height++) {
+ memcpy((void *)p_des, (void *)p_src, pOutputPlane[0].u_width);
+ p_des += pOutputPlane[0].u_stride;
+ p_src += pStreamContext->pDecYuvWithEffect[0].u_stride;
+ }
+
+ p_des = pOutputPlane[1].pac_data + pOutputPlane[1].u_topleft;
+ p_src = pStreamContext->pDecYuvWithEffect[1].pac_data +
+ pStreamContext->pDecYuvWithEffect[1].u_topleft;
+
+ for (height = 0; height<pOutputPlane[1].u_height; height++) {
+ memcpy((void *)p_des, (void *)p_src, pOutputPlane[1].u_width);
+ p_des += pOutputPlane[1].u_stride;
+ p_src += pStreamContext->pDecYuvWithEffect[1].u_stride;
+ }
+
+ p_des = pOutputPlane[2].pac_data + pOutputPlane[2].u_topleft;
+ p_src = pStreamContext->pDecYuvWithEffect[2].pac_data +
+ pStreamContext->pDecYuvWithEffect[2].u_topleft;
+
+ for (height = 0; height<pOutputPlane[2].u_height; height++) {
+ memcpy((void *)p_des, (void *)p_src, pOutputPlane[2].u_width);
+ p_des += pOutputPlane[2].u_stride;
+ p_src += pStreamContext->pDecYuvWithEffect[2].u_stride;
+ }
+ } else {
+
+ /* NOTE(review): pDecYuvData must have been set via setOption
+ before the first render — nothing here guards against NULL. */
+ p_des = pOutputPlane[0].pac_data + pOutputPlane[0].u_topleft;
+ p_src = pStreamContext->pDecYuvData[0].pac_data +
+ pStreamContext->pDecYuvData[0].u_topleft;
+
+ for (height = 0; height<pOutputPlane[0].u_height; height++) {
+ memcpy((void *)p_des, (void *)p_src, pOutputPlane[0].u_width);
+ p_des += pOutputPlane[0].u_stride;
+ p_src += pStreamContext->pDecYuvData[0].u_stride;
+ }
+
+ p_des = pOutputPlane[1].pac_data + pOutputPlane[1].u_topleft;
+ p_src = pStreamContext->pDecYuvData[1].pac_data +
+ pStreamContext->pDecYuvData[1].u_topleft;
+
+ for (height = 0; height<pOutputPlane[1].u_height; height++) {
+ memcpy((void *)p_des, (void *)p_src, pOutputPlane[1].u_width);
+ p_des += pOutputPlane[1].u_stride;
+ p_src += pStreamContext->pDecYuvData[1].u_stride;
+ }
+
+ p_des = pOutputPlane[2].pac_data + pOutputPlane[2].u_topleft;
+ p_src = pStreamContext->pDecYuvData[2].pac_data +
+ pStreamContext->pDecYuvData[2].u_topleft;
+
+ for (height = 0; height<pOutputPlane[2].u_height; height++) {
+ memcpy((void *)p_des,(void *)p_src,pOutputPlane[2].u_width);
+ p_des += pOutputPlane[2].u_stride;
+ p_src += pStreamContext->pDecYuvData[2].u_stride;
+ }
+ }
+ return err;
+}
+
+/**
+ ************************************************************************
+ * @brief Retrieves the interface implemented by the decoder
+ * @param pDecoderType : Pointer to a M4DECODER_VideoType
+ * (allocated by the caller)
+ * that will be filled with the decoder type
+ * @param pDecoderInterface : Address of a pointer that will be set to
+ * the interface implemented by this decoder.
+ * The interface is a structure allocated by
+ * this function and must be freed by the caller.
+ *
+ * @returns : M4NO_ERROR if OK
+ * M4ERR_ALLOC if allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4DECODER_NULL_getInterface (M4DECODER_VideoType *pDecoderType,
+ M4DECODER_VideoInterface **pDecoderInterface) {
+
+ /* Allocate the function-pointer table; ownership transfers to the
+ caller, who must free it (see the header comment above). */
+ *pDecoderInterface =
+ (M4DECODER_VideoInterface*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4DECODER_VideoInterface),
+ M4DECODER_MPEG4, (M4OSA_Char *)"M4DECODER_VideoInterface");
+
+ if (M4OSA_NULL == *pDecoderInterface) {
+ return M4ERR_ALLOC;
+ }
+
+ *pDecoderType = M4DECODER_kVideoTypeYUV420P;
+
+ /* Wire every entry point to the null implementations above. */
+ (*pDecoderInterface)->m_pFctCreate = M4DECODER_NULL_create;
+ (*pDecoderInterface)->m_pFctDestroy = M4DECODER_NULL_destroy;
+ (*pDecoderInterface)->m_pFctGetOption = M4DECODER_NULL_getOption;
+ (*pDecoderInterface)->m_pFctSetOption = M4DECODER_NULL_setOption;
+ (*pDecoderInterface)->m_pFctDecode = M4DECODER_NULL_decode;
+ (*pDecoderInterface)->m_pFctRender = M4DECODER_NULL_render;
+
+ return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4PCMR_CoreReader.c b/libvideoeditor/vss/src/M4PCMR_CoreReader.c
new file mode 100755
index 0000000..3343254
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PCMR_CoreReader.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4PCMR_CoreReader.c
+ * @brief PCM reader implementation
+ * @note This file implements functions of the PCM reader
+ ************************************************************************
+ */
+#include "M4OSA_CharStar.h"
+#include "M4PCMR_CoreReader.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CharStar.h"
+/**
+ ******************************************************************************
+ * PCM reader version numbers
+ ******************************************************************************
+ */
+/* CHANGE_VERSION_HERE */
+#define M4PCMR_VERSION_MAJOR 1
+#define M4PCMR_VERSION_MINOR 0
+#define M4PCMR_VERSION_REVISION 0
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+ * M4OSA_FileReaderPointer* pFileFunction)
+ * @brief This function opens a PCM file
+ * @note This function :
+ * - opens a PCM file
+ * - initializes PCM context,
+ * - verifies PCM file format
+ * - Fill decoder config structure
+ * - Changes state of the reader in 'Opening'
+ * @param pContext: (OUT) Pointer on the PCM Reader context
+ * @param pUrl: (IN) Name of the PCM file
+ * @param pFileFunctions: (IN) Pointer on the file access functions
+ * @return M4NO_ERROR there is no error during the opening
+ * @return M4ERR_PARAMETER pContext and/or pUrl and/or pFileFunction is NULL
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4ERR_FILE_NOT_FOUND the file cannot be found
+ * @return M4PCMC_ERR_PCM_NOT_COMPLIANT the file does not seem to be compliant, no RIFF,
+ * or lack of any mandatory chunk.
+ * @return M4PCMC_ERR_PCM_NOT_SUPPORTED the PCM format of this file is not supported by the
+ * reader
+ * @return Any M4OSA_FILE errors see OSAL File specification for detailed errors
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+ M4OSA_FileReadPointer* pFileFunction)
+{
+ M4OSA_ERR err;
+ M4PCMR_Context *context;
+ M4OSA_Char* pTempURL;
+ M4OSA_Char value[6];
+
+ /* Check parameters */
+ if((M4OSA_NULL == pContext)|| (M4OSA_NULL == pUrl) ||(M4OSA_NULL == pFileFunction))
+ {
+ return M4ERR_PARAMETER;
+ }
+
+ /* Allocates the context */
+ context = (M4PCMR_Context *)M4OSA_32bitAlignedMalloc(sizeof(M4PCMR_Context), M4WAV_READER,
+ (M4OSA_Char *)"M4PCMR_openRead");
+ if (M4OSA_NULL == context)
+ {
+ return M4ERR_ALLOC;
+ }
+ *pContext = (M4OSA_Context)context;
+
+ /* Initialize the context */
+ context->m_offset = 0;
+ context->m_state = M4PCMR_kInit;
+ context->m_microState = M4PCMR_kInit;
+ /* Store the file functions up-front so the cleanup path can close the
+ file if a later step fails. */
+ context->m_pFileReadFunc = pFileFunction;
+ context->m_fileContext = M4OSA_NULL;
+ context->m_pAuBuffer = M4OSA_NULL;
+ context->m_pDecoderSpecInfo = M4OSA_NULL;
+
+ /* The sampling frequency and channel count are encoded in the URL
+ tail as "_<freq>_<nb>.pcm"; parse them out. */
+ pTempURL = (M4OSA_Char*)pUrl + (strlen((const char *)pUrl)-11);
+ M4OSA_chrNCopy(value, pTempURL, 5);
+ M4OSA_chrGetUInt32(pTempURL, &(context->m_decoderConfig.SampleFrequency),
+ M4OSA_NULL, M4OSA_kchrDec);
+
+ /* Set number of channels */
+ pTempURL += 6;
+ M4OSA_chrNCopy(value, pTempURL, 1);
+ M4OSA_chrGetUInt16(pTempURL, &(context->m_decoderConfig.nbChannels),
+ M4OSA_NULL, M4OSA_kchrDec);
+
+ /* Strip the "_<freq>_<nb>.pcm" suffix to recover the real file name. */
+ M4OSA_chrNCopy(pUrl,pUrl, (strlen((const char *)pUrl)-12));
+ /* Open the file */
+ err = pFileFunction->openRead(&(context->m_fileContext), pUrl, M4OSA_kFileRead);
+ if(M4NO_ERROR != err)
+ {
+ /* Was a context leak: free via the common cleanup path instead of
+ returning directly. */
+ goto cleanup;
+ }
+ /* This reader always assumes 16-bit PCM. */
+ context->m_decoderConfig.BitsPerSample = 16;
+ context->m_decoderConfig.AvgBytesPerSec = context->m_decoderConfig.SampleFrequency * 2 \
+ * context->m_decoderConfig.nbChannels;
+ err = pFileFunction->getOption(context->m_fileContext, M4OSA_kFileReadGetFileSize,
+ (M4OSA_DataOption*)&(context->m_decoderConfig.DataLength));
+ if(M4NO_ERROR != err)
+ {
+ /* Was a context + file-handle leak: route through cleanup. */
+ goto cleanup;
+ }
+ context->m_blockSize = 2048 * context->m_decoderConfig.nbChannels; // Raw PCM. Hence, get a
+ // chunk of data
+
+ if(context->m_decoderConfig.SampleFrequency == 8000)
+ {
+ /* AMR case: read exact 20 ms blocks. */
+ context->m_blockSize = context->m_decoderConfig.nbChannels *\
+ (context->m_decoderConfig.SampleFrequency / 50) * \
+ (context->m_decoderConfig.BitsPerSample / 8);
+ }
+ if(context->m_decoderConfig.SampleFrequency == 16000)
+ {
+ /* AAC case, we can't read only 20 ms blocks */
+ context->m_blockSize = 2048 * context->m_decoderConfig.nbChannels;
+ }
+ context->m_dataStartOffset = 0;
+
+ context->m_pAuBuffer = (M4OSA_MemAddr32)M4OSA_32bitAlignedMalloc(context->m_blockSize, M4WAV_READER,
+ (M4OSA_Char *)"Core PCM reader Access Unit");
+ if (M4OSA_NULL == context->m_pAuBuffer)
+ {
+ err = M4ERR_ALLOC;
+ goto cleanup;
+ }
+
+ /* Change state */
+ context->m_state = M4PCMR_kOpening;
+
+ return M4NO_ERROR;
+
+cleanup:
+
+ /* Close the file only if it was actually opened. */
+ if((context->m_pFileReadFunc != M4OSA_NULL) && (context->m_fileContext != M4OSA_NULL))
+ {
+ context->m_pFileReadFunc->closeRead(context->m_fileContext);
+ }
+
+ /* Free internal context */
+ free(context);
+ *pContext = M4OSA_NULL;
+
+ return err;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc)
+ * @brief This function get the (unique) stream of a PCM file
+ * @note This function :
+ * - Allocates and fills the decoder specific info structure
+ * - Fills decoder specific infos structure
+ * - Fills pStreamDesc structure allocated by the caller
+ * @param context: (IN/OUT) PCM Reader context
+ * @param pStreamDesc: (IN) Stream Description context
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is NULL
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4ERR_STATE this function cannot be called now
+ * @return Any M4OSA_FILE errors see OSAL File specification for detailed errors
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc)
+{
+ M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+ /* Check parameters */
+ if((M4OSA_NULL == context)|| (M4OSA_NULL == pStreamDesc))
+ {
+ return M4ERR_PARAMETER;
+ }
+
+ /* The single stream has already been handed out once. */
+ if (c->m_state == M4PCMR_kOpening_streamRetrieved)
+ {
+ return M4WAR_NO_MORE_STREAM;
+ }
+ /* Check Reader's m_state */
+ if(c->m_state != M4PCMR_kOpening)
+ {
+ return M4ERR_STATE;
+ }
+
+ /* Only one stream is contained in PCM file */
+ pStreamDesc->streamID = 1;
+ /* Not used */
+ pStreamDesc->profileLevel = 0;
+ pStreamDesc->decoderSpecificInfoSize = sizeof(M4PCMC_DecoderSpecificInfo);
+
+ /* Allocates decoder specific info structure */
+ pStreamDesc->decoderSpecificInfo = M4OSA_NULL;
+ pStreamDesc->decoderSpecificInfo =
+ (M4OSA_MemAddr32)M4OSA_32bitAlignedMalloc( sizeof(M4PCMC_DecoderSpecificInfo), M4WAV_READER,
+ (M4OSA_Char *)"M4PCMR_getNextStream");
+ if(pStreamDesc->decoderSpecificInfo == M4OSA_NULL)
+ {
+ return M4ERR_ALLOC;
+ }
+ /* Fill decoderSpecificInfo structure, with decoder config structure filled in 'openread'
+ function */
+ memcpy((void *)pStreamDesc->decoderSpecificInfo,
+ (void *)&c->m_decoderConfig, sizeof(M4PCMC_DecoderSpecificInfo));
+
+ /* Fill other fields of pStreamDesc structure */
+ /* Duration (in timeScale units, ms) = payload bytes / avg byte rate. */
+ pStreamDesc->timeScale = 1000;
+ pStreamDesc->duration = (M4OSA_Time)(((M4OSA_Double)(c->m_decoderConfig.DataLength)\
+ / (M4OSA_Double)(c->m_decoderConfig.AvgBytesPerSec))*pStreamDesc->timeScale);
+ pStreamDesc->averageBitrate = c->m_decoderConfig.AvgBytesPerSec * 8;/* in bits, multiply by 8*/
+ pStreamDesc->maxBitrate = pStreamDesc->averageBitrate; /* PCM stream has constant bitrate */
+
+ /* Determines Stream type */
+ switch(c->m_decoderConfig.BitsPerSample)
+ {
+ case 8:
+ switch(c->m_decoderConfig.nbChannels)
+ {
+ case 1:
+ pStreamDesc->streamType = M4SYS_kPCM_8bitsU;
+ break;
+// case 2:
+// pStreamDesc->streamType = M4SYS_kPCM_8bitsS; /* ??? 8bits stereo not
+ // defined ? */
+// break;
+ default:
+ pStreamDesc->streamType = M4SYS_kAudioUnknown;
+ }
+ break;
+
+ case 16:
+ switch(c->m_decoderConfig.nbChannels)
+ {
+ case 1:
+ pStreamDesc->streamType = M4SYS_kPCM_16bitsU;
+ break;
+ case 2:
+ pStreamDesc->streamType = M4SYS_kPCM_16bitsS;
+ break;
+ default:
+ pStreamDesc->streamType = M4SYS_kAudioUnknown;
+ }
+ break;
+
+ default:
+ pStreamDesc->streamType = M4SYS_kAudioUnknown;
+ }
+
+ /* Keep a reference to the specific info in the context (presumably
+ released at close time — not visible in this file chunk). */
+ c->m_pDecoderSpecInfo = pStreamDesc->decoderSpecificInfo;
+
+ c->m_state = M4PCMR_kOpening_streamRetrieved;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs)
+ * @brief This function starts reading the unique stream of a PCM file
+ * @note This function :
+ * - Verifies that the current reader's state allows to start reading a stream
+ * - Check that provided StreamId is correct (always true, only one stream...)
+ * In the player application, a StreamId table is initialized as follow:
+ * M4SYS_StreamID pStreamID[2]={1,0};
+ * - Change state of the reader in 'Reading'
+ * @param context: (IN/OUT) PCM Reader context
+ * @param streamID: (IN) Stream selection
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is NULL
+ * @return M4ERR_STATE this function cannot be called now
+ * @return M4ERR_BAD_STREAM_ID at least one of the streamID does not exist
+ * (should never happen if table pStreamID is correctly initialized as above)
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs)
+{
+ /* Moves the reader from 'stream retrieved' to 'reading'; the stream id
+ table must select the file's single stream (id 0 or 1). */
+ M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+ /* Check parameters */
+ if((M4OSA_NULL == context) || (M4OSA_NULL == pStreamIDs))
+ {
+ return M4ERR_PARAMETER;
+ }
+
+ /* Check Reader's state */
+ if(c->m_state != M4PCMR_kOpening_streamRetrieved)
+ {
+ return M4ERR_STATE;
+ }
+
+ /* Check pStreamID and if they're OK, change reader's state */
+ if(pStreamIDs[0] == 1 || pStreamIDs[0] == 0)
+ /* First and unique stream contained in PCM file */
+ {
+ c->m_state = M4PCMR_kReading;
+ c->m_microState = M4PCMR_kReading;
+ }
+ else
+ {
+ return M4ERR_BAD_STREAM_ID;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+ * @brief This function reads the next AU contained in the PCM file
+ * @note This function :
+ * - Verifies that the current reader's state allows to read an AU
+ * - Allocates memory to store read AU
+ * - Read data from file and store them into previously allocated memory
+ * - Fill AU structure fields (CTS...)
+ * - Change state of the reader in 'Reading' (not useful...)
+ * - Change Micro state 'Reading' in M4PCMR_kReading_nextAU
+ * (AU is read and can be deleted)
+ * - Check if the last AU has been read or if we're about to read it
+ * @param context: (IN/OUT) PCM Reader context
+ * @param streamID: (IN) Stream selection
+ * @param pAU: (IN/OUT) Access Unit Structure
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is NULL
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4ERR_STATE this function cannot be called now
+ * @return M4M4WAR_NO_DATA_YET there is no enough data in the file to provide a new access unit.
+ * @return M4WAR_END_OF_STREAM There is no more access unit in the stream,
+ * or the sample number is bigger the maximum one.
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+{
+ M4PCMR_Context *c = (M4PCMR_Context *)context;
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_UInt32 size_read;
+
+ /* Check parameters */
+ if((M4OSA_NULL == context) || (M4OSA_NULL == pAU))
+ {
+ return M4ERR_PARAMETER;
+ }
+
+ /* Check Reader's state */
+ /* NOTE(review): with '&&' this only rejects when BOTH the state and the
+ micro-state are wrong, so reading a new AU before freeing the previous
+ one is tolerated — confirm this is intentional. */
+ if(c->m_state != M4PCMR_kReading && c->m_microState != M4PCMR_kReading)
+ {
+ return M4ERR_STATE;
+ }
+
+ /* The AU aliases the context's single reusable buffer; it stays valid
+ only until M4PCMR_freeAU or the next read. */
+ pAU->dataAddress = c->m_pAuBuffer;
+ size_read = c->m_blockSize;
+
+ /* Clamp the final read to the remaining payload bytes. */
+ if((c->m_offset + size_read) >= c->m_decoderConfig.DataLength)
+ {
+ size_read = c->m_decoderConfig.DataLength - c->m_offset;
+ }
+
+ /* Read data in file, and copy it to AU Structure */
+ err = c->m_pFileReadFunc->readData(c->m_fileContext, (M4OSA_MemAddr8)pAU->dataAddress,
+ (M4OSA_UInt32 *)&size_read);
+ if(M4NO_ERROR != err)
+ {
+ return err;
+ }
+
+ /* Calculates the new m_offset, used to determine whether we're at end of reading or not */
+ c->m_offset = c->m_offset + size_read;
+
+ /* Fill others parameters of AU structure */
+ /* CTS (ms) = bytes consumed so far / average byte rate. */
+ pAU->CTS =
+ (M4OSA_Time)(((M4OSA_Double)c->m_offset/(M4OSA_Double)c->m_decoderConfig.AvgBytesPerSec)\
+ *1000);
+ pAU->DTS = pAU->CTS;
+
+ pAU->attribute = 0;
+ pAU->frag = M4OSA_NULL;
+ pAU->nbFrag = 0;
+ pAU->stream = M4OSA_NULL;
+ pAU->size = size_read;
+
+ /* Change states */
+ c->m_state = M4PCMR_kReading; /* Not changed ... */
+ c->m_microState = M4PCMR_kReading_nextAU; /* AU is read and can be deleted */
+
+ /* Check if there is another AU to read */
+ /* ie: if decoded nb of bytes = nb of bytes to decode,
+ it means there is no more AU to decode */
+ if(c->m_offset >= c->m_decoderConfig.DataLength)
+ {
+ return M4WAR_NO_MORE_AU;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+ * @brief This function frees the AU provided in parameter
+ * @note This function :
+ * - Verifies that the current reader's state allows to free an AU
+ * - Free dataAddress field of AU structure
+ * - Change state of the reader in 'Reading' (not useful...)
+ * - Change Micro state 'Reading' in M4PCMR_kReading (another AU can be read)
+ * @param context: (IN/OUT) PCM Reader context
+ * @param streamID: (IN) Stream selection
+ * @param pAU:        (IN) Access Unit Structure
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is NULL
+ * @return M4ERR_STATE this function cannot be called now
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+    /* Check parameters (streamID is unused: the PCM reader handles a single stream) */
+    if((M4OSA_NULL == context ) || (M4OSA_NULL == pAU))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state: an AU can only be freed right after M4PCMR_nextAU */
+    if(c->m_state != M4PCMR_kReading && c->m_microState != M4PCMR_kReading_nextAU)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* The AU data aliases the reader's internal block buffer (set in M4PCMR_nextAU),
+       so nothing is deallocated here - only the reference is cleared */
+    pAU->dataAddress = M4OSA_NULL;
+
+    /* Change states */
+    c->m_state = M4PCMR_kReading; /* Not changed ... */
+    c->m_microState = M4PCMR_kReading; /* AU is deleted, another AU can be read */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID,
+ M4OSA_Time time, M4SYS_seekAccessMode seekAccessMode,
+ M4OSA_Time* pObtainCTS[])
+ * @brief This function seeks into the PCM file at the provided time
+ * @note This function :
+ * - Verifies that the current reader's state allows to seek
+ * - Determines from provided time m_offset to seek in file
+ * - If m_offset is correct, seek in file
+ * - Update new m_offset in PCM reader context
+ * @param context: (IN/OUT) PCM Reader context
+ * @param pStreamID: (IN) Stream selection (not used, only 1 stream)
+ * @param time: (IN) Targeted time
+ * @param seekMode: (IN) Selects the seek access mode
+ * @param pObtainCTS[]: (OUT) Returned Time (not used)
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is NULL
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4ERR_STATE this function cannot be called now
+ * @return M4WAR_INVALID_TIME Specified time is not reachable
+ * @return M4ERR_NOT_IMPLEMENTED    This seek mode is not implemented yet
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+                      M4SYS_SeekAccessMode seekAccessMode, M4OSA_Time* pObtainCTS)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 offset;
+    M4OSA_UInt32 alignment;
+    M4OSA_UInt32 size_read;
+
+    /* Check parameters (pObtainCTS is not written: the obtained time is not reported) */
+    if((M4OSA_NULL == context) || (M4OSA_NULL == pStreamID))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state: seeking is allowed once the stream is retrieved or while reading */
+    if(c->m_state != M4PCMR_kOpening_streamRetrieved && c->m_state != M4PCMR_kReading)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Only absolute (from-beginning) seeks are supported */
+    switch(seekAccessMode)
+    {
+        case M4SYS_kBeginning:
+            /* Determine m_offset from time: ms * (avg bytes per second / 1000) */
+            offset =
+                (M4OSA_UInt32)(time * ((M4OSA_Double)(c->m_decoderConfig.AvgBytesPerSec) / 1000));
+            /** Round the offset DOWN to a whole sample frame
+                (nbChannels * bytes-per-sample) so playback stays channel-aligned */
+            alignment = c->m_decoderConfig.nbChannels*c->m_decoderConfig.BitsPerSample/8;
+            if (offset%alignment != 0)
+            {
+                offset -= offset%alignment;
+            }
+            /* Add the header offset: m_dataStartOffset is where the PCM payload begins */
+            offset += c->m_dataStartOffset;
+            /* If m_offset is over file size -> Invalid time */
+            if (offset > (c->m_dataStartOffset + c->m_decoderConfig.DataLength))
+            {
+                return M4WAR_INVALID_TIME;
+            }
+            else
+            {
+                /* Seek file */
+                size_read = offset;
+                err = c->m_pFileReadFunc->seek(c->m_fileContext, M4OSA_kFileSeekBeginning,
+                    (M4OSA_FilePosition *) &size_read);
+                if(M4NO_ERROR != err)
+                {
+                    return err;
+                }
+                /* Update m_offset in M4PCMR_context (relative to the data chunk start) */
+                c->m_offset = offset - c->m_dataStartOffset;
+            }
+            break;
+
+        default:
+            return M4ERR_NOT_IMPLEMENTED;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context)
+ * @brief This function closes PCM file, and frees context
+ * @note This function :
+ * - Verifies that the current reader's state allows close the PCM file
+ * - Closes the file
+ * - Free structures
+ * @param context: (IN/OUT) PCM Reader context
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is NULL
+ * @return M4ERR_STATE this function cannot be called now
+ ************************************************************************
+ */
+/* Closes the PCM file and frees the reader context.
+ * Fix vs. original: the state check is now performed BEFORE any resource is
+ * released. The original freed m_pDecoderSpecInfo first, so an invalid-state
+ * call returned M4ERR_STATE while leaving a dangling pointer in the context,
+ * and a later (valid) close would free it a second time. */
+M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check parameters */
+    if(M4OSA_NULL == context)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state first, so an invalid call releases nothing:
+       close is only legal in kReading, and not while an AU is still out
+       (micro state kReading_nextAU means the last AU was not freed) */
+    if(c->m_state != M4PCMR_kReading)
+    {
+        return M4ERR_STATE;
+    }
+    else if(c->m_microState == M4PCMR_kReading_nextAU)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Free the decoder specific info and the shared AU block buffer */
+    if(c->m_pDecoderSpecInfo != M4OSA_NULL)
+    {
+        free(c->m_pDecoderSpecInfo);
+    }
+
+    if (M4OSA_NULL != c->m_pAuBuffer)
+    {
+        free(c->m_pAuBuffer);
+    }
+
+    /* Close the file; report its status to the caller */
+    if (M4OSA_NULL != c->m_pFileReadFunc)
+    {
+        err = c->m_pFileReadFunc->closeRead(c->m_fileContext);
+    }
+
+    /* Free internal context (c is known non-NULL here, it was dereferenced above) */
+    free(c);
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+ * M4OSA_DataOption* pValue)
+ * @brief This function get option of the PCM Reader
+ * @note This function :
+ * - Verifies that the current reader's state allows to get an option
+ * - Return corresponding option value
+ * @param context: (IN/OUT) PCM Reader context
+ * @param optionID: (IN) ID of the option to get
+ * @param pValue: (OUT) Variable where the option value is returned
+ * @return M4NO_ERROR there is no error.
+ * @return M4ERR_PARAMETER at least one parameter is NULL.
+ * @return M4ERR_BAD_OPTION_ID the optionID is not a valid one.
+ * @return M4ERR_STATE this option is not available now.
+ * @return M4ERR_NOT_IMPLEMENTED this option is not implemented
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+                           M4OSA_DataOption* pValue)
+{
+    M4PCMR_Context *c =(M4PCMR_Context *)context;
+
+    /* Check parameters
+       NOTE(review): pValue is dereferenced below but not NULL-checked here - confirm
+       callers guarantee it is valid */
+    if(M4OSA_NULL == context)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check reader's state: options are readable in any of the three active states */
+    if((c->m_state != M4PCMR_kOpening) && (c->m_state != M4PCMR_kOpening_streamRetrieved)\
+         && (c->m_state != M4PCMR_kReading))
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Depending on the OptionID, the value to return is different */
+    switch(optionID)
+    {
+        case M4PCMR_kPCMblockSize:
+            /* Returns a pointer to the LIVE context field, not a copy */
+            *pValue = &c->m_blockSize;
+            break;
+
+        default:
+            return M4ERR_BAD_OPTION_ID;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+ * M4OSA_DataOption Value)
+ * @brief This function set option of the PCM Reader
+ * @note This function :
+ * - Verifies that the current reader's state allows to set an option
+ * - Set corresponding option value
+ * @param context: (IN/OUT) PCM Reader context
+ * @param optionID: (IN) ID of the option to get
+ * @param Value: (IN) Variable where the option value is stored
+ * @return M4NO_ERROR there is no error.
+ * @return M4ERR_PARAMETER at least one parameter is NULL.
+ * @return M4ERR_BAD_OPTION_ID the optionID is not a valid one.
+ * @return M4ERR_STATE this option is not available now.
+ * @return M4ERR_NOT_IMPLEMENTED this option is not implemented
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID, M4OSA_DataOption Value)
+{
+    M4PCMR_Context *c =(M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if(context == M4OSA_NULL)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check reader's state: options are writable in any of the three active states */
+    if((c->m_state != M4PCMR_kOpening) && (c->m_state != M4PCMR_kOpening_streamRetrieved)\
+         && (c->m_state != M4PCMR_kReading))
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Depending on the OptionID, the value to set is different */
+    switch(optionID)
+    {
+        case M4PCMR_kPCMblockSize:
+            /* Value carries the block size BY VALUE inside the opaque option type
+               (integer smuggled through a pointer-sized handle), hence the cast */
+            c->m_blockSize = (M4OSA_UInt32)Value;
+            break;
+
+        default:
+            return M4ERR_BAD_OPTION_ID;
+    }
+
+    return M4NO_ERROR;
+}
+
+/*********************************************************/
+/* Returns the compile-time version of the PCM reader in *pVersion. */
+M4OSA_ERR M4PCMR_getVersion (M4_VersionInfo *pVersion)
+/*********************************************************/
+{
+    M4OSA_TRACE1_1("M4PCMR_getVersion called with pVersion: 0x%x", pVersion);
+    /* NOTE(review): casting a pointer to M4OSA_UInt32 truncates on 64-bit
+       platforms; harmless here since only zero/non-zero is tested, but worth
+       confirming against the project's pointer-width conventions */
+    M4OSA_DEBUG_IF1(((M4OSA_UInt32) pVersion == 0),M4ERR_PARAMETER,
+         "pVersion is NULL in M4PCMR_getVersion");
+
+    pVersion->m_major = M4PCMR_VERSION_MAJOR;
+    pVersion->m_minor = M4PCMR_VERSION_MINOR;
+    pVersion->m_revision = M4PCMR_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4PTO3GPP_API.c b/libvideoeditor/vss/src/M4PTO3GPP_API.c
new file mode 100755
index 0000000..042ffb7
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PTO3GPP_API.c
@@ -0,0 +1,1928 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4PTO3GPP_API.c
+ * @brief Picture to 3gpp Service implementation.
+ * @note
+ ******************************************************************************
+*/
+
+/*16 bytes signature to be written in the generated 3gp files */
+#define M4PTO3GPP_SIGNATURE "NXP-SW : PTO3GPP"
+
+/****************/
+/*** Includes ***/
+/****************/
+
+/**
+ * Our header */
+#include "M4PTO3GPP_InternalTypes.h"
+#include "M4PTO3GPP_API.h"
+
+/**
+ * Our errors */
+#include "M4PTO3GPP_ErrorCodes.h"
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+#include "VideoEditorVideoEncoder.h"
+#endif
+
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /* OSAL memory management */
+#include "M4OSA_Debug.h" /* OSAL debug management */
+
+
+/************************/
+/*** Various Magicals ***/
+/************************/
+
+#define M4PTO3GPP_WRITER_AUDIO_STREAM_ID 1
+#define M4PTO3GPP_WRITER_VIDEO_STREAM_ID 2
+#define M4PTO3GPP_QUANTIZER_STEP 4 /**< Quantizer step */
+#define M4PTO3GPP_WRITER_AUDIO_PROFILE_LEVEL 0xFF /**< No specific profile and
+ level */
+#define M4PTO3GPP_WRITER_AUDIO_AMR_TIME_SCALE 8000 /**< AMR */
+#define M4PTO3GPP_BITRATE_REGULATION_CTS_PERIOD_IN_MS 500 /**< MAGICAL */
+#define M4PTO3GPP_MARGE_OF_FILE_SIZE 25000 /**< MAGICAL */
+/**
+ ******************************************************************************
+ * define AMR 12.2 kbps silence frame
+ ******************************************************************************
+*/
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE     32
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_DURATION 20
+/* Pre-encoded AMR 12.2 kbps silence access unit (32 bytes, 20 ms),
+ * written verbatim when audio padding with silence is requested */
+const M4OSA_UInt8 M4PTO3GPP_AMR_AU_SILENCE_122_FRAME[M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE]=
+{ 0x3C, 0x91, 0x17, 0x16, 0xBE, 0x66, 0x78, 0x00, 0x00, 0x01, 0xE7, 0xAF,
+  0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 20
+/* Pre-encoded AMR 4.75 kbps silence access unit (13 bytes, 20 ms) */
+const M4OSA_UInt8 M4PTO3GPP_AMR_AU_SILENCE_048_FRAME[M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+{ 0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33, 0xFF, 0xE0, 0x00, 0x00, 0x00 };
+
+/***************************/
+/*** "Private" functions ***/
+/***************************/
+static M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC);
+
+/****************************/
+/*** "External" functions ***/
+/****************************/
+extern M4OSA_ERR M4WRITER_3GP_getInterfaces(M4WRITER_OutputFileType* pType,
+ M4WRITER_GlobalInterface** SrcGlobalInterface,
+ M4WRITER_DataInterface** SrcDataInterface);
+extern M4OSA_ERR M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
+ M4READER_GlobalInterface **pRdrGlobalInterface,
+ M4READER_DataInterface **pRdrDataInterface);
+extern M4OSA_ERR M4READER_3GP_getInterfaces(M4READER_MediaType *pMediaType,
+ M4READER_GlobalInterface **pRdrGlobalInterface,
+ M4READER_DataInterface **pRdrDataInterface);
+
+/****************************/
+/*** "Static" functions ***/
+/****************************/
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(
+ M4WRITER_DataInterface* pWriterDataIntInterface,
+ M4WRITER_Context* pWriterContext,
+ M4SYS_AccessUnit* pWriterAudioAU,
+ M4OSA_Time mtIncCts);
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(
+ M4WRITER_DataInterface* pWriterDataIntInterface,
+ M4WRITER_Context* pWriterContext,
+ M4SYS_AccessUnit* pWriterAudioAU,
+ M4OSA_Time mtIncCts);
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+ * @brief Get the M4PTO3GPP version.
+ * @note Can be called anytime. Do not need any context.
+ * @param pVersionInfo (OUT) Pointer to a version info structure
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+*/
+
+/*********************************************************/
+/* Fills *pVersionInfo with the compile-time PTO3GPP version; needs no context. */
+M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo)
+/*********************************************************/
+{
+    M4OSA_TRACE3_1("M4PTO3GPP_GetVersion called with pVersionInfo=0x%x", pVersionInfo);
+
+    /**
+     * Check input parameters (debug builds only: M4OSA_DEBUG_IF2) */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pVersionInfo),M4ERR_PARAMETER,
+        "M4PTO3GPP_GetVersion: pVersionInfo is M4OSA_NULL");
+
+    pVersionInfo->m_major       = M4PTO3GPP_VERSION_MAJOR;
+    pVersionInfo->m_minor       = M4PTO3GPP_VERSION_MINOR;
+    pVersionInfo->m_revision    = M4PTO3GPP_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext);
+ * @brief Initializes the M4PTO3GPP (allocates an execution context).
+ * @note
+ * @param pContext (OUT) Pointer on the M4PTO3GPP context to allocate
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @param pFileWritePtrFct (IN) Pointer to OSAL file writer functions
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (If Debug Level >= 2)
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+*/
+/*********************************************************/
+/* Allocates and zero-initializes the M4PTO3GPP execution context and returns
+ * it through *pContext. Every pointer field is explicitly set to M4OSA_NULL so
+ * that M4PTO3GPP_CleanUp() is safe immediately after Init().
+ * Fix vs. original: removed the unused local `M4OSA_UInt32 i;`.
+ *
+ * @param pContext         (OUT) Receives the newly allocated context
+ * @param pFileReadPtrFct  (IN)  OSAL file reader function set, stored as-is
+ * @param pFileWritePtrFct (IN)  OSAL file writer function set, stored as-is
+ * @return M4NO_ERROR on success, M4ERR_ALLOC if the context cannot be allocated */
+M4OSA_ERR M4PTO3GPP_Init( M4PTO3GPP_Context* pContext,
+                         M4OSA_FileReadPointer* pFileReadPtrFct,
+                         M4OSA_FileWriterPointer* pFileWritePtrFct)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext *pC;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Init called with pContext=0x%x", pContext);
+
+    /**
+     * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4PTO3GPP_Init: pContext is M4OSA_NULL");
+
+    /**
+     * Allocate the M4PTO3GPP context and return it to the user */
+    pC = (M4PTO3GPP_InternalContext*)M4OSA_32bitAlignedMalloc(sizeof(M4PTO3GPP_InternalContext), M4PTO3GPP,
+        (M4OSA_Char *)"M4PTO3GPP_InternalContext");
+    *pContext = pC;
+    if (M4OSA_NULL == pC)
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Step(): unable to allocate M4PTO3GPP_InternalContext,\
+                       returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+     * Init the context. All pointers must be initialized to M4OSA_NULL because CleanUp()
+     can be called just after Init(). */
+    pC->m_State = M4PTO3GPP_kState_CREATED;
+    pC->m_VideoState = M4PTO3GPP_kStreamState_NOSTREAM;
+    pC->m_AudioState = M4PTO3GPP_kStreamState_NOSTREAM;
+
+    /**
+     * Reader stuff */
+    pC->m_pReaderAudioAU        = M4OSA_NULL;
+    pC->m_pReaderAudioStream    = M4OSA_NULL;
+
+    /**
+     * Writer stuff */
+    pC->m_pEncoderHeader        = M4OSA_NULL;
+    pC->m_pWriterVideoStream    = M4OSA_NULL;
+    pC->m_pWriterAudioStream    = M4OSA_NULL;
+    pC->m_pWriterVideoStreamInfo= M4OSA_NULL;
+    pC->m_pWriterAudioStreamInfo= M4OSA_NULL;
+
+    /**
+     * Contexts of the used modules */
+    pC->m_pAudioReaderContext   = M4OSA_NULL;
+    pC->m_p3gpWriterContext     = M4OSA_NULL;
+    pC->m_pMp4EncoderContext    = M4OSA_NULL;
+    pC->m_eEncoderState         = M4PTO3GPP_kNoEncoder;
+
+    /**
+     * Interfaces of the used modules */
+    pC->m_pReaderGlobInt        = M4OSA_NULL;
+    pC->m_pReaderDataInt        = M4OSA_NULL;
+    pC->m_pWriterGlobInt        = M4OSA_NULL;
+    pC->m_pWriterDataInt        = M4OSA_NULL;
+    pC->m_pEncoderInt           = M4OSA_NULL;
+    pC->m_pEncoderExternalAPI   = M4OSA_NULL;
+    pC->m_pEncoderUserData      = M4OSA_NULL;
+
+    /**
+     * Fill the OSAL file function set */
+    pC->pOsalFileRead  = pFileReadPtrFct;
+    pC->pOsalFileWrite = pFileWritePtrFct;
+
+    /**
+     * Video rate control stuff */
+    pC->m_mtCts             = 0.0F;
+    pC->m_mtNextCts         = 0.0F;
+    pC->m_mtAudioCts        = 0.0F;
+    pC->m_AudioOffSet       = 0.0F;
+    pC->m_dLastVideoRegulCts= 0.0F;
+    pC->m_PrevAudioCts      = 0.0F;
+    pC->m_DeltaAudioCts     = 0.0F;
+
+    pC->m_MaxFileSize       = 0;
+    pC->m_CurrentFileSize   = 0;
+
+    pC->m_IsLastPicture         = M4OSA_FALSE;
+    pC->m_bAudioPaddingSilence  = M4OSA_FALSE;
+    pC->m_bLastInternalCallBack = M4OSA_FALSE;
+    pC->m_NbCurrentFrame        = 0;
+
+    pC->pSavedPlane = M4OSA_NULL;
+    pC->uiSavedDuration = 0;
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Init(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
+ * @brief Set the M4PTO3GPP input and output files.
+ * @note It opens the input file, but the output file may not be created yet.
+ * @param pContext (IN) M4PTO3GPP context
+ * @param pParams (IN) Pointer to the parameters for the PTO3GPP.
+ * @note The pointed structure can be de-allocated after this function returns because
+ * it is internally copied by the PTO3GPP
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: M4PTO3GPP is not in an appropriate state for this function to be
+ called
+ * @return M4ERR_ALLOC: There is no more available memory
+ * @return ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263 The output video frame
+ * size parameter is incompatible with H263 encoding
+ * @return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT The output video format
+ parameter is undefined
+ * @return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE The output video bit-rate parameter
+ is undefined
+ * @return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE The output video frame size parameter
+ is undefined
+ * @return ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE The output file size parameter
+ is undefined
+ * @return ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING The output audio padding parameter
+ is undefined
+ * @return ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE The input audio file contains
+ a track format not handled by PTO3GPP
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4READER_MediaFamily    mediaFamily;
+    M4_StreamHandler*       pStreamHandler;
+    M4READER_MediaType      readerMediaType;
+
+    M4OSA_TRACE2_2("M4PTO3GPP_Open called with pContext=0x%x, pParams=0x%x", pContext, pParams);
+
+    /**
+     * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER, \
+        "M4PTO3GPP_Open: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams),  M4ERR_PARAMETER, \
+        "M4PTO3GPP_Open: pParams is M4OSA_NULL");
+
+    /**
+     * Check parameters correctness */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pPictureCallbackFct),
+        M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pPictureCallbackFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pPictureCallbackCtxt),
+        M4ERR_PARAMETER,
+        "M4PTO3GPP_Open: pC->m_Params.pPictureCallbackCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pOutput3gppFile),
+        M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pOutput3gppFile is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pTemporaryFile),
+        M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pTemporaryFile is M4OSA_NULL");
+
+    /**
+     * Video Format: only H263, MPEG4 and H264 outputs are supported */
+    if( (M4VIDEOEDITING_kH263 != pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kMPEG4 != pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kH264 != pParams->OutputVideoFormat)) {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video format");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+    }
+
+    /**
+     * Video Bitrate: must be one of the enumerated presets (or variable) */
+    if(!((M4VIDEOEDITING_k16_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k24_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k32_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k48_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k64_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k96_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k128_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k192_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k256_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k288_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k384_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k512_KBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k800_KBPS == pParams->OutputVideoBitrate) ||
+         /*+ New Encoder bitrates */
+         (M4VIDEOEDITING_k2_MBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k5_MBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k8_MBPS == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_kVARIABLE_KBPS == pParams->OutputVideoBitrate))) {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video bitrate");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+    }
+
+    /**
+     * Video frame size: must be one of the enumerated presets */
+    if (!((M4VIDEOEDITING_kSQCIF == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQQVGA == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQCIF == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQVGA == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kCIF == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kVGA == pParams->OutputVideoFrameSize) ||
+
+          (M4VIDEOEDITING_kNTSC == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kWVGA == pParams->OutputVideoFrameSize) ||
+
+          (M4VIDEOEDITING_k640_360 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_k854_480 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_k1280_720 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_k1080_720 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_k960_720 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_k1920_1080 == pParams->OutputVideoFrameSize))) {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video frame size");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+    }
+
+    /**
+     * Maximum size of the output 3GPP file */
+    if (!((M4PTO3GPP_k50_KB == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k75_KB == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k100_KB == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k150_KB == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k200_KB == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k300_KB == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k400_KB == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k500_KB == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_kUNLIMITED == pParams->OutputFileMaxSize))) {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output 3GPP file size");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE;
+    }
+
+    /* Audio padding mode is only checked when an input audio track is provided */
+    if (M4OSA_NULL != pParams->pInputAudioTrackFile) {
+        if ((!( (M4PTO3GPP_kAudioPaddingMode_None == pParams->AudioPaddingMode) ||
+                (M4PTO3GPP_kAudioPaddingMode_Silence== pParams->AudioPaddingMode) ||
+                (M4PTO3GPP_kAudioPaddingMode_Loop == pParams->AudioPaddingMode)))) {
+            M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined audio padding");
+            return ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING;
+        }
+    }
+
+    /**< Size check for H263 (only valid sizes are CIF, QCIF and SQCIF) */
+    if ((M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kSQCIF != pParams->OutputVideoFrameSize) &&
+        (M4VIDEOEDITING_kQCIF != pParams->OutputVideoFrameSize) &&
+        (M4VIDEOEDITING_kCIF != pParams->OutputVideoFrameSize)) {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open():\
+             returning ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263");
+        return ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263;
+    }
+
+    /**
+     * Check state automaton: Open() is only legal right after Init() */
+    if (M4PTO3GPP_kState_CREATED != pC->m_State) {
+        M4OSA_TRACE1_1("M4PTO3GPP_Open(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+        return M4ERR_STATE;
+    }
+
+    /**
+     * Copy the M4PTO3GPP_Params structure (so the caller may free its copy) */
+    memcpy((void *)(&pC->m_Params),
+        (void *)pParams, sizeof(M4PTO3GPP_Params));
+    M4OSA_TRACE1_1("M4PTO3GPP_Open: outputVideoBitrate = %d", pC->m_Params.OutputVideoBitrate);
+
+    /***********************************/
+    /* Open input file with the reader */
+    /***********************************/
+    if (M4OSA_NULL != pC->m_Params.pInputAudioTrackFile) {
+        /**
+         * Get the reader interface according to the input audio file type */
+        switch(pC->m_Params.AudioFileFormat)
+        {
+#ifdef M4VSS_SUPPORT_READER_AMR
+        case M4VIDEOEDITING_kFileType_AMR:
+        err = M4READER_AMR_getInterfaces( &readerMediaType, &pC->m_pReaderGlobInt,
+            &pC->m_pReaderDataInt);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): M4READER_AMR_getInterfaces returns 0x%x", err);
+            return err;
+        }
+        break;
+#endif
+
+#ifdef AAC_SUPPORTED
+        case M4VIDEOEDITING_kFileType_3GPP:
+        err = M4READER_3GP_getInterfaces( &readerMediaType, &pC->m_pReaderGlobInt,
+            &pC->m_pReaderDataInt);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): M4READER_3GP_getInterfaces returns 0x%x", err);
+            return err;
+        }
+        break;
+#endif
+
+        default:
+            return ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE;
+        }
+
+        /**
+         * Initializes the reader shell */
+        err = pC->m_pReaderGlobInt->m_pFctCreate(&pC->m_pAudioReaderContext);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctCreate returns 0x%x", err);
+            return err;
+        }
+
+        pC->m_pReaderDataInt->m_readerContext = pC->m_pAudioReaderContext;
+        /**< Link the reader interface to the reader context */
+
+        /**
+         * Set the reader shell file access functions */
+        err = pC->m_pReaderGlobInt->m_pFctSetOption(pC->m_pAudioReaderContext,
+            M4READER_kOptionID_SetOsaFileReaderFctsPtr, (M4OSA_DataOption)pC->pOsalFileRead);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctSetOption returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         * Open the input audio file */
+        err = pC->m_pReaderGlobInt->m_pFctOpen(pC->m_pAudioReaderContext,
+            pC->m_Params.pInputAudioTrackFile);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctOpen returns 0x%x", err);
+            pC->m_pReaderGlobInt->m_pFctDestroy(pC->m_pAudioReaderContext);
+            pC->m_pAudioReaderContext = M4OSA_NULL;
+            return err;
+        }
+
+        /**
+         * Get the audio streams from the input file.
+         * Iterates all streams; keeps the FIRST audio stream found and marks all
+         * others as not-OK. Unknown stream types and extra streams are skipped. */
+        err = M4NO_ERROR;
+        while (M4NO_ERROR == err)
+        {
+            err = pC->m_pReaderGlobInt->m_pFctGetNextStream(pC->m_pAudioReaderContext,
+                &mediaFamily, &pStreamHandler);
+
+            if((err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE)) ||
+               (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)))
+            {
+                err = M4NO_ERROR;
+                continue;
+            }
+
+            if (M4NO_ERROR == err) /**< One stream found */
+            {
+                /**< Found an audio stream */
+                if ((M4READER_kMediaFamilyAudio == mediaFamily)
+                    && (M4OSA_NULL == pC->m_pReaderAudioStream))
+                {
+                    pC->m_pReaderAudioStream = (M4_AudioStreamHandler*)pStreamHandler;
+                    /**< Keep pointer to the audio stream */
+                    M4OSA_TRACE3_0("M4PTO3GPP_Open(): Found an audio stream in input");
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                     * Allocate audio AU used for read operations */
+                    pC->m_pReaderAudioAU = (M4_AccessUnit*)M4OSA_32bitAlignedMalloc(sizeof(M4_AccessUnit),
+                        M4PTO3GPP,(M4OSA_Char *)"pReaderAudioAU");
+                    if (M4OSA_NULL == pC->m_pReaderAudioAU)
+                    {
+                        M4OSA_TRACE1_0("M4PTO3GPP_Open(): unable to allocate pReaderAudioAU, \
+                                       returning M4ERR_ALLOC");
+                        return M4ERR_ALLOC;
+                    }
+
+                    /**
+                     * Initializes an access Unit */
+                    err = pC->m_pReaderGlobInt->m_pFctFillAuStruct(pC->m_pAudioReaderContext,
+                             pStreamHandler, pC->m_pReaderAudioAU);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Open():\
+                             pReaderGlobInt->m_pFctFillAuStruct(audio)returns 0x%x", err);
+                        return err;
+                    }
+                }
+                else
+                {
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+            else if (M4WAR_NO_MORE_STREAM != err) /**< Unexpected error code */
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Open():\
+                     pReaderGlobInt->m_pFctGetNextStream returns 0x%x",
+                     err);
+                return err;
+            }
+        } /* while*/
+    } /*if (M4OSA_NULL != pC->m_Params.pInputAudioTrackFile)*/
+
+    pC->m_VideoState = M4PTO3GPP_kStreamState_STARTED;
+
+    /**
+     * Init the audio stream: reset it to its beginning before the first Step() */
+    if (M4OSA_NULL != pC->m_pReaderAudioStream)
+    {
+        pC->m_AudioState = M4PTO3GPP_kStreamState_STARTED;
+        err = pC->m_pReaderGlobInt->m_pFctReset(pC->m_pAudioReaderContext,
+            (M4_StreamHandler*)pC->m_pReaderAudioStream);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderDataInt->m_pFctReset(audio returns 0x%x",
+                 err);
+            return err;
+        }
+    }
+
+    /**
+     * Update state automaton */
+    pC->m_State = M4PTO3GPP_kState_OPENED;
+
+    /**
+     * Translate the max-file-size enum into a byte count
+     * (kUNLIMITED leaves m_MaxFileSize at 0, meaning "no limit" in Step()) */
+    switch(pC->m_Params.OutputFileMaxSize)
+    {
+    case M4PTO3GPP_k50_KB:  pC->m_MaxFileSize = 50000;  break;
+    case M4PTO3GPP_k75_KB:  pC->m_MaxFileSize = 75000;  break;
+    case M4PTO3GPP_k100_KB: pC->m_MaxFileSize = 100000; break;
+    case M4PTO3GPP_k150_KB: pC->m_MaxFileSize = 150000; break;
+    case M4PTO3GPP_k200_KB: pC->m_MaxFileSize = 200000; break;
+    case M4PTO3GPP_k300_KB: pC->m_MaxFileSize = 300000; break;
+    case M4PTO3GPP_k400_KB: pC->m_MaxFileSize = 400000; break;
+    case M4PTO3GPP_k500_KB: pC->m_MaxFileSize = 500000; break;
+    case M4PTO3GPP_kUNLIMITED:
+    default:                                            break;
+    }
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Open(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
+ * @brief Perform one step of trancoding.
+ * @note
+ * @param pContext (IN) M4PTO3GPP context
+ * @return M4NO_ERROR No error
+ * @return M4ERR_PARAMETER pContext is M4OSA_NULL
+ * @return M4ERR_STATE: M4PTO3GPP is not in an appropriate state for this function
+ * to be called
+ * @return M4PTO3GPP_WAR_END_OF_PROCESSING Encoding completed
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+ /* One step = encode one video frame, then copy through every audio AU whose
+ * CTS falls before the next video CTS (audio is passed through, not decoded).
+ * The very first call only performs lazy initialization (Ready4Processing). */
+ M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_UInt32 l_uiAudioStepCount = 0;
+ M4OSA_Int32 JumpToTime = 0;
+ M4OSA_Time mtIncCts;
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL==pContext), M4ERR_PARAMETER,
+ "M4PTO3GPP_Step: pContext is M4OSA_NULL");
+
+ /**
+ * Check state automaton */
+ if ( !((M4PTO3GPP_kState_OPENED == pC->m_State) || (M4PTO3GPP_kState_READY == pC->m_State)) )
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+ return M4ERR_STATE;
+ }
+
+ /******************************************************************/
+ /**
+ * In case this is the first step, we prepare the decoder, the encoder and the writer */
+ if (M4PTO3GPP_kState_OPENED == pC->m_State)
+ {
+ M4OSA_TRACE2_0("M4PTO3GPP_Step(): This is the first step, \
+ calling M4PTO3GPP_Ready4Processing");
+
+ /**
+ * Prepare the reader, the decoder, the encoder, the writer... */
+ err = M4PTO3GPP_Ready4Processing(pC);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): M4PTO3GPP_Ready4Processing() returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Update state automaton */
+ pC->m_State = M4PTO3GPP_kState_READY;
+
+ M4OSA_TRACE3_0("M4PTO3GPP_Step(): returning M4NO_ERROR (a)");
+ return M4NO_ERROR; /**< we only do that in the first step, \
+ first REAL step will be the next one */
+ }
+
+
+ /*
+ * Check if we reached the targeted file size.
+ * We do that before the encoding, because the core encoder has to know if this is
+ * the last frame to encode */
+ err = pC->m_pWriterGlobInt->pFctGetOption(pC->m_p3gpWriterContext,
+ M4WRITER_kFileSizeAudioEstimated, (M4OSA_DataOption) &pC->m_CurrentFileSize);
+ if ((0 != pC->m_MaxFileSize) &&
+ /**< Add a marge to the file size in order to never exceed the max file size */
+ ((pC->m_CurrentFileSize + M4PTO3GPP_MARGE_OF_FILE_SIZE) >= pC->m_MaxFileSize))
+ {
+ pC->m_IsLastPicture = M4OSA_TRUE;
+ }
+
+ /******************************************************************
+ * At that point we are in M4PTO3GPP_kState_READY state
+ * We perform one step of video encoding
+ ******************************************************************/
+
+ /************* VIDEO ENCODING ***************/
+ if (M4PTO3GPP_kStreamState_STARTED == pC->m_VideoState) /**<If the video encoding is going on*/
+ { /**
+ * Call the encoder */
+ pC->m_NbCurrentFrame++;
+
+ /* Check if it is the last frame the to encode */
+ if((pC->m_Params.NbVideoFrames > 0) \
+ && (pC->m_NbCurrentFrame >= pC->m_Params.NbVideoFrames))
+ {
+ pC->m_IsLastPicture = M4OSA_TRUE;
+ }
+
+ M4OSA_TRACE2_2("M4PTO3GPP_Step(): Calling pEncoderInt->pFctEncode with videoCts = %.2f\
+ nb = %lu", pC->m_mtCts, pC->m_NbCurrentFrame);
+
+ err = pC->m_pEncoderInt->pFctEncode(pC->m_pMp4EncoderContext, M4OSA_NULL,
+ /**< The input plane is null because the input Picture will be obtained by the\
+ VPP filter from the context */
+ pC->m_mtCts,
+ (pC->m_IsLastPicture ?
+ M4ENCODER_kLastFrame : M4ENCODER_kNormalFrame) );
+ /**< Last param set to M4OSA_TRUE signals that this is the last frame to be encoded,\
+ M4OSA_FALSE else */
+
+ M4OSA_TRACE3_2("M4PTO3GPP_Step(): pEncoderInt->pFctEncode returns 0x%x, vidFormat =0x%x",
+ err, pC->m_Params.OutputVideoFormat);
+ if((M4NO_ERROR == err) && (M4VIDEOEDITING_kH264 == pC->m_Params.OutputVideoFormat))
+ {
+ /* Check if last frame.*
+ * */
+ /* H264-specific: the encoder does not emit M4WAR_NO_MORE_AU for the last
+ * frame, so the FINISHED transition is driven by m_IsLastPicture instead. */
+ if(M4OSA_TRUE == pC->m_IsLastPicture)
+ {
+ M4OSA_TRACE3_0("M4PTO3GPP_Step(): Last picture");
+ pC->m_VideoState = M4PTO3GPP_kStreamState_FINISHED;
+ }
+
+ }
+
+ if (M4WAR_NO_MORE_AU == err) /**< The video encoding is finished */
+ {
+ M4OSA_TRACE3_0("M4PTO3GPP_Step(): pEncoderInt->pFctEncode returns M4WAR_NO_MORE_AU");
+ pC->m_VideoState = M4PTO3GPP_kStreamState_FINISHED;
+ }
+ else if (M4NO_ERROR != err) /**< Unexpected error code */
+ {
+ /* Writer-stop and allocation failures are remapped to a single
+ * access-unit error code; everything else is propagated as-is. */
+ if( (((M4OSA_UInt32)M4WAR_WRITER_STOP_REQ) == err) ||
+ (((M4OSA_UInt32)M4ERR_ALLOC) == err) )
+ {
+ M4OSA_TRACE1_0("M4PTO3GPP_Step: returning ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR");
+ return ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR;
+ }
+ else
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): pEncoderInt->pFctEncode(last) (a) returns 0x%x",
+ err);
+ return err;
+ }
+ }
+ } /**< End of video encoding */
+
+
+ /****** AUDIO TRANSCODING (read + null encoding + write) ******/
+ if (M4PTO3GPP_kStreamState_STARTED == pC->m_AudioState)
+ {
+ /* Catch the audio track up to the next video CTS. m_mtNextCts is
+ * maintained outside this function (NOTE(review): presumably by the
+ * encoder/VPP callback — confirm against the rest of the module). */
+ while ( (M4PTO3GPP_kStreamState_STARTED == pC->m_AudioState) &&
+ (pC->m_mtAudioCts < pC->m_mtNextCts))
+
+ {
+ l_uiAudioStepCount++;
+ if (M4OSA_FALSE == pC->m_bAudioPaddingSilence)
+ {
+ /**< Read the next audio AU in the input Audio file */
+ err = pC->m_pReaderDataInt->m_pFctGetNextAu(pC->m_pAudioReaderContext,
+ (M4_StreamHandler*)pC->m_pReaderAudioStream, pC->m_pReaderAudioAU);
+ pC->m_mtAudioCts = pC->m_pReaderAudioAU->m_CTS + pC->m_AudioOffSet;
+
+ if (M4WAR_NO_MORE_AU == err) /* The audio transcoding is finished */
+ {
+ M4OSA_TRACE2_0("M4PTO3GPP_Step():\
+ pReaderDataInt->m_pFctGetNextAu(audio) returns \
+ M4WAR_NO_MORE_AU");
+ /* Input audio is exhausted: behavior depends on the padding mode
+ * (stop, pad with AMR silence, or loop the input from time 0). */
+ switch(pC->m_Params.AudioPaddingMode)
+ {
+ case M4PTO3GPP_kAudioPaddingMode_None:
+
+ pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+ break;
+
+ case M4PTO3GPP_kAudioPaddingMode_Silence:
+
+ if (M4DA_StreamTypeAudioAmrNarrowBand
+ != pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+ /**< Do nothing if the input audio file format is not AMR */
+ {
+ pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+ }
+ else
+ {
+ pC->m_bAudioPaddingSilence = M4OSA_TRUE;
+ }
+ break;
+
+ case M4PTO3GPP_kAudioPaddingMode_Loop:
+
+ /**< Jump to the beginning of the audio file */
+ err = pC->m_pReaderGlobInt->m_pFctJump(pC->m_pAudioReaderContext,
+ (M4_StreamHandler*)pC->m_pReaderAudioStream, &JumpToTime);
+
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): \
+ pReaderDataInt->m_pFctReset(audio returns 0x%x",
+ err);
+ return err;
+ }
+
+ if (M4DA_StreamTypeAudioAmrNarrowBand
+ == pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+ {
+ /* Advance by one AMR-NB frame duration (20 ms) so the looped
+ * stream does not overlap the last written AU. */
+ pC->m_mtAudioCts += 20; /*< SEMC bug fixed at Lund */
+ pC->m_AudioOffSet = pC->m_mtAudioCts;
+
+ /**
+ * 'BZZZ' bug fix:
+ * add a silence frame */
+ /* CTS converted from milliseconds to writer timescale units. */
+ mtIncCts = (M4OSA_Time)((pC->m_mtAudioCts) *
+ (pC->m_pWriterAudioStream->timeScale / 1000.0));
+ err = M4PTO3GPP_writeAmrSilence122Frame(pC->m_pWriterDataInt,
+ pC->m_p3gpWriterContext, &pC->m_WriterAudioAU, mtIncCts);
+
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): \
+ M4PTO3GPP_AddAmrSilenceSid returns 0x%x", err);
+ return err;
+ }/**< Add => no audio cts increment...*/
+ }
+ else
+ {
+ /* Non-AMR (e.g. AAC): offset by the last observed inter-AU
+ * delta so looped CTS values keep increasing monotonically. */
+ pC->m_AudioOffSet = pC->m_mtAudioCts + pC->m_DeltaAudioCts;
+ }
+ break;
+ } /* end of: switch */
+ }
+ else if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): pReaderDataInt->m_pFctGetNextAu(Audio)\
+ returns 0x%x", err);
+ return err;
+ }
+ else
+ {
+ /**
+ * Save the delta Cts (AAC only) */
+ pC->m_DeltaAudioCts = pC->m_pReaderAudioAU->m_CTS - pC->m_PrevAudioCts;
+ pC->m_PrevAudioCts = pC->m_pReaderAudioAU->m_CTS;
+
+ /**
+ * Prepare the writer AU */
+ err = pC->m_pWriterDataInt->pStartAU(pC->m_p3gpWriterContext, 1,
+ &pC->m_WriterAudioAU);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): pWriterDataInt->pStartAU(Audio)\
+ returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Copy audio data from reader AU to writer AU */
+ /* NOTE(review): the reader AU size is assumed to fit the writer AU
+ * buffer obtained from pStartAU — no explicit bound check here. */
+ M4OSA_TRACE2_1("M4PTO3GPP_Step(): Copying audio AU: size=%d",
+ pC->m_pReaderAudioAU->m_size);
+ memcpy((void *)pC->m_WriterAudioAU.dataAddress,
+ (void *)pC->m_pReaderAudioAU->m_dataAddress,
+ pC->m_pReaderAudioAU->m_size);
+ pC->m_WriterAudioAU.size = pC->m_pReaderAudioAU->m_size;
+
+ /**
+ * Convert CTS unit from milliseconds to timescale */
+ if (M4DA_StreamTypeAudioAmrNarrowBand
+ != pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+ {
+ pC->m_WriterAudioAU.CTS = (M4OSA_Time)
+ ((pC->m_AudioOffSet + pC->m_pReaderAudioAU->m_CTS)
+ * pC->m_pWriterAudioStream->timeScale / 1000.0);
+ }
+ else
+ {
+ pC->m_WriterAudioAU.CTS = (M4OSA_Time)(pC->m_mtAudioCts *
+ (pC->m_pWriterAudioStream->timeScale / 1000.0));
+ }
+ pC->m_WriterAudioAU.nbFrag = 0;
+ M4OSA_TRACE2_1("M4PTO3GPP_Step(): audio AU: CTS=%d ms", pC->m_mtAudioCts
+ /*pC->m_pReaderAudioAU->m_CTS*/);
+
+ /**
+ * Write it to the output file */
+ err = pC->m_pWriterDataInt->pProcessAU(pC->m_p3gpWriterContext, 1,
+ &pC->m_WriterAudioAU);
+
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): pWriterDataInt->pProcessAU(Audio)\
+ returns 0x%x", err);
+ return err;
+ }
+ }
+ }
+ else /**< M4OSA_TRUE == pC->m_bAudioPaddingSilence */
+ {
+ if (M4DA_StreamTypeAudioAmrNarrowBand ==
+ pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+ {
+ /**
+ * Fill in audio au with silence */
+ /* Advance by one AMR-NB frame duration (20 ms) per silence frame. */
+ pC->m_mtAudioCts += 20;
+
+ /**
+ * Padd with silence */
+ mtIncCts = (M4OSA_Time)(pC->m_mtAudioCts
+ * (pC->m_pWriterAudioStream->timeScale / 1000.0));
+ err = M4PTO3GPP_writeAmrSilence048Frame(pC->m_pWriterDataInt,
+ pC->m_p3gpWriterContext, &pC->m_WriterAudioAU, mtIncCts);
+
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Step(): M4PTO3GPP_AddAmrSilenceSid returns 0x%x",
+ err);
+ return err;
+ }
+ }
+ else /**< Do nothing if the input audio file format is not AMR */
+ {
+ pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+ }
+
+ }
+ } /**< while */
+ } /**< End of audio encoding */
+
+ pC->m_mtCts = pC->m_mtNextCts;
+
+ /**
+ * The transcoding is finished when no stream is being encoded anymore */
+ /* Only the video state is tested here: audio padding/loop modes are
+ * terminated implicitly once the video stream reaches FINISHED. */
+ if (M4PTO3GPP_kStreamState_FINISHED == pC->m_VideoState)
+ {
+ pC->m_State = M4PTO3GPP_kState_FINISHED;
+ M4OSA_TRACE2_0("M4PTO3GPP_Step(): transcoding finished, returning M4WAR_NO_MORE_AU");
+ return M4PTO3GPP_WAR_END_OF_PROCESSING;
+ }
+
+ M4OSA_TRACE3_0("M4PTO3GPP_Step(): returning M4NO_ERROR (b)");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
+ * @brief Finish the M4PTO3GPP transcoding.
+ * @note The output 3GPP file is ready to be played after this call
+ * @param pContext (IN) M4PTO3GPP context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return M4ERR_STATE: M4PTO3GPP is not in an appropriate state for this function to be called
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+ /* Finishes the transcode: stops and closes the video encoder, pushes the
+ * encoder DSI into the 3GP writer (MPEG4/H264), sets the file duration, and
+ * closes the writer. Cleanup errors are traced but processing continues so
+ * that every resource gets its shutdown call; the last error is returned. */
+ M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+ M4OSA_ERR osaErr = M4NO_ERROR;
+ M4OSA_UInt32 lastCTS;
+ M4ENCODER_Header* encHeader;
+ M4SYS_StreamIDmemAddr streamHeader;
+
+ M4OSA_TRACE3_1("M4PTO3GPP_Close called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL==pContext), M4ERR_PARAMETER, "M4PTO3GPP_Close:\
+ pContext is M4OSA_NULL");
+
+ /* Check state automaton */
+ if ((pC->m_State != M4PTO3GPP_kState_OPENED) &&
+ (pC->m_State != M4PTO3GPP_kState_READY) &&
+ (pC->m_State != M4PTO3GPP_kState_FINISHED))
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Close(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+ return M4ERR_STATE;
+ }
+
+ /*************************************/
+ /******** Finish the encoding ********/
+ /*************************************/
+ if (M4PTO3GPP_kState_READY == pC->m_State)
+ {
+ pC->m_State = M4PTO3GPP_kState_FINISHED;
+ }
+
+ /* Encoder teardown is a two-stage state machine: RUNNING -> STOPPED -> CLOSED.
+ * pFctStop is optional (may be M4OSA_NULL in the encoder interface). */
+ if (M4PTO3GPP_kEncoderRunning == pC->m_eEncoderState)
+ {
+ if (pC->m_pEncoderInt->pFctStop != M4OSA_NULL)
+ {
+ osaErr = pC->m_pEncoderInt->pFctStop(pC->m_pMp4EncoderContext);
+ if (M4NO_ERROR != osaErr)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_close: m_pEncoderInt->pFctStop returns 0x%x", osaErr);
+ /* Well... how the heck do you handle a failed cleanup? */
+ }
+ }
+
+ pC->m_eEncoderState = M4PTO3GPP_kEncoderStopped;
+ }
+
+ /* Has the encoder actually been opened? Don't close it if that's not the case. */
+ if (M4PTO3GPP_kEncoderStopped == pC->m_eEncoderState)
+ {
+ osaErr = pC->m_pEncoderInt->pFctClose(pC->m_pMp4EncoderContext);
+ if (M4NO_ERROR != osaErr)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_close: m_pEncoderInt->pFctClose returns 0x%x", osaErr);
+ /* Well... how the heck do you handle a failed cleanup? */
+ }
+
+ pC->m_eEncoderState = M4PTO3GPP_kEncoderClosed;
+ }
+
+ /*******************************/
+ /******** Close 3GP out ********/
+ /*******************************/
+
+ if (M4OSA_NULL != pC->m_p3gpWriterContext) /* happens in state _SET */
+ {
+ /* HW encoder: fetch the DSI from the shell video encoder, and feed it to the writer before
+ closing it. */
+ if ((M4VIDEOEDITING_kMPEG4 == pC->m_Params.OutputVideoFormat)
+ || (M4VIDEOEDITING_kH264 == pC->m_Params.OutputVideoFormat))
+ {
+ osaErr = pC->m_pEncoderInt->pFctGetOption(pC->m_pMp4EncoderContext,
+ M4ENCODER_kOptionID_EncoderHeader,
+ (M4OSA_DataOption)&encHeader);
+ /* || short-circuits, so encHeader is only dereferenced when
+ * pFctGetOption succeeded and has filled it in. */
+ if ( (M4NO_ERROR != osaErr) || (M4OSA_NULL == encHeader->pBuf) )
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_close: failed to get the encoder header (err 0x%x)",
+ osaErr);
+ /**< no return here, we still have stuff to deallocate after close, even if \
+ it fails. */
+ }
+ else
+ {
+ /* set this header in the writer */
+ streamHeader.streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+ streamHeader.size = encHeader->Size;
+ streamHeader.addr = (M4OSA_MemAddr32)encHeader->pBuf;
+ osaErr = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+ M4WRITER_kDSI, &streamHeader);
+ if (M4NO_ERROR != osaErr)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_close: failed to set the DSI in the writer \
+ (err 0x%x) ", osaErr);
+ }
+ }
+ }
+
+ /* Update last Video CTS */
+ /* m_mtCts is a float elsewhere in this module; truncated to whole ms here. */
+ lastCTS = (M4OSA_UInt32)pC->m_mtCts;
+
+ osaErr = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+ if (M4NO_ERROR != osaErr)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Close: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+ osaErr);
+ }
+
+ /* Write and close the 3GP output file */
+ osaErr = pC->m_pWriterGlobInt->pFctCloseWrite(pC->m_p3gpWriterContext);
+ if (M4NO_ERROR != osaErr)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Close: pWriterGlobInt->pFctCloseWrite returns 0x%x", osaErr);
+ /**< don't return yet, we have to close other things */
+ }
+ pC->m_p3gpWriterContext = M4OSA_NULL;
+ }
+
+ /**
+ * State transition */
+ pC->m_State = M4PTO3GPP_kState_CLOSED;
+
+ M4OSA_TRACE3_1("M4PTO3GPP_Close(): returning 0x%x", osaErr);
+ return osaErr;
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
+ * @brief Free all resources used by the M4PTO3GPP.
+ * @note The context is no longer valid after this call
+ * @param pContext (IN) M4PTO3GPP context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+ /* Releases every resource owned by the context, in dependency order:
+ * Close() first (if still open), then the audio reader, the video encoder,
+ * the writer stream descriptors, the shell interfaces, and finally the
+ * context itself. Errors are traced but never abort the teardown. */
+ M4OSA_ERR err = M4NO_ERROR;
+ M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+
+ M4OSA_TRACE3_1("M4PTO3GPP_CleanUp called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL==pContext),M4ERR_PARAMETER, "M4PTO3GPP_CleanUp: pContext \
+ is M4OSA_NULL");
+
+ /**
+ * First call Close, if needed, to clean the video encoder */
+
+ if ((M4PTO3GPP_kState_OPENED == pC->m_State) || (M4PTO3GPP_kState_READY == pC->m_State)
+ || (M4PTO3GPP_kState_FINISHED == pC->m_State))
+ {
+ err = M4PTO3GPP_Close(pContext);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: M4PTO3GPP_Close returns 0x%x", err);
+ /**< don't return, we have to free other components */
+ }
+ }
+
+ /**
+ * Free Audio reader stuff, if needed */
+
+ if (M4OSA_NULL != pC->m_pAudioReaderContext) /**< may be M4OSA_NULL if M4PTO3GPP_Open was not\
+ called */
+ {
+
+ /* Close must precede Destroy: Destroy frees the reader context itself. */
+ err = pC->m_pReaderGlobInt->m_pFctClose(pC->m_pAudioReaderContext);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pReaderGlobInt->m_pFctClose returns 0x%x", err);
+ /**< don't return, we have to free other components */
+ }
+ err = pC->m_pReaderGlobInt->m_pFctDestroy(pC->m_pAudioReaderContext);
+ pC->m_pAudioReaderContext = M4OSA_NULL;
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pReaderGlobInt->m_pFctDestroy returns 0x%x", err);
+ /**< don't return, we have to free other components */
+ }
+ }
+
+ if (M4OSA_NULL != pC->m_pReaderAudioAU)
+ {
+ free(pC->m_pReaderAudioAU);
+ pC->m_pReaderAudioAU = M4OSA_NULL;
+ }
+
+ /**
+ * Free video encoder stuff, if needed */
+ if (M4OSA_NULL != pC->m_pMp4EncoderContext)
+ {
+ err = pC->m_pEncoderInt->pFctCleanup(pC->m_pMp4EncoderContext);
+ pC->m_pMp4EncoderContext = M4OSA_NULL;
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pEncoderInt->pFctDestroy returns 0x%x", err);
+ /**< don't return, we have to free other components */
+ }
+ }
+
+ /* Writer stream descriptors allocated by M4PTO3GPP_Ready4Processing. */
+ if (M4OSA_NULL != pC->m_pWriterVideoStream)
+ {
+ free(pC->m_pWriterVideoStream);
+ pC->m_pWriterVideoStream = M4OSA_NULL;
+ }
+ if (M4OSA_NULL != pC->m_pWriterAudioStream)
+ {
+ free(pC->m_pWriterAudioStream);
+ pC->m_pWriterAudioStream = M4OSA_NULL;
+ }
+ if (M4OSA_NULL != pC->m_pWriterVideoStreamInfo)
+ {
+ free(pC->m_pWriterVideoStreamInfo);
+ pC->m_pWriterVideoStreamInfo = M4OSA_NULL;
+ }
+ if (M4OSA_NULL != pC->m_pWriterAudioStreamInfo)
+ {
+ free(pC->m_pWriterAudioStreamInfo);
+ pC->m_pWriterAudioStreamInfo = M4OSA_NULL;
+ }
+
+
+ /**
+ * Free the shells interfaces */
+ if (M4OSA_NULL != pC->m_pReaderGlobInt)
+ {
+ free(pC->m_pReaderGlobInt);
+ pC->m_pReaderGlobInt = M4OSA_NULL;
+ }
+ if (M4OSA_NULL != pC->m_pReaderDataInt)
+ {
+ free(pC->m_pReaderDataInt);
+ pC->m_pReaderDataInt = M4OSA_NULL;
+ }
+
+ if(M4OSA_NULL != pC->m_pEncoderInt)
+ {
+ free(pC->m_pEncoderInt);
+ pC->m_pEncoderInt = M4OSA_NULL;
+ }
+ if(M4OSA_NULL != pC->m_pWriterGlobInt)
+ {
+ free(pC->m_pWriterGlobInt);
+ pC->m_pWriterGlobInt = M4OSA_NULL;
+ }
+ if(M4OSA_NULL != pC->m_pWriterDataInt)
+ {
+ free(pC->m_pWriterDataInt);
+ pC->m_pWriterDataInt = M4OSA_NULL;
+ }
+ /**< Do not free pC->pOsaMemoryPtrFct and pC->pOsaMemoryPtrFct, because it's owned by the \
+ application */
+
+ /**
+ * Free the context itself */
+ /* The caller's pContext is dangling after this; nulling the local copy
+ * is only defensive within this function. */
+ free(pC);
+ pC = M4OSA_NULL;
+
+ M4OSA_TRACE3_0("M4PTO3GPP_CleanUp(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/********************* INTERNAL FUNCTIONS *********************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC);
+ * @brief Prepare all resources and interfaces for the transcoding.
+ * @note It is called by the first M4OSA_Step() call
+ * @param pC (IN) M4PTO3GPP private context
+ * @return M4NO_ERROR: No error
+ * @return Any error returned by an underlying module
+ ******************************************************************************
+*/
+/******************************************************/
+M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC)
+/******************************************************/
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4WRITER_OutputFileType outputFileType;
+ M4OSA_UInt32 uiVersion;
+ M4ENCODER_Format encFormat;
+ M4ENCODER_AdvancedParams EncParams; /**< Encoder advanced parameters */
+ M4SYS_StreamIDValue optionValue;
+
+ M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing called with pC=0x%x", pC);
+
+ /******************************/
+ /******************************/
+
+ /********************************************/
+ /******** ********/
+ /******** Video Encoder Parames init ********/
+ /******** ********/
+ /********************************************/
+
+ /**
+ * Get the correct encoder interface */
+ switch(pC->m_Params.OutputVideoFormat)
+ {
+ case M4VIDEOEDITING_kMPEG4:
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+ err = VideoEditorVideoEncoder_getInterface_MPEG4(&encFormat, &pC->m_pEncoderInt,
+ M4ENCODER_OPEN_ADVANCED);
+#else /* software MPEG4 encoder not available! */
+ M4OSA_TRACE1_0("No MPEG4 encoder available! Did you forget to register one?");
+ err = M4ERR_STATE;
+#endif /* software MPEG4 encoder available? */
+ break;
+ case M4VIDEOEDITING_kH263:
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+ err = VideoEditorVideoEncoder_getInterface_H263(&encFormat, &pC->m_pEncoderInt,
+ M4ENCODER_OPEN_ADVANCED);
+#else /* software H263 encoder not available! */
+ M4OSA_TRACE1_0("No H263 encoder available! Did you forget to register one?");
+ err = M4ERR_STATE;
+#endif /* software H263 encoder available? */
+ break;
+ case M4VIDEOEDITING_kH264:
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+ err = VideoEditorVideoEncoder_getInterface_H264(&encFormat, &pC->m_pEncoderInt,
+ M4ENCODER_OPEN_ADVANCED);
+#else /* software H264 encoder not available! */
+ M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing: No H264 encoder available!\
+ Did you forget to register one?");
+ err = M4ERR_STATE;
+#endif /* software H264 encoder available? */
+ break;
+ default:
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+ ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+ pC->m_Params.OutputVideoFormat);
+ return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+ }
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("switch(pC->m_Params.OutputVideoFormat): getInterfaces returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Fill encoder parameters according to M4PTO3GPP settings */
+
+ /**
+ * Video frame size */
+ switch(pC->m_Params.OutputVideoFrameSize)
+ {
+ case M4VIDEOEDITING_kSQCIF :
+ EncParams.FrameHeight = M4ENCODER_SQCIF_Height;
+ EncParams.FrameWidth = M4ENCODER_SQCIF_Width;
+ break;
+ case M4VIDEOEDITING_kQQVGA :
+ EncParams.FrameHeight = M4ENCODER_QQVGA_Height;
+ EncParams.FrameWidth = M4ENCODER_QQVGA_Width;
+ break;
+ case M4VIDEOEDITING_kQCIF :
+ EncParams.FrameHeight = M4ENCODER_QCIF_Height;
+ EncParams.FrameWidth = M4ENCODER_QCIF_Width;
+ break;
+ case M4VIDEOEDITING_kQVGA :
+ EncParams.FrameHeight = M4ENCODER_QVGA_Height;
+ EncParams.FrameWidth = M4ENCODER_QVGA_Width;
+ break;
+ case M4VIDEOEDITING_kCIF :
+ EncParams.FrameHeight = M4ENCODER_CIF_Height;
+ EncParams.FrameWidth = M4ENCODER_CIF_Width;
+ break;
+ case M4VIDEOEDITING_kVGA :
+ EncParams.FrameHeight = M4ENCODER_VGA_Height;
+ EncParams.FrameWidth = M4ENCODER_VGA_Width;
+ break;
+/* +PR LV5807 */
+ case M4VIDEOEDITING_kWVGA :
+ EncParams.FrameHeight = M4ENCODER_WVGA_Height;
+ EncParams.FrameWidth = M4ENCODER_WVGA_Width;
+ break;
+ case M4VIDEOEDITING_kNTSC:
+ EncParams.FrameHeight = M4ENCODER_NTSC_Height;
+ EncParams.FrameWidth = M4ENCODER_NTSC_Width;
+ break;
+/* -PR LV5807 */
+/* +CR Google */
+ case M4VIDEOEDITING_k640_360:
+ EncParams.FrameHeight = M4ENCODER_640_360_Height;
+ EncParams.FrameWidth = M4ENCODER_640_360_Width;
+ break;
+
+ case M4VIDEOEDITING_k854_480:
+ EncParams.FrameHeight = M4ENCODER_854_480_Height;
+ EncParams.FrameWidth = M4ENCODER_854_480_Width;
+ break;
+
+ case M4VIDEOEDITING_k1280_720:
+ EncParams.FrameHeight = M4ENCODER_1280_720_Height;
+ EncParams.FrameWidth = M4ENCODER_1280_720_Width;
+ break;
+
+ case M4VIDEOEDITING_k1080_720:
+ EncParams.FrameHeight = M4ENCODER_1080_720_Height;
+ EncParams.FrameWidth = M4ENCODER_1080_720_Width;
+ break;
+
+ case M4VIDEOEDITING_k960_720:
+ EncParams.FrameHeight = M4ENCODER_960_720_Height;
+ EncParams.FrameWidth = M4ENCODER_960_720_Width;
+ break;
+
+ case M4VIDEOEDITING_k1920_1080:
+ EncParams.FrameHeight = M4ENCODER_1920_1080_Height;
+ EncParams.FrameWidth = M4ENCODER_1920_1080_Width;
+ break;
+/* -CR Google */
+ default :
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+ ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE",
+ pC->m_Params.OutputVideoFrameSize);
+ return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+ }
+
+ EncParams.InputFormat = M4ENCODER_kIYUV420;
+
+ /**
+ * Video bitrate */
+ switch(pC->m_Params.OutputVideoBitrate)
+ {
+ case M4VIDEOEDITING_k16_KBPS:
+ case M4VIDEOEDITING_k24_KBPS:
+ case M4VIDEOEDITING_k32_KBPS:
+ case M4VIDEOEDITING_k48_KBPS:
+ case M4VIDEOEDITING_k64_KBPS:
+ case M4VIDEOEDITING_k96_KBPS:
+ case M4VIDEOEDITING_k128_KBPS:
+ case M4VIDEOEDITING_k192_KBPS:
+ case M4VIDEOEDITING_k256_KBPS:
+ case M4VIDEOEDITING_k288_KBPS:
+ case M4VIDEOEDITING_k384_KBPS:
+ case M4VIDEOEDITING_k512_KBPS:
+ case M4VIDEOEDITING_k800_KBPS:
+/*+ New Encoder bitrates */
+ case M4VIDEOEDITING_k2_MBPS:
+ case M4VIDEOEDITING_k5_MBPS:
+ case M4VIDEOEDITING_k8_MBPS:
+/*- New Encoder bitrates */
+ EncParams.Bitrate = pC->m_Params.OutputVideoBitrate;
+ break;
+
+ case M4VIDEOEDITING_kVARIABLE_KBPS:
+/*+ New Encoder bitrates */
+ EncParams.Bitrate = M4VIDEOEDITING_k8_MBPS;
+/*- New Encoder bitrates */
+ break;
+
+ default :
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+ ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
+ pC->m_Params.OutputVideoBitrate);
+ return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+ }
+
+ /**
+ * Video format */
+ switch(pC->m_Params.OutputVideoFormat)
+ {
+ case M4VIDEOEDITING_kMPEG4 :
+ EncParams.Format = M4ENCODER_kMPEG4;
+ break;
+ case M4VIDEOEDITING_kH263 :
+ EncParams.Format = M4ENCODER_kH263;
+ break;
+ case M4VIDEOEDITING_kH264:
+ EncParams.Format = M4ENCODER_kH264;
+ break;
+ default :
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+ ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+ pC->m_Params.OutputVideoFormat);
+ return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+ }
+
+ /**
+ * Video frame rate (set it to max = 30 fps) */
+ EncParams.uiTimeScale = 30;
+ EncParams.uiRateFactor = 1;
+
+ EncParams.FrameRate = M4ENCODER_k30_FPS;
+
+
+ /******************************/
+ /******** 3GP out init ********/
+ /******************************/
+
+ /* Get the 3GPP writer interface */
+ err = M4WRITER_3GP_getInterfaces(&outputFileType, &pC->m_pWriterGlobInt, &pC->m_pWriterDataInt);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4WRITER_3GP_getInterfaces: M4WRITER_3GP_getInterfaces returns 0x%x", err);
+ return err;
+ }
+
+ /* Init the 3GPP writer */
+ err = pC->m_pWriterGlobInt->pFctOpen(&pC->m_p3gpWriterContext, pC->m_Params.pOutput3gppFile,
+ pC->pOsalFileWrite, pC->m_Params.pTemporaryFile, pC->pOsalFileRead);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctOpen returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Link to the writer context in the writer interface */
+ pC->m_pWriterDataInt->pWriterContext = pC->m_p3gpWriterContext;
+
+ /**
+ * Set the product description string in the written file */
+ err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext, M4WRITER_kEmbeddedString,
+ (M4OSA_DataOption)M4PTO3GPP_SIGNATURE);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: \
+ pWriterGlobInt->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Set the product version in the written file */
+ uiVersion = M4VIDEOEDITING_VERSION_MAJOR*100 + M4VIDEOEDITING_VERSION_MINOR*10
+ + M4VIDEOEDITING_VERSION_REVISION;
+ err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext, M4WRITER_kEmbeddedVersion,
+ (M4OSA_DataOption)&uiVersion);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: \
+ pWriterGlobInt->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Allocate and fill the video stream structures for the writer */
+ pC->m_pWriterVideoStream =
+ (M4SYS_StreamDescription*)M4OSA_32bitAlignedMalloc(sizeof(M4SYS_StreamDescription), M4PTO3GPP,
+ (M4OSA_Char *)"pWriterVideoStream");
+ if (M4OSA_NULL == pC->m_pWriterVideoStream)
+ {
+ M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterVideoStream, \
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->m_pWriterVideoStreamInfo =
+ (M4WRITER_StreamVideoInfos*)M4OSA_32bitAlignedMalloc(sizeof(M4WRITER_StreamVideoInfos), M4PTO3GPP,
+ (M4OSA_Char *)"pWriterVideoStreamInfo");
+ if (M4OSA_NULL == pC->m_pWriterVideoStreamInfo)
+ {
+ M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterVideoStreamInfo,\
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ /**
+ * Fill Video properties structure for the AddStream method */
+ pC->m_pWriterVideoStreamInfo->height = EncParams.FrameHeight;
+ pC->m_pWriterVideoStreamInfo->width = EncParams.FrameWidth;
+ pC->m_pWriterVideoStreamInfo->fps = 0; /**< Not used by the core writer */
+ pC->m_pWriterVideoStreamInfo->Header.pBuf = M4OSA_NULL;
+ /** No header, will be set by setOption */
+ pC->m_pWriterVideoStreamInfo->Header.Size = 0;
+
+ /**
+ * Fill Video stream description structure for the AddStream method */
+ pC->m_pWriterVideoStream->streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+
+ /**
+ * Video format */
+ switch(pC->m_Params.OutputVideoFormat)
+ {
+ case M4VIDEOEDITING_kMPEG4:
+ pC->m_pWriterVideoStream->streamType = M4SYS_kMPEG_4; break;
+ case M4VIDEOEDITING_kH263:
+ pC->m_pWriterVideoStream->streamType = M4SYS_kH263; break;
+ case M4VIDEOEDITING_kH264:
+ pC->m_pWriterVideoStream->streamType = M4SYS_kH264; break;
+ default :
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+ ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+ pC->m_Params.OutputVideoFormat);
+ return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+ }
+
+ /**
+ * Video bitrate */
+ switch(pC->m_Params.OutputVideoBitrate)
+ {
+ case M4VIDEOEDITING_k16_KBPS:
+ case M4VIDEOEDITING_k24_KBPS:
+ case M4VIDEOEDITING_k32_KBPS:
+ case M4VIDEOEDITING_k48_KBPS:
+ case M4VIDEOEDITING_k64_KBPS:
+ case M4VIDEOEDITING_k96_KBPS:
+ case M4VIDEOEDITING_k128_KBPS:
+ case M4VIDEOEDITING_k192_KBPS:
+ case M4VIDEOEDITING_k256_KBPS:
+ case M4VIDEOEDITING_k288_KBPS:
+ case M4VIDEOEDITING_k384_KBPS:
+ case M4VIDEOEDITING_k512_KBPS:
+ case M4VIDEOEDITING_k800_KBPS:
+/*+ New Encoder bitrates */
+ case M4VIDEOEDITING_k2_MBPS:
+ case M4VIDEOEDITING_k5_MBPS:
+ case M4VIDEOEDITING_k8_MBPS:
+/*- New Encoder bitrates */
+ pC->m_pWriterVideoStream->averageBitrate = pC->m_Params.OutputVideoBitrate;
+ break;
+
+ case M4VIDEOEDITING_kVARIABLE_KBPS :
+ pC->m_pWriterVideoStream->averageBitrate = 0;
+ break;
+
+ default :
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+ ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
+ pC->m_Params.OutputVideoBitrate);
+ return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+ }
+
+ pC->m_pWriterVideoStream->duration = 0; /**< Duration is not known */
+ pC->m_pWriterVideoStream->timeScale = 0; /**< Not used by the core writer */
+ pC->m_pWriterVideoStream->maxBitrate = pC->m_pWriterVideoStream->averageBitrate;
+ pC->m_pWriterVideoStream->profileLevel = 0; /**< Not used by the core writer */
+ pC->m_pWriterVideoStream->decoderSpecificInfo = (M4OSA_MemAddr32)
+ (pC->m_pWriterVideoStreamInfo);
+ pC->m_pWriterVideoStream->decoderSpecificInfoSize = sizeof(M4WRITER_StreamVideoInfos);
+
+ /**
+ * Update AU properties for video stream */
+ pC->m_WriterVideoAU.CTS = pC->m_WriterVideoAU.DTS = 0; /** Reset time */
+ pC->m_WriterVideoAU.size = 0;
+ pC->m_WriterVideoAU.frag = M4OSA_NULL;
+ pC->m_WriterVideoAU.nbFrag = 0; /** No fragment */
+ pC->m_WriterVideoAU.stream = pC->m_pWriterVideoStream;
+ pC->m_WriterVideoAU.attribute = AU_RAP;
+ pC->m_WriterVideoAU.dataAddress = M4OSA_NULL;
+
+ /**
+ * If there is an audio input, allocate and fill the audio stream structures for the writer */
+ if(M4OSA_NULL != pC->m_pReaderAudioStream)
+ {
+ pC->m_pWriterAudioStream =
+ (M4SYS_StreamDescription*)M4OSA_32bitAlignedMalloc(sizeof(M4SYS_StreamDescription), M4PTO3GPP,
+ (M4OSA_Char *)"pWriterAudioStream");
+ if (M4OSA_NULL == pC->m_pWriterAudioStream)
+ {
+ M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterAudioStream, \
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->m_pWriterAudioStreamInfo =
+ (M4WRITER_StreamAudioInfos*)M4OSA_32bitAlignedMalloc(sizeof(M4WRITER_StreamAudioInfos), M4PTO3GPP,
+ (M4OSA_Char *)"pWriterAudioStreamInfo");
+ if (M4OSA_NULL == pC->m_pWriterAudioStreamInfo)
+ {
+ M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate \
+ pWriterAudioStreamInfo, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ pC->m_pWriterAudioStreamInfo->nbSamplesPerSec = 0; /**< unused by our shell writer */
+ pC->m_pWriterAudioStreamInfo->nbBitsPerSample = 0; /**< unused by our shell writer */
+ pC->m_pWriterAudioStreamInfo->nbChannels = 1; /**< unused by our shell writer */
+
+ if( (M4OSA_NULL != pC->m_pReaderAudioStream) && /* audio could have been discarded */
+ (M4OSA_NULL != pC->m_pReaderAudioStream->m_basicProperties.m_pDecoderSpecificInfo) )
+ {
+ /* If we copy the stream from the input, we copy its DSI */
+ pC->m_pWriterAudioStreamInfo->Header.Size =
+ pC->m_pReaderAudioStream->m_basicProperties.m_decoderSpecificInfoSize;
+ pC->m_pWriterAudioStreamInfo->Header.pBuf =
+ (M4OSA_MemAddr8)pC->m_pReaderAudioStream->m_basicProperties.m_pDecoderSpecificInfo;
+ }
+ else
+ {
+ /* Writer will put a default DSI */
+ pC->m_pWriterAudioStreamInfo->Header.Size = 0;
+ pC->m_pWriterAudioStreamInfo->Header.pBuf = M4OSA_NULL;
+ }
+
+ /**
+ * Add the audio stream */
+ switch (pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+ {
+ case M4DA_StreamTypeAudioAmrNarrowBand:
+ pC->m_pWriterAudioStream->streamType = M4SYS_kAMR;
+ break;
+ case M4DA_StreamTypeAudioAac:
+ pC->m_pWriterAudioStream->streamType = M4SYS_kAAC;
+ break;
+ case M4DA_StreamTypeAudioEvrc:
+ pC->m_pWriterAudioStream->streamType = M4SYS_kEVRC;
+ break;
+ default:
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unhandled audio format (0x%x),\
+ returning ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
+ pC->m_pReaderAudioStream->m_basicProperties.m_streamType);
+ return ERR_PTO3GPP_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+ }
+
+ /*
+ * Fill Audio stream description structure for the AddStream method */
+ pC->m_pWriterAudioStream->streamID = M4PTO3GPP_WRITER_AUDIO_STREAM_ID;
+ pC->m_pWriterAudioStream->duration = 0;/**< Duration is not known yet */
+ pC->m_pWriterAudioStream->timeScale = M4PTO3GPP_WRITER_AUDIO_AMR_TIME_SCALE;
+ pC->m_pWriterAudioStream->profileLevel = M4PTO3GPP_WRITER_AUDIO_PROFILE_LEVEL;
+ pC->m_pWriterAudioStream->averageBitrate =
+ pC->m_pReaderAudioStream->m_basicProperties.m_averageBitRate;
+ pC->m_pWriterAudioStream->maxBitrate =
+ pC->m_pWriterAudioStream->averageBitrate;
+
+ /**
+ * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos \
+ in the DSI pointer... */
+ pC->m_pWriterAudioStream->decoderSpecificInfo =
+ (M4OSA_MemAddr32)pC->m_pWriterAudioStreamInfo;
+
+ /**
+ * Update AU properties for audio stream */
+ pC->m_WriterAudioAU.CTS = pC->m_WriterAudioAU.DTS = 0; /** Reset time */
+ pC->m_WriterAudioAU.size = 0;
+ pC->m_WriterAudioAU.frag = M4OSA_NULL;
+ pC->m_WriterAudioAU.nbFrag = 0; /** No fragment */
+ pC->m_WriterAudioAU.stream = pC->m_pWriterAudioStream;
+ pC->m_WriterAudioAU.attribute = AU_RAP;
+ pC->m_WriterAudioAU.dataAddress = M4OSA_NULL;
+ }
+
+ /************************************/
+ /******** Video Encoder Init ********/
+ /************************************/
+
+ /**
+ * PTO uses its own bitrate regulation, not the "true" core regulation */
+ EncParams.bInternalRegulation = M4OSA_TRUE; //M4OSA_FALSE;
+ EncParams.uiStartingQuantizerValue = M4PTO3GPP_QUANTIZER_STEP;
+
+ EncParams.videoProfile = pC->m_Params.videoProfile;
+ EncParams.videoLevel = pC->m_Params.videoLevel;
+
+ /**
+ * Other encoder settings */
+
+ EncParams.uiHorizontalSearchRange = 0; /* use default */
+ EncParams.uiVerticalSearchRange = 0; /* use default */
+ EncParams.bErrorResilience = M4OSA_FALSE; /* no error resilience */
+ EncParams.uiIVopPeriod = 15; /* use default */
+ EncParams.uiMotionEstimationTools = 0; /* M4V_MOTION_EST_TOOLS_ALL */
+ EncParams.bAcPrediction = M4OSA_TRUE; /* use AC prediction */
+ EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+
+
+ /**
+ * Create video encoder */
+ err = pC->m_pEncoderInt->pFctInit(&pC->m_pMp4EncoderContext, pC->m_pWriterDataInt,
+ M4PTO3GPP_applyVPP, pC, pC->m_pEncoderExternalAPI,
+ pC->m_pEncoderUserData);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctInit returns 0x%x", err);
+ return err;
+ }
+
+ pC->m_eEncoderState = M4PTO3GPP_kEncoderClosed;
+
+ err = pC->m_pEncoderInt->pFctOpen(pC->m_pMp4EncoderContext, &pC->m_WriterVideoAU, &EncParams);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctOpen returns 0x%x", err);
+ return err;
+ }
+
+ pC->m_eEncoderState = M4PTO3GPP_kEncoderStopped;
+
+ if (M4OSA_NULL != pC->m_pEncoderInt->pFctStart)
+ {
+ err = pC->m_pEncoderInt->pFctStart(pC->m_pMp4EncoderContext);
+
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctStart returns 0x%x", err);
+ return err;
+ }
+ }
+
+ pC->m_eEncoderState = M4PTO3GPP_kEncoderRunning;
+
+ /**
+ * No more setoption on "M4ENCODER_kVideoFragmentSize" here.
+ * It is now automaticly and "smartly" set in the encoder shell. */
+
+ /**************************************/
+ /******** 3GP out add streams ********/
+ /**************************************/
+
+ err = pC->m_pWriterGlobInt->pFctAddStream(pC->m_p3gpWriterContext, pC->m_pWriterVideoStream);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctAddStream(video) returns\
+ 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Set video max au size */
+ optionValue.streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+ optionValue.value = (M4OSA_UInt32)(1.5F * (M4OSA_Float)(pC->m_pWriterVideoStreamInfo->width
+ * pC->m_pWriterVideoStreamInfo->height)
+ * M4PTO3GPP_VIDEO_MIN_COMPRESSION_RATIO);
+ M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing,M4WRITER_kMaxAUSize: %u",optionValue.value);
+ err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxAUSize,(M4OSA_DataOption) &optionValue);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(video,\
+ M4WRITER_kMaxAUSize) returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Set video max chunck size */
+ optionValue.value = (M4OSA_UInt32)((M4OSA_Float)optionValue.value
+ * M4PTO3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO);
+ M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing,M4WRITER_kMaxChunckSize: %u",optionValue.value);
+ err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxChunckSize,(M4OSA_DataOption) &optionValue);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(video,\
+ M4WRITER_kMaxChunckSize) returns 0x%x", err);
+ return err;
+ }
+
+ if (M4OSA_NULL != pC->m_pReaderAudioStream)
+ {
+ err = pC->m_pWriterGlobInt->pFctAddStream(pC->m_p3gpWriterContext, pC->m_pWriterAudioStream);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctAddStream(audio) \
+ returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Set audio max au size */
+ optionValue.value = M4PTO3GPP_AUDIO_MAX_AU_SIZE;
+ optionValue.streamID = M4PTO3GPP_WRITER_AUDIO_STREAM_ID;
+ err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxAUSize,(M4OSA_DataOption) &optionValue);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(audio,\
+ M4WRITER_kMaxAUSize) returns 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Set audio max chunck size */
+ optionValue.value = M4PTO3GPP_AUDIO_MAX_CHUNK_SIZE; /**< Magical */
+ err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxChunckSize,(M4OSA_DataOption) &optionValue);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(audio,\
+ M4WRITER_kMaxChunckSize) returns 0x%x", err);
+ return err;
+ }
+ }
+
+ /*
+ * Close the stream registering in order to be ready to write data */
+ err = pC->m_pWriterGlobInt->pFctStartWriting(pC->m_p3gpWriterContext);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctStartWriting returns 0x%x",
+ err);
+ return err;
+ }
+
+
+ M4OSA_TRACE3_0("M4PTO3GPP_Ready4Processing: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+ M4WRITER_Context* pWriterContext,
+ M4SYS_AccessUnit* pWriterAudioAU, M4OSA_Time mtIncCts)
+ * @brief Write an AMR 12.2kbps silence FRAME into the writer
+ * @note Mainly used to fix the 'bzz' bug...
+ * @param pWriterDataIntInterface (IN) writer data interfaces
+ * pWriterContext (IN/OUT)writer context
+ * pWriterAudioAU (OUT) writer audio access unit
+ * mtIncCts (IN) writer CTS
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+*/
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+ M4WRITER_Context* pWriterContext,
+ M4SYS_AccessUnit* pWriterAudioAU,
+ M4OSA_Time mtIncCts)
+{
+ M4OSA_ERR err;
+
+ err = pWriterDataIntInterface->pStartAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
+ pWriterAudioAU);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence122Frame: pWriterDataInt->pStartAU(audio) returns \
+ 0x%x!", err);
+ return err;
+ }
+
+ memcpy((void *)pWriterAudioAU->dataAddress,
+ (void *)M4PTO3GPP_AMR_AU_SILENCE_122_FRAME, M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE);
+ pWriterAudioAU->size = M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE;
+ pWriterAudioAU->CTS = mtIncCts;
+ pWriterAudioAU->nbFrag = 0;
+
+ err = pWriterDataIntInterface->pProcessAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
+ pWriterAudioAU);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence122Frame: pWriterDataInt->pProcessAU(silence) \
+ returns 0x%x!", err);
+ return err;
+ }
+
+ return M4NO_ERROR;
+}
+
/**
 ******************************************************************************
 M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
                                            M4WRITER_Context* pWriterContext,
                                            M4SYS_AccessUnit* pWriterAudioAU, M4OSA_Time mtIncCts)
 * @brief   Write an AMR 4.75kbps silence FRAME into the writer
 *          (fixed: comment previously said 12.2kbps, copied from the 122 variant)
 * @note    Mainly used to fix the 'bzz' bug...
 * @param   pWriterDataIntInterface    (IN)    writer data interfaces
 *          pWriterContext            (IN/OUT)writer context
 *          pWriterAudioAU            (OUT)    writer audio access unit
 *          mtIncCts                (IN)    writer CTS
 * @return    M4NO_ERROR: No error
 ******************************************************************************
*/
static M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
                                    M4WRITER_Context* pWriterContext,
                                    M4SYS_AccessUnit* pWriterAudioAU,
                                    M4OSA_Time mtIncCts)
{
    M4OSA_ERR err;

    /* Ask the writer for an empty audio AU to fill */
    err = pWriterDataIntInterface->pStartAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
        pWriterAudioAU);
    if (M4NO_ERROR != err)
    {
        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence048Frame: pWriterDataInt->pStartAU(audio)\
            returns 0x%x!", err);
        return err;
    }

    /* Copy the pre-computed 4.75 kbps silence frame and stamp the AU */
    memcpy((void *)pWriterAudioAU->dataAddress,
                (void *)M4PTO3GPP_AMR_AU_SILENCE_048_FRAME,
                M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE);
    pWriterAudioAU->size    = M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
    pWriterAudioAU->CTS     = mtIncCts;
    pWriterAudioAU->nbFrag  = 0;

    /* Hand the filled AU back to the writer */
    err = pWriterDataIntInterface->pProcessAU(pWriterContext,
                M4PTO3GPP_WRITER_AUDIO_STREAM_ID, pWriterAudioAU);
    if (M4NO_ERROR != err)
    {
        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence048Frame: \
            pWriterDataInt->pProcessAU(silence) returns 0x%x!", err);
        return err;
    }

    return M4NO_ERROR;
}
+
+
diff --git a/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c b/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c
new file mode 100755
index 0000000..96a6498
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4PTO3GPP_VideoPreProcessing.c
+ * @brief Picture to 3gpp Service video preprocessing management.
+ ******************************************************************************
+ */
+
+/**
+ * OSAL Debug utilities */
+#include "M4OSA_Debug.h"
+
+/**
+ * OSAL Memory management */
+#include "M4OSA_Memory.h"
+
+/**
+ * Definition of the M4PTO3GPP internal context */
+#include "M4PTO3GPP_InternalTypes.h"
+
+/**
+ * Definition of the M4PTO3GPP errors */
+#include "M4PTO3GPP_ErrorCodes.h"
+
+/* If time increment is too low then we have an infinite alloc loop into M4ViEncCaptureFrame() */
+/* Time increment should match 30 fps maximum */
+#define M4PTO3GPP_MIN_TIME_INCREMENT 33.3333334
+
+
/**
 ******************************************************************************
 * M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
 *                                 M4VIFI_ImagePlane* pPlaneOut)
 * @brief    Call an external callback to get the picture to encode
 * @note    It is called by the video encoder
 * @param    pContext    (IN)     VPP context, which actually is the M4PTO3GPP internal context
 *                              in our case
 * @param    pPlaneIn    (IN)     Contains the image (ignored here: the encoder passes NULL,
 *                              see the NOTE below)
 * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the
 *                              output YUV420 image read with the m_pPictureCallbackFct
 * @return    M4NO_ERROR:    No error
 * @return    Any error returned by an underlaying module
 ******************************************************************************
 */
/******************************************************/
M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
                             M4VIFI_ImagePlane* pPlaneOut)
/******************************************************/
{
    M4OSA_ERR err;
    M4OSA_Double mtDuration;
    M4OSA_UInt32 i;

    /*** NOTE ***/
    /* It's OK to get pPlaneIn == M4OSA_NULL here                   */
    /* since it has been given NULL in the pFctEncode() call.       */
    /* It's because we use the M4PTO3GPP internal context to        */
    /* transmit the encoder input data.                             */
    /* The input data is the image read from the m_pPictureCallbackFct */

    /**
     * The VPP context is actually the M4PTO3GPP context! */
    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);

    /**
     * Get the picture to encode.
     * Normal path: ask the integrator's callback for the next picture.
     * The 'else' branch handles one extra call made after the last picture
     * was announced (m_bLastInternalCallBack set below). */
    if (M4OSA_FALSE == pC->m_bLastInternalCallBack)
    {
        err = pC->m_Params.pPictureCallbackFct(pC->m_Params.pPictureCallbackCtxt, pPlaneOut,
            &mtDuration);

        /* In case of error when getting YUV to encode (ex: error when decoding a JPEG) */
        /* M4PTO3GPP_WAR_LAST_PICTURE is a warning, not an error: it flows through */
        if((M4NO_ERROR != err) && (((M4OSA_UInt32)M4PTO3GPP_WAR_LAST_PICTURE) != err))
        {
            return err;
        }

        /**
         * If end of encoding is asked by the size limitation system,
         * we must end the encoding the same way that when it is asked by the
         * picture callback (a.k.a. the integrator).
         * Thus we simulate the LastPicture code return: */
        if (M4OSA_TRUE == pC->m_IsLastPicture)
        {
            err = M4PTO3GPP_WAR_LAST_PICTURE;
        }

        if(((M4OSA_UInt32)M4PTO3GPP_WAR_LAST_PICTURE) == err)
        {
            pC->m_bLastInternalCallBack = M4OSA_TRUE; /* Toggle flag for the final call of the CB*/
            pC->m_IsLastPicture         = M4OSA_TRUE; /* To stop the encoder */
            pC->pSavedPlane             = pPlaneOut;  /* Save the last YUV plane ptr */
            pC->uiSavedDuration         = (M4OSA_UInt32)mtDuration; /* Save the last duration */
        }
    }
    else
    {
        /**< Not necessary here because the last frame duration is set to the-last-but-one by
              the light writer */
        /**< Only necessary for pC->m_mtNextCts below...*/
        mtDuration = pC->uiSavedDuration;


        /** Copy the last YUV plane into the current one
         * (the last pic is splited due to the callback extra-call...
         * NOTE(review): assumes pPlaneOut and pC->pSavedPlane have identical
         * stride/height per plane — appears guaranteed by the encoder, confirm. */
        for (i=0; i<3; i++)
        {
            memcpy((void *)pPlaneOut[i].pac_data,
                 (void *)pC->pSavedPlane[i].pac_data,
                  pPlaneOut[i].u_stride * pPlaneOut[i].u_height);
        }
    }

    /* TimeIncrement should be 30 fps maximum: clamp short durations to avoid an
       infinite alloc loop in M4ViEncCaptureFrame() (see M4PTO3GPP_MIN_TIME_INCREMENT) */
    if(mtDuration < M4PTO3GPP_MIN_TIME_INCREMENT)
    {
        mtDuration = M4PTO3GPP_MIN_TIME_INCREMENT;
    }

    /* Advance the CTS of the next frame by this frame's duration
       (milliseconds, presumably — 33.33 matches 30 fps; confirm unit) */
    pC->m_mtNextCts += mtDuration;

    M4OSA_TRACE3_0("M4PTO3GPP_applyVPP: returning M4NO_ERROR");
    return M4NO_ERROR;
}
+
diff --git a/libvideoeditor/vss/src/M4READER_Amr.c b/libvideoeditor/vss/src/M4READER_Amr.c
new file mode 100755
index 0000000..0859157
--- /dev/null
+++ b/libvideoeditor/vss/src/M4READER_Amr.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ************************************************************************
+ * @file M4READER_Amr.c
+ * @brief Generic encapsulation of the core amr reader
+ * @note This file implements the generic M4READER interface
+ * on top of the AMR reader
+ ************************************************************************
+*/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+
+#include "M4_Utils.h"
+
+#include "M4AMRR_CoreReader.h"
+#include "M4READER_Amr.h"
+
/**
 ************************************************************************
 * structure    M4READER_AMR_Context
 * @brief       This structure defines the internal context of a amr reader instance
 * @note        The context is allocated by M4READER_AMR_create and de-allocated
 *              by M4READER_AMR_destroy
 ************************************************************************
*/
typedef struct _M4READER_AMR_Context
{
    M4OSA_Context           m_pCoreContext;        /**< core amr reader context */
    M4_AudioStreamHandler*  m_pAudioStream;        /**< pointer on the audio stream
                                                        description returned by the core */
    M4SYS_AccessUnit        m_audioAu;             /**< audio access unit to be filled by the core */
    M4OSA_Time              m_maxDuration;         /**< duration of the audio stream */
    M4OSA_FileReadPointer*  m_pOsaFileReaderFcts;  /**< OSAL file read functions; set via
                                                        M4READER_AMR_setOption before open */

} M4READER_AMR_Context;
+
+
+/**
+ ************************************************************************
+ * @brief create an instance of the reader
+ * @note allocates the context
+ * @param pContext: (OUT) pointer on a reader context
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_ALLOC a memory allocation has failed
+ * @return M4ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_create(M4OSA_Context *pContext)
+{
+ M4READER_AMR_Context* pReaderContext;
+
+ /* Check function parameters */
+ M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
+ "M4READER_AMR_create: invalid context pointer");
+
+ pReaderContext = (M4READER_AMR_Context*)M4OSA_32bitAlignedMalloc(sizeof(M4READER_AMR_Context),
+ M4READER_AMR, (M4OSA_Char *)"M4READER_AMR_Context");
+ if (pReaderContext == M4OSA_NULL)
+ {
+ return M4ERR_ALLOC;
+ }
+
+ pReaderContext->m_pAudioStream = M4OSA_NULL;
+ pReaderContext->m_audioAu.dataAddress = M4OSA_NULL;
+ pReaderContext->m_maxDuration = 0;
+ pReaderContext->m_pCoreContext = M4OSA_NULL;
+ pReaderContext->m_pOsaFileReaderFcts = M4OSA_NULL;
+
+ *pContext = pReaderContext;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief destroy the instance of the reader
+ * @note after this call the context is invalid
+ *
+ * @param context: (IN) Context of the reader
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_destroy(M4OSA_Context context)
+{
+ M4READER_AMR_Context* pC=(M4READER_AMR_Context*)context;
+
+ /* Check function parameters*/
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "M4READER_AMR_destroy: invalid context pointer");
+
+ /**
+ * Check input parameter */
+ if (M4OSA_NULL == pC)
+ {
+ M4OSA_TRACE1_0("M4READER_AMR_destroy(): M4READER_AMR_destroy: context is M4OSA_NULL,\
+ returning M4ERR_PARAMETER");
+ return M4ERR_PARAMETER;
+ }
+
+ free(pC);
+
+ return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief open the reader and initializes its created instance
+ * @note this function opens the AMR file
+ * @param context: (IN) Context of the reader
+ * @param pFileDescriptor: (IN) Pointer to proprietary data identifying the media to open
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_open(M4OSA_Context context, M4OSA_Void* pFileDescriptor)
+{
+ M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+ M4OSA_ERR err;
+
+ /* Check function parameters*/
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "M4READER_AMR_open: invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
+ "M4READER_AMR_open: invalid pointer pFileDescriptor");
+
+ err = M4AMRR_openRead( &pC->m_pCoreContext, pFileDescriptor, pC->m_pOsaFileReaderFcts);
+
+ return err;
+}
+
+
+
+/**
+ ************************************************************************
+ * @brief close the reader
+ * @note
+ * @param context: (IN) Context of the reader
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_close(M4OSA_Context context)
+{
+ M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+ M4OSA_ERR err;
+ M4AMRR_State State;
+
+ /* Check function parameters*/
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "M4READER_AMR_close: invalid context pointer");
+
+ /**
+ * Check input parameter */
+ if (M4OSA_NULL == pC)
+ {
+ M4OSA_TRACE1_0("M4READER_AMR_close(): M4READER_AMR_close: context is M4OSA_NULL,\
+ returning M4ERR_PARAMETER");
+ return M4ERR_PARAMETER;
+ }
+
+ if (M4OSA_NULL != pC->m_pAudioStream)
+ {
+ err = M4AMRR_getState(pC->m_pCoreContext, &State,
+ ((M4_StreamHandler*)pC->m_pAudioStream)->m_streamId);
+ if(M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4READER_AMR_close: error when calling M4AMRR_getState\n");
+ return err;
+ }
+
+ if (M4AMRR_kReading_nextAU == State)
+ {
+ err = M4AMRR_freeAU(pC->m_pCoreContext,
+ ((M4_StreamHandler*)pC->m_pAudioStream)->m_streamId, &pC->m_audioAu);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_0("M4READER_AMR_close: error when freeing access unit\n");
+ return err;
+ }
+ }
+
+ /* Delete the DSI if needed */
+ if(M4OSA_NULL != pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo)
+ {
+ free(\
+ pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo);
+
+ pC->m_pAudioStream->m_basicProperties.m_decoderSpecificInfoSize = 0;
+ pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo = M4OSA_NULL;
+ }
+
+ /* Finally destroy the stream handler */
+ free(pC->m_pAudioStream);
+ pC->m_pAudioStream = M4OSA_NULL;
+ }
+
+ if (M4OSA_NULL != pC->m_pCoreContext)
+ {
+ err = M4AMRR_closeRead(pC->m_pCoreContext);
+ pC->m_pCoreContext = M4OSA_NULL;
+ }
+
+ return err;
+}
+
/**
 ************************************************************************
 * @brief   Get the next stream found in the media
 * @note    current version needs to translate M4SYS_Stream to M4_StreamHandler
 *
 * @param   context:             (IN)  Context of the reader
 * @param   pMediaFamily:        (OUT) pointer to a user allocated M4READER_MediaFamily
 *                                     that will be filled with the media family of the found stream
 * @param   pStreamHandlerParam: (OUT) pointer to a stream handler that will be
 *                                     allocated and filled with the found stream description
 *
 * @return  M4NO_ERROR             there is no error
 * @return  M4WAR_NO_MORE_STREAM   no more available stream in the media (all streams found)
 * @return  M4ERR_PARAMETER        at least one parameter is not properly set (in DEBUG mode only)
 ************************************************************************
*/
M4OSA_ERR M4READER_AMR_getNextStream(M4OSA_Context context, M4READER_MediaFamily *pMediaFamily,
                                     M4_StreamHandler **pStreamHandlerParam)
{
    M4READER_AMR_Context*   pC=(M4READER_AMR_Context*)context;
    M4OSA_ERR               err;
    M4SYS_StreamID          streamIdArray[2];
    M4SYS_StreamDescription streamDesc;
    M4_AudioStreamHandler*  pAudioStreamHandler;
    M4_StreamHandler*       pStreamHandler;

    M4OSA_DEBUG_IF1((pC == 0),                  M4ERR_PARAMETER,
        "M4READER_AMR_getNextStream: invalid context");
    M4OSA_DEBUG_IF1((pMediaFamily == 0),        M4ERR_PARAMETER,
        "M4READER_AMR_getNextStream: invalid pointer to MediaFamily");
    M4OSA_DEBUG_IF1((pStreamHandlerParam == 0), M4ERR_PARAMETER,
        "M4READER_AMR_getNextStream: invalid pointer to StreamHandler");

    err = M4AMRR_getNextStream( pC->m_pCoreContext, &streamDesc);
    if (err == M4WAR_NO_MORE_STREAM)
    {
        /* All streams enumerated: arm the core for reading before
           propagating the "no more stream" warning to the caller */
        streamIdArray[0] = 0;
        streamIdArray[1] = 0;
        err = M4AMRR_startReading(pC->m_pCoreContext, streamIdArray);
        if ((M4OSA_UInt32)M4ERR_ALLOC == err)
        {
            M4OSA_TRACE2_0("M4READER_AMR_getNextStream: M4AMRR_startReading returns M4ERR_ALLOC!");
            return err;
        }
        return M4WAR_NO_MORE_STREAM;
    }
    else if (err != M4NO_ERROR)
    {
        return err;
    }

    /* AMR files only carry audio */
    *pMediaFamily = M4READER_kMediaFamilyAudio;

    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_32bitAlignedMalloc(sizeof(M4_AudioStreamHandler),
        M4READER_AMR, (M4OSA_Char *)"M4_AudioStreamHandler");
    if (pAudioStreamHandler == M4OSA_NULL)
    {
        return M4ERR_ALLOC;
    }
    pStreamHandler =(M4_StreamHandler*)(pAudioStreamHandler);
    *pStreamHandlerParam = pStreamHandler;
    pC->m_pAudioStream = pAudioStreamHandler;

    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);

    /*
     * Audio stream handler fields are initialised with 0 value.
     * They will be properly set by the AMR decoder
     */
    pAudioStreamHandler->m_samplingFrequency = 0;
    pAudioStreamHandler->m_byteFrameLength   = 0;
    pAudioStreamHandler->m_byteSampleSize    = 0;
    pAudioStreamHandler->m_nbChannels        = 0;

    pStreamHandler->m_pDecoderSpecificInfo    = (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
    pStreamHandler->m_decoderSpecificInfoSize = streamDesc.decoderSpecificInfoSize;
    pStreamHandler->m_streamId                = streamDesc.streamID;
    pStreamHandler->m_duration                = streamDesc.duration;
    pStreamHandler->m_pUserData               = (void*)streamDesc.timeScale; /*trick to change:
                                                    integer timeScale smuggled in a pointer */

    /* Track the longest stream duration for the Duration option */
    if (streamDesc.duration > pC->m_maxDuration)
    {
        pC->m_maxDuration = streamDesc.duration;
    }
    pStreamHandler->m_averageBitRate = streamDesc.averageBitrate;

    M4AMRR_getmaxAUsize(pC->m_pCoreContext, &pStreamHandler->m_maxAUSize);

    switch (streamDesc.streamType)
    {
        case M4SYS_kAMR:
            pStreamHandler->m_streamType = M4DA_StreamTypeAudioAmrNarrowBand;
            break;
        case M4SYS_kAMR_WB:
            pStreamHandler->m_streamType = M4DA_StreamTypeAudioAmrWideBand;
            break;
        default:
            /* NOTE(review): an unrecognized core type leaves m_streamType unset
               (the handler comes from a plain allocation) — presumably the core
               only ever reports AMR/AMR-WB here; confirm against M4AMRR. */
            break;
    }

    return err;
}
+
+/**
+ ************************************************************************
+ * @brief fill the access unit structure with initialization values
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler: (IN) pointer to the stream handler to
+ * which the access unit will be associated
+ * @param pAccessUnit: (IN/OUT) pointer to the access unit (allocated by the caller)
+ * to initialize
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+ M4_AccessUnit *pAccessUnit)
+{
+ M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+ M4SYS_AccessUnit* pAu;
+
+ M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+ "M4READER_AMR_fillAuStruct: invalid context");
+ M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+ "M4READER_AMR_fillAuStruct: invalid pointer to M4_StreamHandler");
+ M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
+ "M4READER_AMR_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+ if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+ {
+ pAu = &pC->m_audioAu;
+ }
+ else
+ {
+ M4OSA_TRACE1_0("M4READER_AMR_fillAuStruct: passed StreamHandler is not known\n");
+ return M4ERR_PARAMETER;
+ }
+
+ pAu->dataAddress = M4OSA_NULL;
+ pAu->size = 0;
+ /* JC: bug fix 1197 (set CTS to -20 in order the first AU CTS is 0) */
+ pAu->CTS = -20;
+ pAu->DTS = -20;
+ pAu->attribute = 0;
+ pAu->nbFrag = 0;
+
+ pAccessUnit->m_size = 0;
+ /* JC: bug fix 1197 (set CTS to -20 in order the first AU CTS is 0) */
+ pAccessUnit->m_CTS = -20;
+ pAccessUnit->m_DTS = -20;
+ pAccessUnit->m_attribute = 0;
+ pAccessUnit->m_dataAddress = M4OSA_NULL;/*pBuffer;*/
+ pAccessUnit->m_maxsize = pStreamHandler->m_maxAUSize;
+ pAccessUnit->m_streamID = pStreamHandler->m_streamId;
+ pAccessUnit->m_structSize = sizeof(M4_AccessUnit);
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief get an option value from the reader
+ * @note this function follows the set/get option mechanism described in OSAL 3.0
+ * it allows the caller to retrieve a property value:
+ * - the duration of the longest stream of the media
+ * - the version number of the reader (not implemented yet)
+ *
+ * @param context: (IN) Context of the reader
+ * @param optionId: (IN) indicates the option to get
+ * @param pValue: (OUT) pointer to structure or value (allocated by user)
+ * where option is stored
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getOption(M4OSA_Context context, M4OSA_OptionID optionId,
+ M4OSA_DataOption pValue)
+
+{
+ M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ /* Check function parameters */
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER, "invalid value pointer");
+
+ switch(optionId)
+ {
+ case M4READER_kOptionID_Duration :
+ {
+ *(M4OSA_Time*)pValue = pC->m_maxDuration;
+ }
+ break;
+
+ case M4READER_kOptionID_Bitrate:
+ {
+ M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+ if (M4OSA_NULL != pC->m_pAudioStream)
+ {
+ *pBitrate = pC->m_pAudioStream->m_basicProperties.m_averageBitRate;
+ }
+ else
+ {
+ pBitrate = 0;
+ err = M4ERR_PARAMETER;
+ }
+
+ }
+ break;
+ case M4READER_kOptionID_Version:
+ {
+ err = M4AMRR_getVersion((M4_VersionInfo*)pValue);
+ }
+ break;
+
+ default :
+ {
+ err = M4ERR_PARAMETER;
+ }
+ }
+
+ return err;
+}
+
+/**
+ ************************************************************************
+ * @brief set an option value of the reader
+ * @note this function follows the set/get option mechanism described in OSAL 3.0
+ * it allows the caller to set a property value:
+ * - the OSAL file read functions
+ *
+ * @param context: (IN) Context of the decoder
+ * @param optionId: (IN) Identifier indicating the option to set
+ * @param pValue: (IN) Pointer to structure or value (allocated by user)
+ * where option is stored
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_BAD_OPTION_ID The option ID is not a valid one
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_PARAMETER The option parameter is invalid
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_setOption(M4OSA_Context context, M4OSA_OptionID optionId,
+                                M4OSA_DataOption pValue)
+{
+    M4READER_AMR_Context* pReaderContext = (M4READER_AMR_Context*)context;
+    M4OSA_ERR result = M4NO_ERROR;
+
+    /* Sanity checks (active in DEBUG builds only) */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER, "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER, "invalid value pointer");
+
+    /* The OSAL file-read callback table is the only writable option */
+    if (M4READER_kOptionID_SetOsaFileReaderFctsPtr == optionId)
+    {
+        pReaderContext->m_pOsaFileReaderFcts = (M4OSA_FileReadPointer*)pValue;
+    }
+    else
+    {
+        result = M4ERR_PARAMETER;
+    }
+
+    return result;
+}
+
+/**
+ ************************************************************************
+ * @brief reset the stream, that is seek it to beginning and make it ready to be read
+ * @note this function is to be deprecated in next versions
+ *
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) The stream handler of the stream to reset
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4ERR_BAD_STREAM_ID the streamID does not exist
+ * @return M4ERR_STATE this function cannot be called now
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4WAR_INVALID_TIME beginning of the stream can not be reached
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_reset(M4OSA_Context context, M4_StreamHandler *pStreamHandler)
+{
+    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+    M4SYS_StreamID streamIdArray[2];
+    M4OSA_ERR err;
+    M4SYS_AccessUnit* pAu;
+    M4OSA_Time time64 = 0;
+    M4AMRR_State State;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_AMR_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "M4READER_AMR_reset: invalid pointer to M4_StreamHandler");
+
+    /* Only the single audio stream owned by this reader can be reset */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_reset: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    /* If the core reader is mid-read, release the AU it still holds
+       before seeking, then drop our stale pointer to its buffer */
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_AMR_reset: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    /* Seek list holds the audio stream only; 0 terminates the list */
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    err = M4NO_ERROR;
+
+    /* for reset during playback */
+    /* (set CTS to -20 in order the first AU CTS is 0) */
+    pAu->CTS = -20;
+    pAu->DTS = -20;
+
+    /* Rewind the core reader to the very beginning of the stream */
+    err = M4AMRR_seek(pC->m_pCoreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_reset: error when calling M4AMRR_seek()\n");
+        return err;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief jump into the stream at the specified time
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) the stream description of the stream to make jump
+ * @param pTime (IN/OUT) IN: the time to jump to (in ms)
+ * OUT: the time to which the stream really jumped
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4WAR_INVALID_TIME the time can not be reached
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_jump(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                M4OSA_Int32* pTime)
+{
+    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+    M4SYS_StreamID streamIdArray[2];
+    M4OSA_ERR err;
+    M4SYS_AccessUnit* pAu;
+    M4OSA_Time time64 = (M4OSA_Time)*pTime;
+    M4AMRR_State State;
+
+    /* FIX: the three debug traces below used to carry copy/pasted tags
+       ("M4READER_AMR_reset", "M4READER_3GP_jump"); they now name the
+       actual function so failures are attributed correctly */
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_AMR_jump: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "M4READER_AMR_jump: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER, "M4READER_AMR_jump: invalid time pointer");
+
+    /* Only the reader's own audio stream can be seeked */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_jump: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Release any AU the core reader is still holding before the seek */
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_AMR_jump: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    /* Pre-load CTS/DTS with the target; M4AMRR_seek updates time64 to the
+       position actually reached */
+    pAu->CTS = time64;
+    pAu->DTS = time64;
+    err = M4AMRR_seek(pC->m_pCoreContext, streamIdArray, time64, M4SYS_kNoRAPprevious, &time64);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_jump: error when calling M4AMRR_seek()\n");
+        return err;
+    }
+
+    /* Report back the time (in ms) the stream really jumped to */
+    *pTime = (M4OSA_Int32)time64;
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief Gets an access unit (AU) from the stream handler source.
+ * @note An AU is the smallest possible amount of data to be decoded by a decoder (audio/video).
+ * In the current version, we need to translate M4OSA_AccessUnit to M4_AccessUnit
+ *
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) The stream handler of the stream to make jump
+ * @param pAccessUnit (IN/OUT) Pointer to an access unit to fill with read data (the au
+ structure is allocated by the user, and must be
+ initialized by calling M4READER_fillAuStruct_fct after
+ creation)
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_ALLOC memory allocation failed
+ * @return M4ERR_BAD_STREAM_ID at least one of the stream Id. does not exist.
+ * @return M4WAR_NO_MORE_AU there are no more access unit in the stream (end of stream)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getNextAu(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                M4_AccessUnit *pAccessUnit)
+{
+    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_AccessUnit* pAu;
+    M4_MediaTime timeScale;
+    M4AMRR_State State;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "M4READER_AMR_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "M4READER_AMR_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
+        "M4READER_AMR_getNextAu: invalid pointer to M4_AccessUnit");
+
+    /* keep trace of the allocated buffers in AU to be able to free them at destroy()
+       but be aware that system is risky and would need upgrade if more than
+       one video and one audio AU is needed */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_getNextAu: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Release the previous AU if the core reader is still holding it */
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            /* FIX: trace used to say "M4READER_AVI_getNextAu" (copy/paste) */
+            M4OSA_TRACE1_0("M4READER_AMR_getNextAu: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    pAu->nbFrag = 0;
+    err = M4AMRR_nextAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+
+    if (err == M4NO_ERROR)
+    {
+        /* timeScale (kHz, stored in m_pUserData as an integer trick) is kept
+           for the currently disabled CTS/DTS conversions below */
+        timeScale = (M4OSA_Float)(M4OSA_Int32)(pStreamHandler->m_pUserData)/1000;
+        pAccessUnit->m_dataAddress = (M4OSA_MemAddr8)pAu->dataAddress;
+        pAccessUnit->m_size = pAu->size;
+        pAccessUnit->m_CTS = (M4_MediaTime)pAu->CTS/*/timeScale*/;
+        pAccessUnit->m_DTS = (M4_MediaTime)pAu->DTS/*/timeScale*/;
+        pAccessUnit->m_attribute = pAu->attribute;
+    }
+    else
+    {
+        /* no more data (e.g. M4WAR_NO_MORE_AU): report an empty AU */
+        pAccessUnit->m_size=0;
+    }
+
+    return err;
+}
+
+/**
+*************************************************************************
+* @brief Retrieves the generic interfaces implemented by the reader
+*
+* @param pMediaType : Pointer on a M4READER_MediaType (allocated by the caller)
+* that will be filled with the media type supported by this reader
+* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface implemented
+* by this reader. The interface is a structure allocated by the function and must
+* be un-allocated by the caller.
+* @param pRdrDataInterface : Address of a pointer that will be set to the data interface implemented
+* by this reader. The interface is a structure allocated by the function and must
+* be un-allocated by the caller.
+*
+* @returns : M4NO_ERROR if OK
+* ERR_ALLOC if an allocation failed
+* ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+*************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
+                                    M4READER_GlobalInterface **pRdrGlobalInterface,
+                                    M4READER_DataInterface **pRdrDataInterface)
+{
+    M4OSA_DEBUG_IF1((pMediaType == 0), M4ERR_PARAMETER,
+        "M4READER_AMR_getInterfaces: invalid pointer to MediaType");
+    M4OSA_DEBUG_IF1((pRdrGlobalInterface == 0), M4ERR_PARAMETER,
+        "M4READER_AMR_getInterfaces: invalid pointer to M4READER_GlobalInterface");
+    M4OSA_DEBUG_IF1((pRdrDataInterface == 0), M4ERR_PARAMETER,
+        "M4READER_AMR_getInterfaces: invalid pointer to M4READER_DataInterface");
+
+    /* Both interface tables are heap-allocated here; the caller owns and
+       must free them */
+    *pRdrGlobalInterface =
+        (M4READER_GlobalInterface*)M4OSA_32bitAlignedMalloc( sizeof(M4READER_GlobalInterface),
+        M4READER_AMR, (M4OSA_Char *)"M4READER_GlobalInterface" );
+    if (M4OSA_NULL == *pRdrGlobalInterface)
+    {
+        *pRdrDataInterface = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+    *pRdrDataInterface = (M4READER_DataInterface*)M4OSA_32bitAlignedMalloc( sizeof(M4READER_DataInterface),
+        M4READER_AMR, (M4OSA_Char *)"M4READER_DataInterface");
+    if (M4OSA_NULL == *pRdrDataInterface)
+    {
+        /* second allocation failed: roll back the first so nothing leaks */
+        free(*pRdrGlobalInterface);
+        *pRdrGlobalInterface = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    *pMediaType = M4READER_kMediaTypeAMR;
+
+    /* Populate the generic reader vtable with the AMR implementations;
+       start/stop are not supported by this reader */
+    (*pRdrGlobalInterface)->m_pFctCreate           = M4READER_AMR_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy          = M4READER_AMR_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen             = M4READER_AMR_open;
+    (*pRdrGlobalInterface)->m_pFctClose            = M4READER_AMR_close;
+    (*pRdrGlobalInterface)->m_pFctGetOption        = M4READER_AMR_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption        = M4READER_AMR_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream    = M4READER_AMR_getNextStream;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct     = M4READER_AMR_fillAuStruct;
+    (*pRdrGlobalInterface)->m_pFctStart            = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop             = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctJump             = M4READER_AMR_jump;
+    (*pRdrGlobalInterface)->m_pFctReset            = M4READER_AMR_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime   = M4OSA_NULL; /*all AUs are RAP*/
+
+    (*pRdrDataInterface)->m_pFctGetNextAu          = M4READER_AMR_getNextAu;
+
+    /* context is attached later by the caller, after create() */
+    (*pRdrDataInterface)->m_readerContext          = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4READER_Pcm.c b/libvideoeditor/vss/src/M4READER_Pcm.c
new file mode 100755
index 0000000..833930b
--- /dev/null
+++ b/libvideoeditor/vss/src/M4READER_Pcm.c
@@ -0,0 +1,720 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4READER_Pcm.c
+ * @brief Generic encapsulation of the core pcm reader
+ * @note This file implements the generic M4READER interface
+ * on top of the PCM reader
+ ************************************************************************
+*/
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4PCMR_CoreReader.h"
+#include "M4READER_Pcm.h"
+/**
+ ************************************************************************
+ * structure M4READER_WAV_Context
+ * @brief This structure defines the internal context of a wav reader instance
+ * @note The context is allocated and de-allocated by the reader
+ ************************************************************************
+ */
+typedef struct _M4READER_PCM_Context
+{
+    M4OSA_Context           m_coreContext;        /**< core wav reader context */
+    M4_StreamHandler*       m_pAudioStream;       /**< pointer on the audio stream description
+                                                       returned by the core */
+    M4SYS_AccessUnit        m_audioAu;            /**< audio access unit to be filled by the core */
+    M4OSA_FileReadPointer*  m_pOsaFileReaderFcts; /**< OSAL file read functions,
+                                                       set via setOption() before open() */
+} M4READER_PCM_Context;
+
+
+/**
+ ************************************************************************
+ * @brief Creates a wav reader instance
+ * @note allocates the context
+ * @param pContext: (OUT) Pointer to a wav reader context
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_ALLOC: a memory allocation has failed
+ * @return M4ERR_PARAMETER: at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_create(M4OSA_Context* pContext)
+{
+    M4READER_PCM_Context* pC = M4OSA_NULL;
+
+    M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_create: invalid context pointer");
+
+    /* Allocate the reader instance; the caller gets it back via *pContext */
+    pC = (M4READER_PCM_Context*)M4OSA_32bitAlignedMalloc(sizeof(M4READER_PCM_Context),
+         M4READER_WAV, (M4OSA_Char *)"M4READER_PCM_Context");
+    if (M4OSA_NULL == pC)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    /* Start from a fully cleared state; fields are filled in lazily by
+       setOption()/open()/getNextStream() */
+    pC->m_coreContext         = M4OSA_NULL;
+    pC->m_pAudioStream        = M4OSA_NULL;
+    pC->m_audioAu.dataAddress = M4OSA_NULL;
+    pC->m_pOsaFileReaderFcts  = M4OSA_NULL;
+
+    *pContext = pC;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Destroy the instance of the reader
+ * @note the context is un-allocated
+ * @param context: (IN) context of the network reader
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_destroy(M4OSA_Context context)
+{
+    M4READER_PCM_Context* pReaderContext = (M4READER_PCM_Context*)context;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+        "M4READER_PCM_destroy: invalid context pointer");
+
+    /* Streams and access units are released in close(); only the context
+       structure itself is freed here */
+    free(pReaderContext);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Initializes the reader instance
+ * @param context: (IN) context of the network reader
+ * @param pFileDescriptor: (IN) Pointer to proprietary data identifying the media to open
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_open(M4OSA_Context context, M4OSA_Void* pFileDescriptor)
+{
+    M4READER_PCM_Context* pReaderContext = (M4READER_PCM_Context*)context;
+
+    /* Sanity checks (active in DEBUG builds only) */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+        "M4READER_PCM_open: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
+        "M4READER_PCM_open: invalid pointer pFileDescriptor");
+
+    /* Delegate to the core PCM reader, using the OSAL file-read callbacks
+       previously installed through setOption() */
+    return M4PCMR_openRead(&(pReaderContext->m_coreContext),
+        (M4OSA_Char*)pFileDescriptor, pReaderContext->m_pOsaFileReaderFcts);
+}
+
+/**
+ ************************************************************************
+ * @brief close the reader
+ * @note
+ * @param context: (IN) Context of the reader
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_close(M4OSA_Context context)
+{
+    M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
+    /* FIX: err was uninitialized; when there was neither an AU to free nor a
+       core context to close, an indeterminate value was returned (UB) */
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "M4READER_PCM_close: invalid context pointer");
+
+    /* Free audio AU and audio stream */
+    if (M4OSA_NULL != pC->m_pAudioStream)
+    {
+        if (M4OSA_NULL != pC->m_audioAu.dataAddress)
+        {
+            err = M4PCMR_freeAU(pC->m_coreContext, pC->m_pAudioStream->m_streamId,
+                &pC->m_audioAu);
+            if (err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_0("M4READER_PCM_close: Error when freeing audio access unit");
+                return err;
+            }
+        }
+        free(pC->m_pAudioStream);
+        pC->m_pAudioStream = M4OSA_NULL;
+    }
+
+    if (M4OSA_NULL != pC->m_coreContext)
+    {
+        /* Close the PCM file */
+        err = M4PCMR_closeRead(pC->m_coreContext);
+        pC->m_coreContext = M4OSA_NULL;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief set an option value of the reader
+ * @note this function follows the set/get option mechanism described in OSAL 3.0
+ * it allows the caller to set a property value:
+ * @param context: (IN) Context of the reader
+ * @param optionId: (IN) indicates the option to set
+ * @param pValue: (IN) pointer to structure or value (allocated by user)
+ * where option is stored
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_setOption(M4OSA_Context context, M4OSA_OptionID optionId, void* pValue)
+{
+    M4READER_PCM_Context* pReaderContext = (M4READER_PCM_Context*)context;
+
+    /* Parameter checks (active in DEBUG builds only) */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+        "M4READER_PCM_setOption: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+        "M4READER_PCM_setOption: invalid value pointer");
+
+    /* The OSAL file-read callback table is the only settable option */
+    if (optionId != M4READER_kOptionID_SetOsaFileReaderFctsPtr)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    pReaderContext->m_pOsaFileReaderFcts = (M4OSA_FileReadPointer*)pValue;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Retrieves the an option value from the reader, given an option ID.
+ * @note this function follows the set/get option mechanism described in OSAL 3.0
+ * it allows the caller to retrieve a property value:
+ *
+ * @param context: (IN) context of the network reader
+ * @param optionId: (IN) option identificator whose option value is to be retrieved.
+ * @param pValue: (OUT) option value retrieved.
+ *
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: at least one parameter is not properly set (in DEBUG only)
+ * @return M4ERR_BAD_OPTION_ID: the required option identificator is unknown
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getOption(M4OSA_Context context, M4OSA_OptionID optionId, void* pValue)
+{
+    M4READER_PCM_Context* pContext = (M4READER_PCM_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* no check of context at this level because some option does not need it */
+    M4OSA_DEBUG_IF1((pValue == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_getOption: invalid pointer on value");
+
+    switch (optionId)
+    {
+    case M4READER_kOptionID_Duration:
+        /* FIX: guard added — m_pAudioStream is only valid after
+           getNextStream(); the original dereferenced it unconditionally */
+        if (M4OSA_NULL != pContext->m_pAudioStream)
+        {
+            *((M4OSA_UInt32*)pValue) = pContext->m_pAudioStream->m_duration;
+        }
+        else
+        {
+            err = M4ERR_PARAMETER;
+        }
+        break;
+
+    case M4READER_kOptionID_Version:
+        err = M4PCMR_getVersion((M4_VersionInfo*)pValue);
+        break;
+
+    case M4READER_kOptionID_Copyright:
+        return M4ERR_NOT_IMPLEMENTED;
+        break;
+
+    case M4READER_kOptionID_Bitrate:
+        {
+            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+            if (M4OSA_NULL != pContext->m_pAudioStream)
+            {
+                *pBitrate = pContext->m_pAudioStream->m_averageBitRate;
+            }
+            else
+            {
+                /* FIX: was "pBitrate = 0;", which only nulled the local
+                   pointer and left the caller's output uninitialized */
+                *pBitrate = 0;
+                err = M4ERR_PARAMETER;
+            }
+        }
+        break;
+
+    default:
+        err = M4ERR_BAD_OPTION_ID;
+        M4OSA_TRACE1_0("M4READER_PCM_getOption: unsupported optionId");
+        break;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief Get the next stream found in the media
+ * @note
+ *
+ * @param context: (IN) context of the network reader
+ * @param pMediaFamily: (OUT) pointer to a user allocated M4READER_MediaFamily that will
+ * be filled
+ * @param pStreamHandler: (OUT) pointer to a stream handler that will be allocated and filled
+ * with the found stream description
+ *
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_PARAMETER: at least one parameter is not properly set (in DEBUG only)
+ * @return M4WAR_NO_MORE_STREAM no more available stream in the media (all streams found)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getNextStream(M4OSA_Context context, M4READER_MediaFamily *pMediaFamily,
+                                     M4_StreamHandler **pStreamHandler)
+{
+    M4READER_PCM_Context*   pC=(M4READER_PCM_Context*)context;
+    M4OSA_ERR               err;
+/*    M4_StreamHandler*       pStreamHandler = M4OSA_NULL;*/
+    M4SYS_StreamDescription streamDesc;
+    M4_AudioStreamHandler*  pAudioStreamHandler;
+    M4OSA_Double            fDuration;
+    M4SYS_StreamID          streamIdArray[2];
+    M4PCMC_DecoderSpecificInfo* pDsi;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_getNextStream: invalid context");
+    M4OSA_DEBUG_IF1((pMediaFamily == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_getNextStream: invalid pointer to MediaFamily");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_getNextStream: invalid pointer to StreamHandler");
+
+    err = M4PCMR_getNextStream( pC->m_coreContext, &streamDesc);
+    if (err == M4WAR_NO_MORE_STREAM)
+    {
+        /* all streams enumerated: arm the core reader for reading and
+           propagate the warning to stop the caller's enumeration loop */
+        streamIdArray[0] = 0;
+        streamIdArray[1] = 0;
+        err = M4PCMR_startReading(pC->m_coreContext, streamIdArray); /*to put in open function*/
+
+        return M4WAR_NO_MORE_STREAM;
+    }
+    else if (M4NO_ERROR != err)
+    {
+        return err; /*also return M4WAR_NO_MORE_STREAM*/
+    }
+
+    /* classify the stream; only PCM audio variants are recognized */
+    switch (streamDesc.streamType)
+    {
+        case M4SYS_kAudioUnknown:
+        case M4SYS_kPCM_16bitsS:
+        case M4SYS_kPCM_16bitsU:
+        case M4SYS_kPCM_8bitsU:
+            *pMediaFamily = M4READER_kMediaFamilyAudio;
+            M4OSA_TRACE2_0("M4READER_PCM_getNextStream: found audio stream");
+            break;
+        default:
+            *pMediaFamily = M4READER_kMediaFamilyUnknown;
+            M4OSA_TRACE2_0("M4READER_PCM_getNextStream: found UNKNOWN stream");
+            return M4NO_ERROR;
+    }
+
+    /* allocate the handler returned to (and eventually freed by) close() */
+    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_32bitAlignedMalloc(sizeof(M4_AudioStreamHandler),
+        M4READER_WAV, (M4OSA_Char *)"M4_AudioStreamHandler");
+    if (pAudioStreamHandler == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
+    pC->m_pAudioStream = (M4_StreamHandler*)(pAudioStreamHandler);
+
+    pDsi = (M4PCMC_DecoderSpecificInfo*)(streamDesc.decoderSpecificInfo);
+    M4OSA_DEBUG_IF1((pDsi == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_getNextStream: invalid decoder specific info in stream");
+
+    pAudioStreamHandler->m_samplingFrequency = pDsi->SampleFrequency;
+    pAudioStreamHandler->m_byteSampleSize    = (M4OSA_UInt32)(pDsi->BitsPerSample/8);
+    /* m_byteFrameLength is badly named: it is not in bytes but in samples number */
+    if(pAudioStreamHandler->m_samplingFrequency == 8000)
+    {
+        /* AMR case */
+        pAudioStreamHandler->m_byteFrameLength   =
+            (((streamDesc.averageBitrate/8)/50)/pDsi->nbChannels)\
+                /pAudioStreamHandler->m_byteSampleSize;/*/50 to get around 20 ms of audio*/
+    }
+    else
+    {
+        /* AAC Case */
+        pAudioStreamHandler->m_byteFrameLength =
+            (M4OSA_UInt32)(((streamDesc.averageBitrate/8)/15.625)/pDsi->nbChannels)\
+                /pAudioStreamHandler->m_byteSampleSize;
+    }
+
+    pAudioStreamHandler->m_nbChannels = pDsi->nbChannels;
+
+    /* convert the core duration (in timeScale units) to milliseconds */
+    M4OSA_TIME_TO_MS( fDuration, streamDesc.duration, streamDesc.timeScale);
+    pC->m_pAudioStream->m_duration                = (M4OSA_Int32)fDuration;
+    pC->m_pAudioStream->m_pDecoderSpecificInfo    = (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
+    pC->m_pAudioStream->m_decoderSpecificInfoSize = streamDesc.decoderSpecificInfoSize;
+    pC->m_pAudioStream->m_streamId                = streamDesc.streamID;
+    /* the time scale is smuggled through m_pUserData as a plain integer */
+    pC->m_pAudioStream->m_pUserData               =
+        (void*)streamDesc.timeScale; /*trick to change*/
+    pC->m_pAudioStream->m_averageBitRate          = streamDesc.averageBitrate;
+    /* max AU size = samples per frame * bytes per sample * channel count */
+    pC->m_pAudioStream->m_maxAUSize               =
+         pAudioStreamHandler->m_byteFrameLength*pAudioStreamHandler->m_byteSampleSize\
+            *pAudioStreamHandler->m_nbChannels;
+    pC->m_pAudioStream->m_streamType              = M4DA_StreamTypeAudioPcm;
+
+    *pStreamHandler = pC->m_pAudioStream;
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief fill the access unit structure with initialization values
+ * @note
+ *
+ * @param context: (IN) context of the network reader
+ * @param pStreamHandler: (IN) pointer to the stream handler to which the access unit will
+ * be associated
+ * @param pAccessUnit: (IN) pointer to the access unit(allocated by the caller) to initialize
+ * @return M4NO_ERROR: there is no error.
+ * @return M4ERR_PARAMETER: at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                    M4_AccessUnit *pAccessUnit)
+{
+    M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
+    M4SYS_AccessUnit*     pCoreAu;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_fillAuStruct: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_fillAuStruct: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+    /* The PCM reader owns a single (audio) stream; reject any other handler */
+    if (pStreamHandler != (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_fillAuStruct: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+    pCoreAu = &pC->m_audioAu;
+
+    /* Reset the internal core-reader access unit... */
+    pCoreAu->dataAddress = M4OSA_NULL;
+    pCoreAu->size        = 0;
+    pCoreAu->CTS         = 0;
+    pCoreAu->DTS         = 0;
+    pCoreAu->attribute   = 0;
+    pCoreAu->nbFrag      = 0;
+
+    /* ...and mirror the same pristine state into the caller's access unit */
+    pAccessUnit->m_dataAddress = M4OSA_NULL;/*pBuffer;*/
+    pAccessUnit->m_size        = 0;
+    pAccessUnit->m_CTS         = 0;
+    pAccessUnit->m_DTS         = 0;
+    pAccessUnit->m_attribute   = 0;
+    pAccessUnit->m_maxsize     = pStreamHandler->m_maxAUSize;
+    pAccessUnit->m_streamID    = pStreamHandler->m_streamId;
+    pAccessUnit->m_structSize  = sizeof(M4_AccessUnit);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief reset the stream, that is: seek it to beginning and make it ready to be read
+ * @note
+ * @param context: (IN) context of the network reader
+ * @param pStreamHandler: (IN) The stream handler of the stream to reset
+ * @return M4NO_ERROR: there is no error.
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_reset(M4OSA_Context context, M4_StreamHandler *pStreamHandler)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time              time64 = 0;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_PCM_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_reset: invalid pointer to M4_StreamHandler");
+
+    /* Only the reader's own audio stream can be reset */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_reset: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Release the AU buffer still held from a previous nextAU() call, and
+       clear the stale pointer so it is not freed twice */
+    if (pAu->dataAddress != M4OSA_NULL)
+    {
+        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_PCM_reset: error when freeing access unit");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    /* Seek list holds the audio stream only; 0 terminates the list */
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    pAu->CTS = 0;
+    pAu->DTS = 0;
+
+    /* This call is needed only when replay during playback */
+    err = M4PCMR_seek(pC->m_coreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief Get the next access unit of the specified stream
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) The stream handler of the stream to make jump
+ * @param pAccessUnit (IN/OUT) Pointer to an access unit to fill with read data
+ * (the au structure is allocated by the user, and must be
+ * initialized
+ * by calling M4READER_fillAuStruct_fct after creation)
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @returns M4ERR_ALLOC memory allocation failed
+ * @returns M4ERR_BAD_STREAM_ID at least one of the stream Id. does not exist.
+ * @returns M4WAR_NO_DATA_YET there is no enough data on the stream for new access unit
+ * @returns M4WAR_NO_MORE_AU there are no more access unit in the stream (end of stream)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getNextAu(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                M4_AccessUnit *pAccessUnit)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err = M4NO_ERROR;
+    M4SYS_AccessUnit*       pAu;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
+        "M4READER_PCM_getNextAu: invalid pointer to M4_AccessUnit");
+
+    /* keep trace of the allocated buffers in AU to be able to free them at destroy()
+       but be aware that system is risky and would need upgrade if more than
+       one video and one audio AU is needed */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_getNextAu: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Release the buffer kept from the previous call */
+    if (pAu->dataAddress != M4OSA_NULL)
+    {
+        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_PCM_getNextAu: error when freeing access unit");
+            return err;
+        }
+        /* FIX: clear the stale pointer (consistent with M4READER_PCM_reset
+           and the AMR reader); otherwise a failing nextAU() below would leave
+           a dangling dataAddress that close() could free a second time */
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    pAu->nbFrag = 0;
+    err = M4PCMR_nextAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+
+    if (err == M4NO_ERROR)
+    {
+        pAccessUnit->m_dataAddress = (M4OSA_MemAddr8)pAu->dataAddress;
+        pAccessUnit->m_size        = pAu->size;
+        pAccessUnit->m_CTS         = (M4OSA_Double)pAu->CTS;
+        pAccessUnit->m_DTS         = (M4OSA_Double)pAu->DTS;
+        pAccessUnit->m_attribute   = pAu->attribute;
+    }
+    else
+    {
+        /* no more data (e.g. M4WAR_NO_MORE_AU): report an empty AU */
+        pAccessUnit->m_size=0;
+    }
+
+    return err;
+}
+
+
+/**
+ ************************************************************************
+ * @brief jump into the stream at the specified time
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) the stream handler of the stream to make jump
+ * @param pTime (IN/OUT) IN: the time to jump to (in ms)
+ * OUT: the time to which the stream really jumped
+ * But in this reader, we do not modify the time
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_ALLOC there is no more memory available
+ * @return M4ERR_BAD_STREAM_ID the streamID does not exist
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_jump(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+ M4OSA_Int32* pTime)
+{
+ M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
+ M4SYS_StreamID streamIdArray[2];
+ M4OSA_ERR err;
+ M4SYS_AccessUnit* pAu;
+ M4OSA_Time time64;
+
+ M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_PCM_jump: invalid context");
+ M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+ "M4READER_PCM_jump: invalid pointer to M4_StreamHandler");
+ M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER, "M4READER_PCM_jump: invalid time pointer");
+
+ time64 = (M4OSA_Time)*pTime;
+
+ if (pStreamHandler == pC->m_pAudioStream)
+ {
+ pAu = &pC->m_audioAu;
+ }
+ else
+ {
+ M4OSA_TRACE1_0("M4READER_PCM_jump: passed StreamHandler is not known");
+ return M4ERR_PARAMETER;
+ }
+
+ if (pAu->dataAddress != M4OSA_NULL)
+ {
+ err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_0("M4READER_PCM_jump: Error when freeing access unit");
+ return err;
+ }
+ pAu->dataAddress = M4OSA_NULL;
+ }
+
+ streamIdArray[0] = pStreamHandler->m_streamId;
+ streamIdArray[1] = 0;
+
+ pAu->CTS = time64;
+ pAu->DTS = time64;
+
+ err = M4PCMR_seek(pC->m_coreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+
+ *pTime = (M4OSA_Int32)time64;
+
+ return err;
+}
+
+/**
+ *************************************************************************
+ * @brief Retrieves the generic interfaces implemented by the reader
+ *
+ * @param pMediaType : Pointer on a M4READER_MediaType (allocated by the caller)
+ * that will be filled with the media type supported by this reader
+ * @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
+ * implemented by this reader. The interface is a structure allocated
+ * by the function and must be un-allocated by the caller.
+ * @param pRdrDataInterface : Address of a pointer that will be set to the data interface
+ * implemented by this reader. The interface is a structure allocated
+ * by the function and must be un-allocated by the caller.
+ *
+ * @returns : M4NO_ERROR if OK
+ * ERR_ALLOC if an allocation failed
+ * ERR_PARAMETER at least one parameter is not properly set (in DEBUG only)
+ *************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getInterfaces(M4READER_MediaType *pMediaType,
+ M4READER_GlobalInterface **pRdrGlobalInterface,
+ M4READER_DataInterface **pRdrDataInterface)
+/************************************************************************/
+{
+ M4OSA_DEBUG_IF1((pMediaType == 0), M4ERR_PARAMETER,
+ "M4READER_PCM_getInterfaces: invalid pointer to MediaType passed");
+ M4OSA_DEBUG_IF1((pRdrGlobalInterface == 0), M4ERR_PARAMETER,
+ "M4READER_PCM_getInterfaces: invalid pointer to M4READER_GlobalInterface");
+ M4OSA_DEBUG_IF1((pRdrDataInterface == 0), M4ERR_PARAMETER,
+ "M4READER_PCM_getInterfaces: invalid pointer to M4READER_DataInterface");
+
+ *pRdrGlobalInterface =
+ (M4READER_GlobalInterface*)M4OSA_32bitAlignedMalloc( sizeof(M4READER_GlobalInterface), M4READER_WAV,
+ (M4OSA_Char *)"M4READER_PCM GlobalInterface");
+ if (M4OSA_NULL == *pRdrGlobalInterface)
+ {
+ return M4ERR_ALLOC;
+ }
+ *pRdrDataInterface =
+ (M4READER_DataInterface*)M4OSA_32bitAlignedMalloc( sizeof(M4READER_DataInterface), M4READER_WAV,
+ (M4OSA_Char *) "M4READER_PCM DataInterface");
+ if (M4OSA_NULL == *pRdrDataInterface)
+ {
+ free(*pRdrGlobalInterface);
+ return M4ERR_ALLOC;
+ }
+
+ *pMediaType = M4READER_kMediaTypePCM;
+
+ (*pRdrGlobalInterface)->m_pFctCreate = M4READER_PCM_create;
+ (*pRdrGlobalInterface)->m_pFctDestroy = M4READER_PCM_destroy;
+ (*pRdrGlobalInterface)->m_pFctOpen = M4READER_PCM_open;
+ (*pRdrGlobalInterface)->m_pFctClose = M4READER_PCM_close;
+ (*pRdrGlobalInterface)->m_pFctStart = M4OSA_NULL;
+ (*pRdrGlobalInterface)->m_pFctStop = M4OSA_NULL;
+ (*pRdrGlobalInterface)->m_pFctGetOption = M4READER_PCM_getOption;
+ (*pRdrGlobalInterface)->m_pFctSetOption = M4READER_PCM_setOption;
+ (*pRdrGlobalInterface)->m_pFctGetNextStream = M4READER_PCM_getNextStream;
+ (*pRdrGlobalInterface)->m_pFctFillAuStruct = M4READER_PCM_fillAuStruct;
+ (*pRdrGlobalInterface)->m_pFctJump = M4READER_PCM_jump;
+ (*pRdrGlobalInterface)->m_pFctReset = M4READER_PCM_reset;
+ (*pRdrGlobalInterface)->m_pFctGetPrevRapTime = M4OSA_NULL; /*all AUs are RAP*/
+
+ (*pRdrDataInterface)->m_pFctGetNextAu = M4READER_PCM_getNextAu;
+
+ (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+
diff --git a/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c b/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c
new file mode 100755
index 0000000..cc67e72
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "utils/Log.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#include "M4VD_EXTERNAL_Interface.h"
+#include "M4VD_Tools.h"
+#include "M4_VideoEditingCommon.h"
+#include "OMX_Video.h"
+/**
+ ************************************************************************
+ * @file M4VD_EXTERNAL_BitstreamParser.c
+ * @brief
+ * @note This file implements external Bitstream parser
+ ************************************************************************
+ */
+
+/* One row of the MPEG-4 profile/level lookup table: maps the
+   profile_and_level_indication byte to the OMX profile and level enums. */
+typedef struct {
+    M4OSA_UInt8 code;
+    M4OSA_Int32 profile;
+    M4OSA_Int32 level;
+} codeProfileLevel;
+
+/* MPEG-4 Visual profile_and_level_indication codes and their OMX
+   equivalents; scanned linearly by getMPEG4ProfileAndLevel(). */
+static codeProfileLevel mpeg4ProfileLevelTable[] = {
+    {0x01, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level1},
+    {0x02, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level2},
+    {0x03, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level3},
+    {0x04, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level4a},
+    {0x05, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level5},
+    {0x08, OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level0},
+    {0x11, OMX_VIDEO_MPEG4ProfileSimpleScalable,OMX_VIDEO_MPEG4Level1},
+    {0x12, OMX_VIDEO_MPEG4ProfileSimpleScalable,OMX_VIDEO_MPEG4Level2},
+    {0x21, OMX_VIDEO_MPEG4ProfileCore, OMX_VIDEO_MPEG4Level1},
+    {0x22, OMX_VIDEO_MPEG4ProfileCore, OMX_VIDEO_MPEG4Level2},
+    {0x32, OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level2},
+    {0x33, OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level3},
+    {0x34, OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level4},
+    {0x42, OMX_VIDEO_MPEG4ProfileNbit, OMX_VIDEO_MPEG4Level2},
+    {0x51, OMX_VIDEO_MPEG4ProfileScalableTexture, OMX_VIDEO_MPEG4Level1},
+    {0x61, OMX_VIDEO_MPEG4ProfileSimpleFace, OMX_VIDEO_MPEG4Level1},
+    {0x62, OMX_VIDEO_MPEG4ProfileSimpleFace, OMX_VIDEO_MPEG4Level2},
+    {0x71, OMX_VIDEO_MPEG4ProfileBasicAnimated, OMX_VIDEO_MPEG4Level1},
+    {0x72, OMX_VIDEO_MPEG4ProfileBasicAnimated, OMX_VIDEO_MPEG4Level2},
+    {0x81, OMX_VIDEO_MPEG4ProfileHybrid, OMX_VIDEO_MPEG4Level1},
+    {0x82, OMX_VIDEO_MPEG4ProfileHybrid, OMX_VIDEO_MPEG4Level2},
+    {0x91, OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level1},
+    {0x92, OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level2},
+    {0x93, OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level3},
+    {0x94, OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level4},
+    {0xa1, OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level1},
+    {0xa2, OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level2},
+    {0xa3, OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level3},
+    {0xb1, OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level1},
+    {0xb2, OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level2},
+    {0xb3, OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level3},
+    {0xb4, OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level4},
+    {0xc1, OMX_VIDEO_MPEG4ProfileAdvancedCore, OMX_VIDEO_MPEG4Level1},
+    {0xc2, OMX_VIDEO_MPEG4ProfileAdvancedCore, OMX_VIDEO_MPEG4Level2},
+    {0xd1, OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level1},
+    {0xd2, OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level2},
+    {0xd3, OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level3},
+    {0xf0, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level0},
+    {0xf1, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level1},
+    {0xf2, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level2},
+    {0xf3, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level3},
+    {0xf4, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level4},
+    {0xf5, OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level5}
+};
+
+/* Thin pass-through to the shared bitstream helper in M4VD_Tools. */
+M4OSA_UInt32 M4VD_EXTERNAL_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
+        M4OSA_UInt32 nb_bits)
+{
+    return M4VD_Tools_GetBitsFromMemory(parsingCtxt, nb_bits);
+}
+
+/* Thin pass-through to the shared bitstream helper in M4VD_Tools. */
+M4OSA_ERR M4VD_EXTERNAL_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
+        M4OSA_MemAddr32 dest_bits,
+        M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits)
+{
+    return M4VD_Tools_WriteBitsToMemory(bitsToWrite, dest_bits, offset, nb_bits);
+}
+
+/**
+ ************************************************************************
+ * @brief Parses MPEG-4 Visual decoder specific info (VOL header) and
+ *        extracts the video size and decoder configuration.
+ * @note Scans the buffer for 0x000001xx start codes and decodes the
+ *       video_object_layer (0x20..0x2F), visual_object_sequence (0xB0)
+ *       and visual_object (0xB5) headers. Only the rectangular
+ *       video_object_layer_shape is supported; other shapes abort
+ *       parsing early.
+ * @param pVol       (IN)  Decoder specific info buffer to parse
+ * @param aVolSize   (IN)  Size in bytes of the pVol buffer
+ * @param pDci       (OUT) Decoder config (time scale, resync marker use,
+ *                         data partitioning, RVLC, profile byte)
+ * @param pVideoSize (OUT) Video width and height in pixels
+ * @return M4NO_ERROR (parse problems are not reported as errors)
+ ************************************************************************
+ */
+M4OSA_ERR M4DECODER_EXTERNAL_ParseVideoDSI(M4OSA_UInt8* pVol, M4OSA_Int32 aVolSize,
+        M4DECODER_MPEG4_DecoderConfigInfo* pDci,
+        M4DECODER_VideoSize* pVideoSize)
+{
+    M4VS_Bitstream_ctxt parsingCtxt;
+    M4OSA_UInt32 code, j;
+    M4OSA_MemAddr8 start;
+    M4OSA_UInt8 i;
+    M4OSA_UInt32 time_incr_length;
+    M4OSA_UInt8 vol_verid=0, b_hierarchy_type;
+
+    /* Parsing variables */
+    M4OSA_UInt8 video_object_layer_shape = 0;
+    M4OSA_UInt8 sprite_enable = 0;
+    M4OSA_UInt8 reduced_resolution_vop_enable = 0;
+    M4OSA_UInt8 scalability = 0;
+    M4OSA_UInt8 enhancement_type = 0;
+    M4OSA_UInt8 complexity_estimation_disable = 0;
+    M4OSA_UInt8 interlaced = 0;
+    M4OSA_UInt8 sprite_warping_points = 0;
+    M4OSA_UInt8 sprite_brightness_change = 0;
+    M4OSA_UInt8 quant_precision = 0;
+
+    /* Fill the structure with default parameters */
+    pVideoSize->m_uiWidth = 0;
+    pVideoSize->m_uiHeight = 0;
+
+    pDci->uiTimeScale = 0;
+    pDci->uiProfile = 0;
+    pDci->uiUseOfResynchMarker = 0;
+    pDci->bDataPartition = M4OSA_FALSE;
+    pDci->bUseOfRVLC = M4OSA_FALSE;
+
+    /* Reset the bitstream context; stream_index == 8 forces the first
+       byte to be fetched on the first GetBits call */
+    parsingCtxt.stream_byte = 0;
+    parsingCtxt.stream_index = 8;
+    parsingCtxt.in = (M4OSA_Int8 *)pVol;
+
+    start = (M4OSA_Int8 *)pVol;
+
+    /* Start parsing: look for the 0x00 0x00 0x01 start-code prefix */
+    while (parsingCtxt.in - start < aVolSize)
+    {
+        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+        if (code == 0)
+        {
+            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+            if (code == 0)
+            {
+                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                if (code == 1)
+                {
+                    /* start code found */
+                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+
+                    /* ----- 0x20..0x2F : video_object_layer_start_code ----- */
+
+                    if ((code > 0x1F) && (code < 0x30))
+                    {
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* random accessible vol */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            8);/* video object type indication */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* is object layer identifier */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                4); /* video object layer verid */
+                            vol_verid = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                3); /* video object layer priority */
+                        }
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            4);/* aspect ratio */
+                        if (code == 15)
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                16); /* par_width and par_height (8+8) */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* vol control parameters */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                3);/* chroma format + low delay (3+1) */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                1);/* vbv parameters */
+                            if (code == 1)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    32);/* first and latter half bitrate + 2 marker bits
+                                        (15 + 1 + 15 + 1) */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    31);/* first and latter half vbv buffer size + first
+                                        half vbv occupancy + marker bits (15+1+3+11+1)*/
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    16);/* first half vbv occupancy + marker bits (15+1)*/
+                            }
+                        }
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            2); /* video object layer shape */
+                        /* Need to save it for vop parsing */
+                        video_object_layer_shape = (M4OSA_UInt8)code;
+
+                        /* NOTE(review): non-rectangular shapes abort parsing
+                           but still report success (0 == M4NO_ERROR) */
+                        if (code != 0) return 0; /* only rectangular case supported */
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1); /* Marker bit */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            16); /* VOP time increment resolution */
+                        pDci->uiTimeScale = code;
+
+                        /* Computes time increment length: number of bits needed
+                           to represent (resolution - 1) */
+                        j = code - 1;
+                        for (i = 0; (i < 32) && (j != 0); j >>=1)
+                        {
+                            i++;
+                        }
+                        time_incr_length = (i == 0) ? 1 : i;
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* Marker bit */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* Fixed VOP rate */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                time_incr_length);/* Fixed VOP time increment */
+                        }
+
+                        if(video_object_layer_shape != 1) /* 1 = Binary */
+                        {
+                            if(video_object_layer_shape == 0) /* 0 = rectangular */
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    13);/* Width */
+                                pVideoSize->m_uiWidth = code;
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    13);/* Height */
+                                pVideoSize->m_uiHeight = code;
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    1);/* Marker bit */
+                            }
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* interlaced */
+                        interlaced = (M4OSA_UInt8)code;
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* OBMC disable */
+
+                        /* sprite_enable is 1 bit in version 1 streams, 2 bits later */
+                        if(vol_verid == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                1);/* sprite enable */
+                            sprite_enable = (M4OSA_UInt8)code;
+                        }
+                        else
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                2);/* sprite enable */
+                            sprite_enable = (M4OSA_UInt8)code;
+                        }
+                        if ((sprite_enable == 1) || (sprite_enable == 2))
+                            /* Sprite static = 1 and Sprite GMC = 2 */
+                        {
+                            if (sprite_enable != 2)
+                            {
+
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    13);/* sprite width */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    13);/* sprite height */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    13);/* sprite l coordinate */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    13);/* sprite top coordinate */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    1);/* Marker bit */
+                            }
+
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                6);/* sprite warping points */
+                            sprite_warping_points = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                2);/* sprite warping accuracy */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                1);/* sprite brightness change */
+                            sprite_brightness_change = (M4OSA_UInt8)code;
+                            if (sprite_enable != 2)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                    1);/* low latency sprite enable */
+                            }
+                        }
+                        if ((vol_verid != 1) && (video_object_layer_shape != 0))
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                1);/* sadct disable */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1); /* not 8 bits */
+                        if (code)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                4);/* quant precision */
+                            quant_precision = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                4);/* bits per pixel */
+                        }
+
+                        /* greyscale not supported */
+                        if(video_object_layer_shape == 3)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                3); /* nogray quant update + composition method +
+                                    linear composition */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* quant type */
+                        if (code)
+                        {
+                            /* Quantization matrices: skip up to 64 coefficients,
+                               a zero byte terminates the list early */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                1);/* load intra quant mat */
+                            if (code)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);/* */
+                                i = 1;
+                                while (i < 64)
+                                {
+                                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                    if (code == 0)
+                                        break;
+                                    i++;
+                                }
+                            }
+
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                1);/* load non intra quant mat */
+                            if (code)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);/* */
+                                i = 1;
+                                while (i < 64)
+                                {
+                                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                    if (code == 0)
+                                        break;
+                                    i++;
+                                }
+                            }
+                        }
+
+                        if (vol_verid != 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                1);/* quarter sample */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* complexity estimation disable */
+                        complexity_estimation_disable = (M4OSA_UInt8)code;
+                        if (!code)
+                        {
+                            /* Complexity estimation headers are not parsed;
+                               deliberately tolerated */
+                            //return M4ERR_NOT_IMPLEMENTED;
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* resync marker disable */
+                        pDci->uiUseOfResynchMarker = (code) ? 0 : 1;
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                            1);/* data partitionned */
+                        pDci->bDataPartition = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+                        if (code)
+                        {
+                            /* reversible VLC */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            pDci->bUseOfRVLC = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+                        }
+
+                        if (vol_verid != 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);/* newpred */
+                            if (code)
+                            {
+                                /* newpred mode not handled; deliberately tolerated */
+                                //return M4ERR_PARAMETER;
+                            }
+                            /* reduced resolution vop enable */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            reduced_resolution_vop_enable = (M4OSA_UInt8)code;
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);/* scalability */
+                        scalability = (M4OSA_UInt8)code;
+                        if (code)
+                        {
+                            /* hierarchy type */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            b_hierarchy_type = (M4OSA_UInt8)code;
+                            /* ref layer id */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 4);
+                            /* ref sampling direct */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            /* hor sampling factor N */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* hor sampling factor M */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* vert sampling factor N */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* vert sampling factor M */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* enhancement type */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            enhancement_type = (M4OSA_UInt8)code;
+                            if ((!b_hierarchy_type) && (video_object_layer_shape == 1))
+                            {
+                                /* use ref shape */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                                /* use ref texture */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                                /* shape hor sampling factor N */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape hor sampling factor M */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape vert sampling factor N */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape vert sampling factor M */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            }
+                        }
+                        break;
+                    }
+
+                    /* ----- 0xB0 : visual_object_sequence_start_code ----- */
+
+                    else if(code == 0xB0)
+                    {
+                        /* profile_and_level_indication */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                        pDci->uiProfile = (M4OSA_UInt8)code;
+                    }
+
+                    /* ----- 0xB5 : visual_object_start_code ----- */
+
+                    else if(code == 0xB5)
+                    {
+                        /* is object layer identifier */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                        if (code == 1)
+                        {
+                            /* visual object verid */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 4);
+                            vol_verid = (M4OSA_UInt8)code;
+                            /* visual object layer priority */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 3);
+                        }
+                        else
+                        {
+                            /* Realign on byte */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 7);
+                            vol_verid = 1;
+                        }
+                    }
+
+                    /* ----- end ----- */
+                }
+                else
+                {
+                    if ((code >> 2) == 0x20)
+                    {
+                        /* H263 ...-> wrong*/
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Extracts the AVC (H.264) profile and level from the decoder
+ *        specific info (avcC content).
+ * @note The SPS is assumed to start at byte 28 of the DSI; the
+ *       profile_idc, constraint flags and level_idc are the three
+ *       bytes that follow.
+ * @param pDSI     (IN)  Decoder specific info buffer
+ * @param DSISize  (IN)  Size in bytes of pDSI
+ * @param pProfile (OUT) OMX AVC profile, or M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE
+ * @param pLevel   (OUT) OMX AVC level, or M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL
+ * @return M4NO_ERROR, or M4ERR_PARAMETER on invalid input
+ ************************************************************************
+ */
+M4OSA_ERR getAVCProfileAndLevel(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
+        M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel) {
+
+    M4OSA_UInt16 index = 28; /* the 29th byte is SPS start */
+    M4OSA_Bool constraintSet3;
+
+    if ((pProfile == M4OSA_NULL) || (pLevel == M4OSA_NULL)) {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Bytes index+1 .. index+3 are read below, so the DSI must contain at
+       least index+4 bytes (the previous check "DSISize <= index" allowed an
+       out-of-bounds read of up to 3 bytes) */
+    if ((DSISize <= (M4OSA_Int32)(index + 3)) || (pDSI == M4OSA_NULL)) {
+        ALOGE("getAVCProfileAndLevel: DSI is invalid");
+        *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
+        *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
+        return M4ERR_PARAMETER;
+    }
+
+    /* constraint_set3_flag distinguishes level 1b from level 1.1 */
+    constraintSet3 = (pDSI[index+2] & 0x10);
+    /* Format string fixed: it previously had two specifiers for three args */
+    ALOGV("getAVCProfileAndLevel profile_byte %d, level_byte: %d constrain3flag: %d",
+          pDSI[index+1], pDSI[index+3], constraintSet3);
+
+    /* profile_idc -> OMX profile */
+    switch (pDSI[index+1]) {
+        case 66:
+            *pProfile = OMX_VIDEO_AVCProfileBaseline;
+            break;
+        case 77:
+            *pProfile = OMX_VIDEO_AVCProfileMain;
+            break;
+        case 88:
+            *pProfile = OMX_VIDEO_AVCProfileExtended;
+            break;
+        case 100:
+            *pProfile = OMX_VIDEO_AVCProfileHigh;
+            break;
+        case 110:
+            *pProfile = OMX_VIDEO_AVCProfileHigh10;
+            break;
+        case 122:
+            *pProfile = OMX_VIDEO_AVCProfileHigh422;
+            break;
+        case 244:
+            *pProfile = OMX_VIDEO_AVCProfileHigh444;
+            break;
+        default:
+            *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
+    }
+
+    /* level_idc -> OMX level */
+    switch (pDSI[index+3]) {
+        case 10:
+            *pLevel = OMX_VIDEO_AVCLevel1;
+            break;
+        case 11:
+            if (constraintSet3)
+                *pLevel = OMX_VIDEO_AVCLevel1b;
+            else
+                *pLevel = OMX_VIDEO_AVCLevel11;
+            break;
+        case 12:
+            *pLevel = OMX_VIDEO_AVCLevel12;
+            break;
+        case 13:
+            *pLevel = OMX_VIDEO_AVCLevel13;
+            break;
+        case 20:
+            *pLevel = OMX_VIDEO_AVCLevel2;
+            break;
+        case 21:
+            *pLevel = OMX_VIDEO_AVCLevel21;
+            break;
+        case 22:
+            *pLevel = OMX_VIDEO_AVCLevel22;
+            break;
+        case 30:
+            *pLevel = OMX_VIDEO_AVCLevel3;
+            break;
+        case 31:
+            *pLevel = OMX_VIDEO_AVCLevel31;
+            break;
+        case 32:
+            *pLevel = OMX_VIDEO_AVCLevel32;
+            break;
+        case 40:
+            *pLevel = OMX_VIDEO_AVCLevel4;
+            break;
+        case 41:
+            *pLevel = OMX_VIDEO_AVCLevel41;
+            break;
+        case 42:
+            *pLevel = OMX_VIDEO_AVCLevel42;
+            break;
+        case 50:
+            *pLevel = OMX_VIDEO_AVCLevel5;
+            break;
+        case 51:
+            *pLevel = OMX_VIDEO_AVCLevel51;
+            break;
+        default:
+            *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
+    }
+    ALOGV("getAVCProfileAndLevel profile %ld level %ld", *pProfile, *pLevel);
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Extracts the H.263 profile and level from the decoder
+ *        specific info.
+ * @param pDSI     (IN)  Decoder specific info buffer
+ * @param DSISize  (IN)  Size in bytes of pDSI
+ * @param pProfile (OUT) OMX H263 profile, or M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE
+ * @param pLevel   (OUT) OMX H263 level, or M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL
+ * @return M4NO_ERROR, or M4ERR_PARAMETER on invalid input
+ ************************************************************************
+ */
+M4OSA_ERR getH263ProfileAndLevel(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
+        M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel) {
+
+    /* pDSI[5] holds the level and pDSI[6] the profile (0-based),
+       so at least 7 bytes of DSI are required */
+    M4OSA_UInt16 index = 7;
+
+    if ((pProfile == M4OSA_NULL) || (pLevel == M4OSA_NULL)) {
+        ALOGE("getH263ProfileAndLevel invalid pointer for pProfile");
+        return M4ERR_PARAMETER;
+    }
+
+    if ((DSISize < index) || (pDSI == M4OSA_NULL)) {
+        ALOGE("getH263ProfileAndLevel: DSI is invalid");
+        *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
+        *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
+        return M4ERR_PARAMETER;
+    }
+    /* Format string fixed: it previously had one specifier for two args */
+    ALOGV("getH263ProfileAndLevel profile_byte %d, level_byte %d",
+          pDSI[6], pDSI[5]);
+    /* get the H263 level */
+    switch (pDSI[5]) {
+        case 10:
+            *pLevel = OMX_VIDEO_H263Level10;
+            break;
+        case 20:
+            *pLevel = OMX_VIDEO_H263Level20;
+            break;
+        case 30:
+            *pLevel = OMX_VIDEO_H263Level30;
+            break;
+        case 40:
+            *pLevel = OMX_VIDEO_H263Level40;
+            break;
+        case 45:
+            *pLevel = OMX_VIDEO_H263Level45;
+            break;
+        case 50:
+            *pLevel = OMX_VIDEO_H263Level50;
+            break;
+        case 60:
+            *pLevel = OMX_VIDEO_H263Level60;
+            break;
+        case 70:
+            *pLevel = OMX_VIDEO_H263Level70;
+            break;
+        default:
+            *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
+    }
+
+    /* get H263 profile */
+    switch (pDSI[6]) {
+        case 0:
+            *pProfile = OMX_VIDEO_H263ProfileBaseline;
+            break;
+        case 1:
+            *pProfile = OMX_VIDEO_H263ProfileH320Coding;
+            break;
+        case 2:
+            *pProfile = OMX_VIDEO_H263ProfileBackwardCompatible;
+            break;
+        case 3:
+            *pProfile = OMX_VIDEO_H263ProfileISWV2;
+            break;
+        case 4:
+            *pProfile = OMX_VIDEO_H263ProfileISWV3;
+            break;
+        case 5:
+            *pProfile = OMX_VIDEO_H263ProfileHighCompression;
+            break;
+        case 6:
+            *pProfile = OMX_VIDEO_H263ProfileInternet;
+            break;
+        case 7:
+            *pProfile = OMX_VIDEO_H263ProfileInterlace;
+            break;
+        case 8:
+            *pProfile = OMX_VIDEO_H263ProfileHighLatency;
+            break;
+        default:
+            *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
+    }
+    ALOGV("getH263ProfileAndLevel profile %ld level %ld", *pProfile, *pLevel);
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief Maps an MPEG-4 profile_and_level_indication byte to the OMX
+ *        profile and level via mpeg4ProfileLevelTable.
+ * @param profileAndLevel (IN)  profile_and_level_indication byte
+ * @param pProfile        (OUT) OMX profile, or M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE
+ * @param pLevel          (OUT) OMX level, or M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL
+ * @return M4NO_ERROR, or M4ERR_PARAMETER if an output pointer is NULL
+ ************************************************************************
+ */
+M4OSA_ERR getMPEG4ProfileAndLevel(M4OSA_UInt8 profileAndLevel,
+        M4OSA_Int32 *pProfile, M4OSA_Int32 *pLevel) {
+
+    M4OSA_UInt32 tableSize;
+    M4OSA_UInt32 idx;
+
+    if ((pProfile == M4OSA_NULL) || (pLevel == M4OSA_NULL)) {
+        return M4ERR_PARAMETER;
+    }
+    ALOGV("getMPEG4ProfileAndLevel profileAndLevel %d", profileAndLevel);
+
+    /* Default to "unknown"; overwritten when the code is found below */
+    *pProfile = M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
+    *pLevel = M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
+
+    tableSize = sizeof(mpeg4ProfileLevelTable) / sizeof(mpeg4ProfileLevelTable[0]);
+    for (idx = 0; idx < tableSize; idx++) {
+        if (mpeg4ProfileLevelTable[idx].code == profileAndLevel) {
+            *pProfile = mpeg4ProfileLevelTable[idx].profile;
+            *pLevel = mpeg4ProfileLevelTable[idx].level;
+            break;
+        }
+    }
+    ALOGV("getMPEG4ProfileAndLevel profile %ld level %ld", *pProfile, *pLevel);
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VD_Tools.c b/libvideoeditor/vss/src/M4VD_Tools.c
new file mode 100755
index 0000000..fdb4b41
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VD_Tools.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#include "M4VD_Tools.h"
+
+/**
+ ************************************************************************
+ * @file M4VD_Tools.c
+ * @brief
+ * @note This file implements helper functions for Bitstream parser
+ ************************************************************************
+ */
+
+/**
+ * Reads 'nb_bits' bits MSB-first from the bitstream context and returns
+ * them right-aligned in a 32-bit word. The context tracks the current
+ * working byte and how many of its bits have been consumed.
+ */
+M4OSA_UInt32 M4VD_Tools_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
+        M4OSA_UInt32 nb_bits)
+{
+    M4OSA_UInt32 result = 0;
+    M4OSA_UInt32 bit;
+
+    for (bit = 0; bit < nb_bits; bit++)
+    {
+        /* Refill the working byte once all 8 of its bits are consumed */
+        if (parsingCtxt->stream_index == 8)
+        {
+            parsingCtxt->stream_byte = (unsigned char)(parsingCtxt->in)[0];
+            parsingCtxt->in++;
+            parsingCtxt->stream_index = 0;
+        }
+        /* Append the current MSB of the working byte to the result */
+        result = (result << 1) | ((parsingCtxt->stream_byte & 0x80) >> 7);
+        parsingCtxt->stream_byte = (parsingCtxt->stream_byte << 1);
+        parsingCtxt->stream_index++;
+    }
+
+    return result;
+}
+
+/**
+ * Writes 'nb_bits' bits of 'bitsToWrite' into the 32-bit word at
+ * 'dest_bits', starting 'offset' bits into the word: the target bits are
+ * first cleared with a mask, then filled from the input.
+ * NOTE(review): bit positions are computed as (7*(j+1))-i+j == 8*j+7-i,
+ * i.e. bitstream byte j lands in byte j of the word (MSB-first within
+ * each byte) — appears to assume a little-endian word layout; confirm
+ * against callers.
+ * NOTE(review): j only iterates over 3 bytes, so offset + nb_bits must
+ * not exceed 24 — callers must guarantee this.
+ */
+M4OSA_ERR M4VD_Tools_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
+        M4OSA_MemAddr32 dest_bits,
+        M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits)
+{
+    M4OSA_UInt8 i,j;
+    M4OSA_UInt32 temp_dest = 0, mask = 0, temp = 1;
+    M4OSA_UInt32 input = bitsToWrite;
+    /* Left-align the input so its first bit sits at word bit (31 - offset) */
+    input = (input << (32 - nb_bits - offset));
+
+    /* Put destination buffer to 0: build a mask covering the target bit
+       range, then clear those bits in *dest_bits */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                mask |= (temp << ((7*(j+1))-i+j));
+            }
+        }
+    }
+    mask = ~mask;
+    *dest_bits &= mask;
+
+    /* Parse input bits, and fill output buffer one bit at a time */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                /* Extract the next input bit (always taken at position
+                   31 - offset, since 'input' is shifted left each pass) */
+                temp = ((input & (0x80000000 >> offset)) >> (31-offset));
+                //*dest_bits |= (temp << (31 - i));
+                *dest_bits |= (temp << ((7*(j+1))-i+j));
+                input = (input << 1);
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+
+
diff --git a/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c b/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c
new file mode 100755
index 0000000..86bb46b
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VIFI_xVSS_RGB565toYUV420.c
+ * @brief Contain video library function
+ * @note Color Conversion Filter
+ * -# Contains the format conversion filters from RGB565 to YUV420
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+
+/**
+ ******************************************************************************
+ * M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420 (void *pUserData,
+ *                                          M4VIFI_ImagePlane *pPlaneIn,
+ *                                          M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   transform RGB565 image to a YUV420 image.
+ * @note    Convert RGB565 to YUV420,
+ *          Loop on each row ( 2 rows by 2 rows )
+ *              Loop on each column ( 2 col by 2 col )
+ *                  Get 4 RGB samples from input data and build 4 output Y samples
+ *                  and each single U & V data
+ *              end loop on col
+ *          end loop on row
+ *          The pure-green RGB565 value (0,63,0) is treated as the xVSS
+ *          transparency color and is remapped to white (31,63,31) before
+ *          the colorspace conversion.
+ * @param   pUserData: (IN) User Specific Data (unused here)
+ * @param   pPlaneIn: (IN) Pointer to RGB565 Plane
+ * @param   pPlaneOut: (OUT) Pointer to  YUV420 buffer Plane
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
+ ******************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+                                                      M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt32   u32_width, u32_height;
+    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
+    M4VIFI_UInt32   u32_stride_rgb, u32_stride_2rgb;
+    M4VIFI_UInt32   u32_col, u32_row;
+
+    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
+    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
+    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
+    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
+    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
+    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
+    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
+    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
+    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;
+
+    /* Check planes height are appropriate: chroma planes are half height */
+    if( (pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
+        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
+        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+
+    /* Check planes width are appropriate: chroma planes are half width */
+    if( (pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
+        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
+        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+    }
+
+    /* Set the pointer to the beginning of the output data buffers */
+    pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+    pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+    pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+
+    /* Set the pointer to the beginning of the input data buffers */
+    pu8_rgbn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;
+
+    /* Get the size of the output image */
+    u32_width = pPlaneOut[0].u_width;
+    u32_height = pPlaneOut[0].u_height;
+
+    /* Set the size of the memory jumps corresponding to row jump in each output plane */
+    u32_stride_Y = pPlaneOut[0].u_stride;
+    u32_stride2_Y = u32_stride_Y << 1;
+    u32_stride_U = pPlaneOut[1].u_stride;
+    u32_stride_V = pPlaneOut[2].u_stride;
+
+    /* Set the size of the memory jumps corresponding to row jump in input plane */
+    u32_stride_rgb = pPlaneIn->u_stride;
+    u32_stride_2rgb = u32_stride_rgb << 1;
+
+
+    /* Loop on each row of the output image, input coordinates are estimated from output ones */
+    /* Two YUV rows are computed at each pass */
+    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+    {
+        /* Current Y plane row pointers */
+        pu8_yn = pu8_y_data;
+        /* Next Y plane row pointers */
+        pu8_ys = pu8_yn + u32_stride_Y;
+        /* Current U plane row pointer */
+        pu8_u = pu8_u_data;
+        /* Current V plane row pointer */
+        pu8_v = pu8_v_data;
+
+        pu8_rgbn = pu8_rgbn_data;
+
+        /* Loop on each column of the output image */
+        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+        {
+            /* Get four RGB 565 samples from input data (2x2 quad) */
+            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
+            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
+            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
+            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
+
+            /* Unpack RGB565 to 8bit R, G, B */
+            /* (x,y) */
+            GET_RGB565(i32_b00,i32_g00,i32_r00,u16_pix1);
+            /* (x+1,y) */
+            GET_RGB565(i32_b10,i32_g10,i32_r10,u16_pix2);
+            /* (x,y+1) */
+            GET_RGB565(i32_b01,i32_g01,i32_r01,u16_pix3);
+            /* (x+1,y+1) */
+            GET_RGB565(i32_b11,i32_g11,i32_r11,u16_pix4);
+            /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
+            if(i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
+            {
+                i32_b00 = 31;
+                i32_r00 = 31;
+            }
+            if(i32_b10 == 0 && i32_g10 == 63 && i32_r10 == 0)
+            {
+                i32_b10 = 31;
+                i32_r10 = 31;
+            }
+            if(i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
+            {
+                i32_b01 = 31;
+                i32_r01 = 31;
+            }
+            if(i32_b11 == 0 && i32_g11 == 63 && i32_r11 == 0)
+            {
+                i32_b11 = 31;
+                i32_r11 = 31;
+            }
+            /* Convert RGB value to YUV */
+            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
+            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
+            /* luminance value */
+            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
+
+            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
+            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
+            /* luminance value */
+            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
+
+            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
+            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
+            /* luminance value */
+            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
+
+            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
+            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
+            /* luminance value */
+            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
+
+            /* Store luminance data */
+            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
+            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
+            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
+            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+            /* One U/V sample per 2x2 quad: rounded average of the four samples */
+            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+            /* Prepare for next column */
+            pu8_rgbn += (CST_RGB_16_SIZE<<1);
+            /* Update current Y plane line pointer*/
+            pu8_yn += 2;
+            /* Update next Y plane line pointer*/
+            pu8_ys += 2;
+            /* Update U plane line pointer*/
+            pu8_u ++;
+            /* Update V plane line pointer*/
+            pu8_v ++;
+        } /* End of horizontal scanning */
+
+        /* Prepare pointers for the next row */
+        pu8_y_data += u32_stride2_Y;
+        pu8_u_data += u32_stride_U;
+        pu8_v_data += u32_stride_V;
+        pu8_rgbn_data += u32_stride_2rgb;
+
+
+    } /* End of vertical scanning */
+
+    return M4VIFI_OK;
+}
+/* End of file M4VIFI_xVSS_RGB565toYUV420.c */
+
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c b/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c
new file mode 100755
index 0000000..bf0bc06
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c
@@ -0,0 +1,4139 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_AudioMixing.c
+ * @brief Video Studio Service 3GPP audio mixing implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/* Put the definition of silence frames here */
+#define M4VSS3GPP_SILENCE_FRAMES
+#include "M4VSS3GPP_InternalConfig.h"
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h" /**< OSAL debug management */
+
+
+#include "VideoEditorResampler.h"
+/**
+ ******************************************************************************
+ * @brief Static functions
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intAudioMixingOpen( M4VSS3GPP_InternalAudioMixingContext *pC,
+ M4VSS3GPP_AudioMixingSettings *pSettings );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder(
+ M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_Bool M4VSS3GPP_isThresholdBreached( M4OSA_Int32 *averageValue,
+ M4OSA_Int32 storeCount,
+ M4OSA_Int32 thresholdValue );
+/**
+ * Internal warning */
+#define M4VSS3GPP_WAR_END_OF_ADDED_AUDIO M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0030)
+
+/* A define used with SSRC 1.04 and above to avoid taking
+blocks smaller that the minimal block size */
+#define M4VSS_SSRC_MINBLOCKSIZE 600
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingInit(M4VSS3GPP_AudioMixingContext* pContext,
+ *                                     M4VSS3GPP_AudioMixingSettings* pSettings)
+ * @brief    Initializes the VSS audio mixing operation (allocates an execution context).
+ * @note
+ * @param    pContext            (OUT) Pointer on the VSS audio mixing context to allocate
+ * @param    pSettings           (IN) Pointer to valid audio mixing settings
+ * @param    pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
+ * @param    pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return   M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_audioMixingInit( M4VSS3GPP_AudioMixingContext *pContext,
+                                    M4VSS3GPP_AudioMixingSettings *pSettings,
+                                    M4OSA_FileReadPointer *pFileReadPtrFct,
+                                    M4OSA_FileWriterPointer *pFileWritePtrFct )
+{
+    M4VSS3GPP_InternalAudioMixingContext *pC;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_audioMixingInit called with pContext=0x%x, pSettings=0x%x",
+        pContext, pSettings);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pSettings is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pFileReadPtrFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pFileWritePtrFct is M4OSA_NULL");
+
+    /* A loop window of zero or negative length is rejected up front */
+    if( pSettings->uiBeginLoop > pSettings->uiEndLoop )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingInit: Begin loop time is higher than end loop time!");
+        return M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP;
+    }
+
+    /**
+    * Allocate the VSS audio mixing context and return it to the user */
+    pC = (M4VSS3GPP_InternalAudioMixingContext
+        *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_InternalAudioMixingContext),
+        M4VSS3GPP,(M4OSA_Char *)"M4VSS3GPP_InternalAudioMixingContext");
+    /* NOTE: the context is published to the caller before the NULL check;
+       on allocation failure *pContext is therefore M4OSA_NULL */
+    *pContext = pC;
+
+    if( M4OSA_NULL == pC )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingInit(): unable to allocate \
+            M4VSS3GPP_InternalAudioMixingContext,returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /* Initialization of context Variables */
+    memset((void *)pC ,0,
+                 sizeof(M4VSS3GPP_InternalAudioMixingContext));
+    /**
+    * Copy this setting in context */
+    pC->iAddCts = pSettings->uiAddCts;
+    pC->bRemoveOriginal = pSettings->bRemoveOriginal;
+    pC->b_DuckingNeedeed = pSettings->b_DuckingNeedeed;
+    pC->InDucking_threshold = pSettings->InDucking_threshold;
+    pC->fBTVolLevel = pSettings->fBTVolLevel;
+    pC->fPTVolLevel = pSettings->fPTVolLevel;
+    pC->InDucking_lowVolume = pSettings->InDucking_lowVolume;
+    pC->bDoDucking = M4OSA_FALSE;
+    pC->bLoop = pSettings->bLoop;
+    pC->bNoLooping = M4OSA_FALSE;
+    pC->bjumpflag = M4OSA_TRUE;
+    /**
+    * Init some context variables */
+
+    pC->pInputClipCtxt = M4OSA_NULL;
+    pC->pAddedClipCtxt = M4OSA_NULL;
+    /* Default mix: 100% original track, 0% added track */
+    pC->fOrigFactor = 1.0F;
+    pC->fAddedFactor = 0.0F;
+    pC->bSupportSilence = M4OSA_FALSE;
+    pC->bHasAudio = M4OSA_FALSE;
+    pC->bAudioMixingIsNeeded = M4OSA_FALSE;
+
+    /* Init PC->ewc members */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+    pC->ewc.bVideoDataPartitioning = M4OSA_FALSE;
+    pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+    pC->ewc.uiNbChannels = 1;
+    pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+    pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    pC->ewc.pAudioEncDSI.pInfo = M4OSA_NULL;
+    pC->ewc.pSilenceFrameData = M4OSA_NULL;
+    pC->ewc.pEncContext = M4OSA_NULL;
+    pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+    pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    pC->pLVAudioResampler = M4OSA_NULL;
+    /**
+    * Set the OSAL filesystem function set */
+    pC->pOsaFileReadPtr = pFileReadPtrFct;
+    pC->pOsaFileWritPtr = pFileWritePtrFct;
+
+    /**
+    * Ssrc stuff */
+    pC->b_SSRCneeded = M4OSA_FALSE;
+    pC->pSsrcBufferIn = M4OSA_NULL;
+    pC->pSsrcBufferOut = M4OSA_NULL;
+    pC->pTempBuffer = M4OSA_NULL;
+    pC->pPosInTempBuffer = M4OSA_NULL;
+    pC->pPosInSsrcBufferIn = M4OSA_NULL;
+    pC->pPosInSsrcBufferOut = M4OSA_NULL;
+    pC->SsrcScratch = M4OSA_NULL;
+    pC->uiBeginLoop = pSettings->uiBeginLoop;
+    pC->uiEndLoop = pSettings->uiEndLoop;
+
+    /*
+    * Reset pointers for media and codecs interfaces */
+    err = M4VSS3GPP_clearInterfaceTables(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /* Call the media and codecs subscription module */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Open input clip, added clip and output clip and proceed with the settings */
+    err = M4VSS3GPP_intAudioMixingOpen(pC, pSettings);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Update main state automaton */
+    if( M4OSA_NULL != pC->pInputClipCtxt->pVideoStream )
+        pC->State = M4VSS3GPP_kAudioMixingState_VIDEO;
+    else
+        pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT;
+
+    pC->ewc.iOutputDuration = (M4OSA_Int32)pC->pInputClipCtxt->pSettings->
+        ClipProperties.uiClipDuration;
+    /*gInputParams.lvBTChannelCount*/
+    /* Resampler converts the added track to the requested output sampling rate */
+    pC->pLVAudioResampler = LVAudioResamplerCreate(16,
+        pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels,
+        /* gInputParams.lvOutSampleRate*/(M4OSA_Int32)pSettings->outputASF, 1);
+     if( M4OSA_NULL == pC->pLVAudioResampler )
+     {
+         /* NOTE(review): the clips opened above are not released here; the
+            caller is expected to invoke M4VSS3GPP_audioMixingCleanUp() on
+            the returned context — confirm against the API contract */
+         return M4ERR_ALLOC;
+     }
+    LVAudiosetSampleRate(pC->pLVAudioResampler,
+        /*gInputParams.lvInSampleRate*/
+        pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency);
+
+    LVAudiosetVolume(pC->pLVAudioResampler,
+        (M4OSA_Int16)(0x1000 ),
+        (M4OSA_Int16)(0x1000 ));
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_audioMixingInit(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingStep(M4VSS3GPP_AudioMixingContext pContext)
+ * @brief    Perform one step of audio mixing.
+ * @note     Drives a three-state automaton: VIDEO (copy video AUs),
+ *           AUDIO_* (mix or replace audio), FINISHED.
+ * @param    pContext    (IN) VSS audio mixing context
+ * @param    pProgress   (OUT) Progress percentage (0 to 100) of the finalization operation
+ * @return   M4NO_ERROR:        No error
+ * @return   M4ERR_PARAMETER:   pContext is M4OSA_NULL (debug only)
+ * @return   M4ERR_STATE:       VSS is not in an appropriate state for this function to be called
+ * @return   M4VSS3GPP_WAR_END_OF_AUDIO_MIXING: Audio mixing is over, user should now call
+ *           M4VSS3GPP_audioMixingCleanUp()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingStep( M4VSS3GPP_AudioMixingContext pContext,
+                                    M4OSA_UInt8 *pProgress )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_InternalAudioMixingContext *pC =
+        (M4VSS3GPP_InternalAudioMixingContext *)pContext;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_audioMixingStep called with pContext=0x%x",
+        pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingStep: pContext is M4OSA_NULL");
+
+    /**
+    * State automaton */
+    switch( pC->State )
+    {
+        case M4VSS3GPP_kAudioMixingState_VIDEO:
+            err = M4VSS3GPP_intAudioMixingStepVideo(pC);
+
+            /**
+            * Compute the progress percentage
+            * Note: audio and video CTS are not initialized before
+            * the call of M4VSS3GPP_intAudioMixingStepVideo */
+
+            /* P4ME00003276: First 0-50% segment is dedicated to state :
+            M4VSS3GPP_kAudioMixingState_VIDEO */
+            *pProgress = (M4OSA_UInt8)(50 * (pC->ewc.WriterVideoAU.CTS)
+                / pC->pInputClipCtxt->pVideoStream->
+                m_basicProperties.m_duration);
+
+            /**
+            * There may be no audio track (Remove audio track feature).
+            * In that case we double the current percentage */
+            if( M4SYS_kAudioUnknown == pC->ewc.WriterAudioStream.streamType )
+            {
+                ( *pProgress) <<= 1; /**< x2 */
+            }
+            else if( *pProgress >= 50 )
+            {
+                *pProgress =
+                    49; /**< Video processing is not greater than 50% */
+            }
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                if( pC->bHasAudio )
+                {
+                    /**
+                    * Video is over, state transition to audio and return OK */
+                    if( pC->iAddCts > 0 )
+                        pC->State =
+                            M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT;
+                    else
+                        pC->State =
+                            M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+                }
+                else
+                {
+                    /**
+                    * No audio, state transition to FINISHED */
+                    pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+                }
+
+                return M4NO_ERROR;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_audioMixingStep: M4VSS3GPP_intAudioMixingStepVideo returns 0x%x!",
+                    err);
+                return err;
+            }
+            else
+            {
+                return M4NO_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
+            /* When the added track has passed the loop end point, either stop
+            looping (bLoop false) or jump back to the loop begin point */
+            if( pC->pAddedClipCtxt->iAudioFrameCts
+                != -pC->pAddedClipCtxt->iSilenceFrameDuration
+                && (pC->pAddedClipCtxt->iAudioFrameCts - 0.5)
+                / pC->pAddedClipCtxt->scale_audio > pC->uiEndLoop
+                && pC->uiEndLoop > 0 )
+            {
+                if(pC->bLoop == M4OSA_FALSE)
+                {
+                    pC->bNoLooping = M4OSA_TRUE;
+                }
+                else
+                {
+                    M4OSA_Int32 jumpCTS = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                    err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                        pC->pAddedClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pC->pAddedClipCtxt->
+                        pAudioStream, &jumpCTS);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_audioMixingStep: error when jumping in added audio clip: 0x%x",
+                            err);
+                        return err;
+                    }
+                    /**
+                    * Use offset to give a correct CTS ... */
+                    pC->pAddedClipCtxt->iAoffset =
+                        (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+                }
+
+            }
+
+            if( M4OSA_FALSE == pC->bRemoveOriginal )
+            {
+                err = M4VSS3GPP_intAudioMixingStepAudioMix(pC);
+            }
+            else
+            {
+                err = M4VSS3GPP_intAudioMixingStepAudioReplace(pC);
+            }
+
+            /**
+            * Compute the progress percentage
+            * Note: audio and video CTS are not initialized before
+            * the call of M4VSS3GPP_intAudioMixingStepAudio */
+            if( 0 != pC->ewc.iOutputDuration )
+            {
+                /* P4ME00003276: Second 50-100% segment is dedicated to states :
+                M4VSS3GPP_kAudioMixingState_AUDIO... */
+                /* For Audio the progress computation is based on dAto and offset,
+                it is more accurate */
+                /* NOTE(review): operator precedence makes this
+                   (50*dATo - iVoffset) / duration; the intent was presumably
+                   50 * (dATo - iVoffset) / duration — confirm before changing */
+                *pProgress = (M4OSA_UInt8)(50
+                    + (50 * pC->ewc.dATo - pC->pInputClipCtxt->iVoffset)
+                    / (pC->ewc.iOutputDuration)); /**< 50 for 100/2 **/
+
+                if( *pProgress >= 100 )
+                {
+                    *pProgress =
+                        99; /**< It's not really finished, I prefer to return less than 100% */
+                }
+            }
+            else
+            {
+                *pProgress = 99;
+            }
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                /**
+                * Audio is over, state transition to FINISHED */
+                pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+                return M4NO_ERROR;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_audioMixingStep: M4VSS3GPP_intAudioMixingStepAudio returns 0x%x!",
+                    err);
+                return err;
+            }
+            else
+            {
+                return M4NO_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kAudioMixingState_FINISHED:
+
+            /**
+            * Progress percentage: finalize finished -> 100% */
+            *pProgress = 100;
+
+            /**
+            * Audio mixing is finished, return correct warning */
+            return M4VSS3GPP_WAR_END_OF_AUDIO_MIXING;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingStep: State error (0x%x)! Returning M4ERR_STATE",
+                pC->State);
+            return M4ERR_STATE;
+    }
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingCleanUp(M4VSS3GPP_AudioMixingContext pContext)
+ * @brief    Free all resources used by the VSS audio mixing operation.
+ * @note     The context is no more valid after this call.
+ *           Errors from the individual release steps are logged but do not
+ *           abort the teardown, so every owned resource is attempted.
+ * @param    pContext    (IN) VSS audio mixing context
+ * @return   M4NO_ERROR:        No error
+ * @return   M4ERR_PARAMETER:   pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingCleanUp( M4VSS3GPP_AudioMixingContext pContext )
+{
+    M4VSS3GPP_InternalAudioMixingContext *pC =
+        (M4VSS3GPP_InternalAudioMixingContext *)pContext;
+    M4OSA_ERR err;
+    M4OSA_UInt32 lastCTS;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_audioMixingCleanUp called with pContext=0x%x",
+        pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingCleanUp: pContext is M4OSA_NULL");
+
+    /**
+    * Check input parameter (release-build guard; the check above is debug only) */
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingCleanUp(): M4VSS3GPP_audioMixingCleanUp: pContext is\
+             M4OSA_NULL, returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    /**
+    * Close Input 3GPP file */
+    if( M4OSA_NULL != pC->pInputClipCtxt )
+    {
+        M4VSS3GPP_intClipCleanUp(pC->pInputClipCtxt);
+        pC->pInputClipCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Close Added 3GPP file */
+    if( M4OSA_NULL != pC->pAddedClipCtxt )
+    {
+        M4VSS3GPP_intClipCleanUp(pC->pAddedClipCtxt);
+        pC->pAddedClipCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Close the 3GP writer. In normal use case it has already been closed,
+      but not in abort use case */
+    if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+    {
+        /* Update last Video CTS */
+        lastCTS = pC->ewc.iOutputDuration;
+
+        err = pC->ShellAPI.pWriterGlobalFcts->pFctSetOption(
+            pC->ewc.p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+                err);
+        }
+
+        err = pC->ShellAPI.pWriterGlobalFcts->pFctCloseWrite(
+            pC->ewc.p3gpWriterContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pWriterGlobalFcts->pFctCloseWrite returns 0x%x!",
+                err);
+            /**< don't return the error because we have other things to free! */
+        }
+        pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    }
+
+    /**
+    * Free the Audio encoder context */
+    if( M4OSA_NULL != pC->ewc.pAudioEncCtxt )
+    {
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctClose(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctCleanUp(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the ssrc stuff */
+
+    if( M4OSA_NULL != pC->SsrcScratch )
+    {
+        free(pC->SsrcScratch);
+        pC->SsrcScratch = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferIn )
+    {
+        free(pC->pSsrcBufferIn);
+        pC->pSsrcBufferIn = M4OSA_NULL;
+    }
+
+    /* pSsrcBufferOut is only owned (and thus freed) when SSRC or channel
+       conversion was actually set up */
+    if( M4OSA_NULL != pC->pSsrcBufferOut
+        && (M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0) )
+    {
+        free(pC->pSsrcBufferOut);
+        pC->pSsrcBufferOut = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pTempBuffer )
+    {
+        free(pC->pTempBuffer);
+        pC->pTempBuffer = M4OSA_NULL;
+    }
+
+    if (pC->pLVAudioResampler != M4OSA_NULL)
+    {
+        LVDestroy(pC->pLVAudioResampler);
+        pC->pLVAudioResampler = M4OSA_NULL;
+    }
+
+    /**
+    * Free the shells interfaces */
+    M4VSS3GPP_unRegisterAllWriters(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllEncoders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllReaders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllDecoders(&pC->ShellAPI);
+
+    /**
+    * Free the context */
+    free(pContext);
+    /* Only clears the local copy of the pointer; the caller's variable is
+       unchanged and must not be reused after this call */
+    pContext = M4OSA_NULL;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_audioMixingCleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/********* STATIC FUNCTIONS **********/
+/******************************************************************************/
+/******************************************************************************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingOpen()
+ * @brief Initializes the VSS audio mixing operation (allocates an execution context).
+ * @note
+ * @param pContext (OUT) Pointer on the VSS audio mixing context to allocate
+ * @param pSettings (IN) Pointer to valid audio mixing settings
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intAudioMixingOpen( M4VSS3GPP_InternalAudioMixingContext *pC,
+ M4VSS3GPP_AudioMixingSettings *pSettings )
+{
+ M4OSA_ERR err;
+ M4OSA_UInt32 outputASF = 0;
+ M4ENCODER_Header *encHeader;
+
+ M4OSA_TRACE3_2(
+ "M4VSS3GPP_intAudioMixingOpen called with pContext=0x%x, pSettings=0x%x",
+ pC, pSettings);
+
+ /**
+ * The Add Volume must be (strictly) superior than zero */
+ if( pSettings->uiAddVolume == 0 )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen(): AddVolume is zero,\
+ returning M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO");
+ return M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO;
+ }
+ /*
+ else if(pSettings->uiAddVolume >= 100) // If volume is set to 100, no more original audio ...
+ {
+ pC->bRemoveOriginal = M4OSA_TRUE;
+ }
+ */
+ /**
+ * Build the input clip settings */
+ pC->InputClipSettings.pFile =
+ pSettings->pOriginalClipFile; /**< Input 3GPP file descriptor */
+ pC->InputClipSettings.FileType = M4VIDEOEDITING_kFileType_3GPP;
+ pC->InputClipSettings.uiBeginCutTime =
+ 0; /**< No notion of cut for the audio mixing feature */
+ pC->InputClipSettings.uiEndCutTime =
+ 0; /**< No notion of cut for the audio mixing feature */
+
+ /**
+ * Open the original Audio/Video 3GPP clip */
+ err = M4VSS3GPP_intClipInit(&pC->pInputClipCtxt, pC->pOsaFileReadPtr);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipInit(orig) returns 0x%x",
+ err);
+ return err;
+ }
+
+ err = M4VSS3GPP_intClipOpen(pC->pInputClipCtxt, &pC->InputClipSettings,
+ M4OSA_FALSE, M4OSA_FALSE, M4OSA_TRUE);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipOpen(orig) returns 0x%x",
+ err);
+ return err;
+ }
+
+ if( M4OSA_NULL == pC->pInputClipCtxt->pAudioStream )
+ {
+ pC->bRemoveOriginal = M4OSA_TRUE;
+ }
+ /**
+ * If there is no video, it's an error */
+ if( M4OSA_NULL == pC->pInputClipCtxt->pVideoStream )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen(): no video stream in clip,\
+ returning M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
+ return M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE;
+ }
+
+ /**
+ * Compute clip properties */
+ err = M4VSS3GPP_intBuildAnalysis(pC->pInputClipCtxt,
+ &pC->pInputClipCtxt->pSettings->ClipProperties);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intBuildAnalysis(orig) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Build the added clip settings */
+ pC->AddedClipSettings.pFile =
+ pSettings->pAddedAudioTrackFile; /**< Added file descriptor */
+ pC->AddedClipSettings.FileType = pSettings->AddedAudioFileType;
+ pC->AddedClipSettings.uiBeginCutTime =
+ 0; /**< No notion of cut for the audio mixing feature */
+ pC->AddedClipSettings.uiEndCutTime = 0;/**< No notion of cut for the audio mixing feature */
+ pC->AddedClipSettings.ClipProperties.uiNbChannels=
+ pSettings->uiNumChannels;
+ pC->AddedClipSettings.ClipProperties.uiSamplingFrequency= pSettings->uiSamplingFrequency;
+
+ if( M4OSA_NULL != pC->AddedClipSettings.pFile )
+ {
+ /**
+ * Open the added Audio clip */
+ err = M4VSS3GPP_intClipInit(&pC->pAddedClipCtxt, pC->pOsaFileReadPtr);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipInit(added) returns 0x%x",
+ err);
+ return err;
+ }
+
+ err = M4VSS3GPP_intClipOpen(pC->pAddedClipCtxt, &pC->AddedClipSettings,
+ M4OSA_FALSE, M4OSA_FALSE, M4OSA_TRUE);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipOpen(added) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * If there is no audio, it's an error */
+ if( M4OSA_NULL == pC->pAddedClipCtxt->pAudioStream )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen(): no audio nor video stream in clip,\
+ returning M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
+ return M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE;
+ }
+
+ /**
+ * Compute clip properties */
+ err = M4VSS3GPP_intBuildAnalysis(pC->pAddedClipCtxt,
+ &pC->pAddedClipCtxt->pSettings->ClipProperties);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intBuildAnalysis(added) returns 0x%x",
+ err);
+ return err;
+ }
+
+ switch( pSettings->outputASF )
+ {
+ case M4VIDEOEDITING_k8000_ASF:
+ outputASF = 8000;
+ break;
+
+ case M4VIDEOEDITING_k16000_ASF:
+ outputASF = 16000;
+ break;
+
+ case M4VIDEOEDITING_k22050_ASF:
+ outputASF = 22050;
+ break;
+
+ case M4VIDEOEDITING_k24000_ASF:
+ outputASF = 24000;
+ break;
+
+ case M4VIDEOEDITING_k32000_ASF:
+ outputASF = 32000;
+ break;
+
+ case M4VIDEOEDITING_k44100_ASF:
+ outputASF = 44100;
+ break;
+
+ case M4VIDEOEDITING_k48000_ASF:
+ outputASF = 48000;
+ break;
+
+ default:
+ M4OSA_TRACE1_0("Bad parameter in output ASF ");
+ return M4ERR_PARAMETER;
+ break;
+ }
+
+ if( pC->bRemoveOriginal == M4OSA_TRUE
+ && (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+ == M4VIDEOEDITING_kMP3 || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.AudioStreamType
+ != pSettings->outputAudioFormat
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency != outputASF
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels
+ != pSettings->outputNBChannels) )
+ {
+
+ if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+ {
+ pSettings->outputASF = M4VIDEOEDITING_k8000_ASF;
+ pSettings->outputNBChannels = 1;
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize = 320;
+ }
+ else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+ {
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize =
+ 2048 * pSettings->outputNBChannels;
+ }
+
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiSamplingFrequency =
+ outputASF;
+
+ if( outputASF != pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency )
+ {
+ /* We need to call SSRC in order to align ASF and/or nb of channels */
+ /* Moreover, audio encoder may be needed in case of audio replacing... */
+ pC->b_SSRCneeded = M4OSA_TRUE;
+ }
+
+ if( pSettings->outputNBChannels
+ < pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+ {
+ /* Stereo to Mono */
+ pC->ChannelConversion = 1;
+ }
+ else if( pSettings->outputNBChannels
+ > pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+ {
+ /* Mono to Stereo */
+ pC->ChannelConversion = 2;
+ }
+
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels =
+ pSettings->outputNBChannels;
+ }
+
+ /**
+ * Check compatibility chart */
+ err = M4VSS3GPP_intAudioMixingCompatibility(pC,
+ &pC->pInputClipCtxt->pSettings->ClipProperties,
+ &pC->pAddedClipCtxt->pSettings->ClipProperties);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ M4VSS3GPP_intAudioMixingCompatibility returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Check loop parameters */
+ if( pC->uiBeginLoop > pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiClipAudioDuration )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ begin loop time is higher than added clip audio duration");
+ return M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP;
+ }
+
+ /**
+ * Ok, let's go with this audio track */
+ pC->bHasAudio = M4OSA_TRUE;
+ }
+ else
+ {
+ /* No added file, force remove original */
+ pC->AddedClipSettings.FileType = M4VIDEOEDITING_kFileType_Unsupported;
+ pC->bRemoveOriginal = M4OSA_TRUE;
+ pC->bHasAudio = M4OSA_FALSE;
+ }
+
+ /**
+ * Copy the video properties of the input clip to the output properties */
+ pC->ewc.uiVideoBitrate =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoBitrate;
+ pC->ewc.uiVideoWidth =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoWidth;
+ pC->ewc.uiVideoHeight =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoHeight;
+ pC->ewc.uiVideoTimeScale =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoTimeScale;
+ pC->ewc.bVideoDataPartitioning =
+ pC->pInputClipCtxt->pSettings->ClipProperties.bMPEG4dataPartition;
+ pC->ewc.outputVideoProfile =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoProfile;
+ pC->ewc.outputVideoLevel =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoLevel;
+ switch( pC->pInputClipCtxt->pSettings->ClipProperties.VideoStreamType )
+ {
+ case M4VIDEOEDITING_kH263:
+ pC->ewc.VideoStreamType = M4SYS_kH263;
+ break;
+
+ case M4VIDEOEDITING_kMPEG4:
+ pC->ewc.VideoStreamType = M4SYS_kMPEG_4;
+ break;
+
+ case M4VIDEOEDITING_kH264:
+ pC->ewc.VideoStreamType = M4SYS_kH264;
+ break;
+
+ default:
+ pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+ break;
+ }
+
+ /* Add a link to video dsi */
+ if( M4SYS_kH264 == pC->ewc.VideoStreamType )
+ {
+
+ /* For H.264 encoder case
+ * Fetch the DSI from the shell video encoder, and feed it to the writer */
+
+ M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingOpen: get DSI for H264 stream");
+
+ if( M4OSA_NULL == pC->ewc.pEncContext )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen: pC->ewc.pEncContext is NULL");
+ err = M4VSS3GPP_intAudioMixingCreateVideoEncoder(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen:\
+ M4VSS3GPP_intAudioMixingCreateVideoEncoder returned error 0x%x",
+ err);
+ }
+ }
+
+ if( M4OSA_NULL != pC->ewc.pEncContext )
+ {
+ err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctGetOption(
+ pC->ewc.pEncContext, M4ENCODER_kOptionID_EncoderHeader,
+ (M4OSA_DataOption) &encHeader);
+
+ if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen: failed to get the encoder header (err 0x%x)",
+ err);
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_intAudioMixingOpen: encHeader->pBuf=0x%x, size=0x%x",
+ encHeader->pBuf, encHeader->Size);
+ }
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen: send DSI for H264 stream to 3GP writer");
+
+ /**
+ * Allocate and copy the new DSI */
+ pC->ewc.pVideoOutputDsi =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(encHeader->Size, M4VSS3GPP,
+ (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H264)");
+
+ if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ unable to allocate pVideoOutputDsi (H264), returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->ewc.uiVideoOutputDsiSize = (M4OSA_UInt16)encHeader->Size;
+ memcpy((void *)pC->ewc.pVideoOutputDsi, (void *)encHeader->pBuf,
+ encHeader->Size);
+ }
+
+ err = M4VSS3GPP_intAudioMixingDestroyVideoEncoder(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen:\
+ M4VSS3GPP_intAudioMixingDestroyVideoEncoder returned error 0x%x",
+ err);
+ }
+ }
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen: pC->ewc.pEncContext is NULL, cannot get the DSI");
+ }
+ }
+ else
+ {
+ M4OSA_TRACE3_1(
+ "M4VSS3GPP_intAudioMixingOpen: input clip video stream type = 0x%x",
+ pC->ewc.VideoStreamType);
+ pC->ewc.uiVideoOutputDsiSize =
+ (M4OSA_UInt16)pC->pInputClipCtxt->pVideoStream->
+ m_basicProperties.m_decoderSpecificInfoSize;
+ pC->ewc.pVideoOutputDsi = (M4OSA_MemAddr8)pC->pInputClipCtxt->pVideoStream->
+ m_basicProperties.m_pDecoderSpecificInfo;
+ }
+
+ /**
+ * Copy the audio properties of the added clip to the output properties */
+ if( pC->bHasAudio )
+ {
+ if( pC->bRemoveOriginal == M4OSA_TRUE )
+ {
+ pC->ewc.uiNbChannels =
+ pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels;
+ pC->ewc.uiAudioBitrate =
+ pC->pAddedClipCtxt->pSettings->ClipProperties.uiAudioBitrate;
+ pC->ewc.uiSamplingFrequency = pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency;
+ pC->ewc.uiSilencePcmSize =
+ pC->pAddedClipCtxt->pSettings->ClipProperties.uiDecodedPcmSize;
+ pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+ /* if output settings are differents from added clip settings,
+ we need to reencode BGM */
+ if( pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+ != pSettings->outputAudioFormat
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency != outputASF
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels
+ != pSettings->outputNBChannels
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 )
+ {
+ /* Set reader DSI to NULL (unknown), we will use encoder DSI later */
+ if( pC->pAddedClipCtxt->pAudioStream->
+ m_basicProperties.m_pDecoderSpecificInfo != M4OSA_NULL )
+ {
+
+ /*
+ free(pC->pAddedClipCtxt->pAudioStream->\
+ m_basicProperties.m_pDecoderSpecificInfo);
+ */
+ pC->pAddedClipCtxt->pAudioStream->
+ m_basicProperties.m_decoderSpecificInfoSize = 0;
+ pC->pAddedClipCtxt->pAudioStream->
+ m_basicProperties.m_pDecoderSpecificInfo = M4OSA_NULL;
+ }
+
+ pC->ewc.uiNbChannels =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+ pC->ewc.uiSamplingFrequency = pC->pInputClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency;
+ pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+ if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+ {
+ pC->ewc.AudioStreamType = M4SYS_kAMR;
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+ pC->ewc.iSilenceFrameDuration =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+ pC->ewc.uiAudioBitrate = 12200;
+ pC->ewc.uiSamplingFrequency = 8000;
+ pC->ewc.uiSilencePcmSize = 320;
+ pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+ }
+ else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+ {
+ pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+ if( pSettings->outputAudioBitrate
+ == M4VIDEOEDITING_kUndefinedBitrate )
+ {
+ switch( pC->ewc.uiSamplingFrequency )
+ {
+ case 16000:
+ pC->ewc.uiAudioBitrate =
+ M4VIDEOEDITING_k24_KBPS;
+ break;
+
+ case 22050:
+ case 24000:
+ pC->ewc.uiAudioBitrate =
+ M4VIDEOEDITING_k32_KBPS;
+ break;
+
+ case 32000:
+ pC->ewc.uiAudioBitrate =
+ M4VIDEOEDITING_k48_KBPS;
+ break;
+
+ case 44100:
+ case 48000:
+ pC->ewc.uiAudioBitrate =
+ M4VIDEOEDITING_k64_KBPS;
+ break;
+
+ default:
+ pC->ewc.uiAudioBitrate =
+ M4VIDEOEDITING_k64_KBPS;
+ break;
+ }
+
+ if( pC->ewc.uiNbChannels == 2 )
+ {
+ /* Output bitrate have to be doubled */
+ pC->ewc.uiAudioBitrate += pC->ewc.uiAudioBitrate;
+ }
+ }
+ else
+ {
+ pC->ewc.uiAudioBitrate = pSettings->outputAudioBitrate;
+ }
+
+ if( pC->ewc.uiNbChannels == 1 )
+ {
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+ }
+ else
+ {
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+ }
+ pC->ewc.iSilenceFrameDuration =
+ 1024; /* AAC is always 1024/Freq sample duration */
+ }
+ }
+ else
+ {
+ switch( pC->pAddedClipCtxt->pSettings->
+ ClipProperties.AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ pC->ewc.AudioStreamType = M4SYS_kAMR;
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+ pC->ewc.iSilenceFrameDuration =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+ if( pC->ewc.uiNbChannels == 1 )
+ {
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+ }
+ else
+ {
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+ }
+ pC->ewc.iSilenceFrameDuration =
+ 1024; /* AAC is always 1024/Freq sample duration */
+ break;
+
+ case M4VIDEOEDITING_kEVRC:
+ pC->ewc.AudioStreamType = M4SYS_kEVRC;
+ pC->ewc.pSilenceFrameData = M4OSA_NULL;
+ pC->ewc.uiSilenceFrameSize = 0;
+ pC->ewc.iSilenceFrameDuration = 160; /* EVRC frames are 20 ms at 8000 Hz
+ (makes it easier to factorize amr and evrc code) */
+ break;
+
+ case M4VIDEOEDITING_kPCM:
+ /* Set reader DSI to NULL (unknown), we will use encoder DSI later */
+ pC->pAddedClipCtxt->pAudioStream->
+ m_basicProperties.m_decoderSpecificInfoSize = 0;
+ pC->pAddedClipCtxt->pAudioStream->
+ m_basicProperties.m_pDecoderSpecificInfo =
+ M4OSA_NULL;
+
+ if( pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency == 8000 )
+ {
+ pC->ewc.AudioStreamType = M4SYS_kAMR;
+ pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+ *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+ pC->ewc.iSilenceFrameDuration =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+ pC->ewc.uiAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
+ }
+ else if( pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency == 16000 )
+ {
+ if( pC->ewc.uiNbChannels == 1 )
+ {
+ pC->ewc.AudioStreamType = M4SYS_kAAC;
+ pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+ *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+ pC->ewc.iSilenceFrameDuration =
+ 1024; /* AAC is always 1024/Freq sample duration */
+ pC->ewc.uiAudioBitrate =
+ M4VIDEOEDITING_k32_KBPS;
+ }
+ else
+ {
+ pC->ewc.AudioStreamType = M4SYS_kAAC;
+ pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+ *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+ pC->ewc.iSilenceFrameDuration =
+ 1024; /* AAC is always 1024/Freq sample duration */
+ pC->ewc.uiAudioBitrate =
+ M4VIDEOEDITING_k64_KBPS;
+ }
+ }
+ else
+ {
+ pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+ }
+ break;
+
+ default:
+ pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+ break;
+ }
+ }
+
+ /* Add a link to audio dsi */
+ pC->ewc.uiAudioOutputDsiSize =
+ (M4OSA_UInt16)pC->pAddedClipCtxt->pAudioStream->
+ m_basicProperties.m_decoderSpecificInfoSize;
+ pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)pC->pAddedClipCtxt->pAudioStream->
+ m_basicProperties.m_pDecoderSpecificInfo;
+ }
+ else
+ {
+ pC->ewc.uiNbChannels =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+ pC->ewc.uiAudioBitrate =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiAudioBitrate;
+ pC->ewc.uiSamplingFrequency = pC->pInputClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency;
+ pC->ewc.uiSilencePcmSize =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiDecodedPcmSize;
+ pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+ switch( pC->pInputClipCtxt->pSettings->
+ ClipProperties.AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ pC->ewc.AudioStreamType = M4SYS_kAMR;
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+ pC->ewc.iSilenceFrameDuration =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+ if( pC->ewc.uiNbChannels == 1 )
+ {
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+ }
+ else
+ {
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+ }
+ pC->ewc.iSilenceFrameDuration =
+ 1024; /* AAC is always 1024/Freq sample duration */
+ break;
+
+ default:
+ pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen: No audio track in input file.");
+ return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+ break;
+ }
+
+ /* Add a link to audio dsi */
+ pC->ewc.uiAudioOutputDsiSize =
+ (M4OSA_UInt16)pC->pInputClipCtxt->pAudioStream->
+ m_basicProperties.m_decoderSpecificInfoSize;
+ pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)pC->pInputClipCtxt->pAudioStream->
+ m_basicProperties.m_pDecoderSpecificInfo;
+ }
+ }
+
+ /**
+ * Copy common 'silence frame stuff' to ClipContext */
+ pC->pInputClipCtxt->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+ pC->pInputClipCtxt->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+ pC->pInputClipCtxt->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+ pC->pInputClipCtxt->iSilenceFrameDuration = pC->ewc.iSilenceFrameDuration;
+ pC->pInputClipCtxt->scale_audio = pC->ewc.scale_audio;
+
+ pC->pInputClipCtxt->iAudioFrameCts =
+ -pC->pInputClipCtxt->iSilenceFrameDuration; /* Reset time */
+
+ /**
+ * Copy common 'silence frame stuff' to ClipContext */
+ if( pC->bHasAudio )
+ {
+ pC->pAddedClipCtxt->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+ pC->pAddedClipCtxt->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+ pC->pAddedClipCtxt->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+ pC->pAddedClipCtxt->iSilenceFrameDuration =
+ pC->ewc.iSilenceFrameDuration;
+ pC->pAddedClipCtxt->scale_audio = pC->ewc.scale_audio;
+
+ pC->pAddedClipCtxt->iAudioFrameCts =
+ -pC->pAddedClipCtxt->iSilenceFrameDuration; /* Reset time */
+ }
+
+ /**
+ * Check AddCts is lower than original clip duration */
+ if( ( M4OSA_NULL != pC->pInputClipCtxt->pVideoStream)
+ && (pC->iAddCts > (M4OSA_Int32)pC->pInputClipCtxt->pVideoStream->
+ m_basicProperties.m_duration) )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen(): uiAddCts is larger than video duration,\
+ returning M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION");
+ return M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION;
+ }
+
+ /**
+ * If the audio tracks are not compatible, replace input track by silence */
+ if( M4OSA_FALSE == pC->pInputClipCtxt->pSettings->
+ ClipProperties.bAudioIsCompatibleWithMasterClip )
+ {
+ M4VSS3GPP_intClipDeleteAudioTrack(pC->pInputClipCtxt);
+ }
+
+ /**
+ * Check if audio mixing is required */
+ if( ( ( pC->bHasAudio) && (M4OSA_FALSE
+ == pC->pAddedClipCtxt->pSettings->ClipProperties.bAudioIsEditable))
+ || (M4OSA_TRUE == pC->bRemoveOriginal) ) /*||
+ (pSettings->uiAddVolume >= 100)) */
+ {
+ pC->bAudioMixingIsNeeded = M4OSA_FALSE;
+ }
+ else
+ {
+ pC->bAudioMixingIsNeeded = M4OSA_TRUE;
+ }
+
+ /**
+ * Check if output audio can support silence frames
+ Trick i use bAudioIsCompatibleWithMasterClip filed to store that */
+ if( pC->bHasAudio )
+ {
+ pC->bSupportSilence = pC->pAddedClipCtxt->pSettings->
+ ClipProperties.bAudioIsCompatibleWithMasterClip;
+
+ if( M4OSA_FALSE == pC->bSupportSilence )
+ {
+ if( pC->iAddCts > 0 )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ iAddCts should be set to 0 with this audio track !");
+ return M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK;
+ }
+
+ if( 0 < pC->uiEndLoop )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ uiEndLoop should be set to 0 with this audio track !");
+ return M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK;
+ }
+ }
+ }
+ if( pC->b_DuckingNeedeed == M4OSA_FALSE)
+ {
+ /**
+ * Compute the factor to apply to sample to do the mixing */
+ pC->fAddedFactor = 0.50F;
+ pC->fOrigFactor = 0.50F;
+ }
+
+
+ /**
+ * Check if SSRC is needed */
+ if( M4OSA_TRUE == pC->b_SSRCneeded )
+ {
+ M4OSA_UInt32 numerator, denominator, ratio, ratioBuffer;
+
+ /**
+ * Init the SSRC module */
+ SSRC_ReturnStatus_en
+ ReturnStatus; /* Function return status */
+ LVM_INT16 NrSamplesMin =
+ 0; /* Minimal number of samples on the input or on the output */
+ LVM_INT32
+ ScratchSize; /* The size of the scratch memory */
+ LVM_INT16
+ *pInputInScratch; /* Pointer to input in the scratch buffer */
+ LVM_INT16
+ *
+ pOutputInScratch; /* Pointer to the output in the scratch buffer */
+ SSRC_Params_t ssrcParams; /* Memory for init parameters */
+
+ switch( pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency )
+ {
+ case 8000:
+ ssrcParams.SSRC_Fs_In = LVM_FS_8000;
+ break;
+
+ case 11025:
+ ssrcParams.SSRC_Fs_In = LVM_FS_11025;
+ break;
+
+ case 12000:
+ ssrcParams.SSRC_Fs_In = LVM_FS_12000;
+ break;
+
+ case 16000:
+ ssrcParams.SSRC_Fs_In = LVM_FS_16000;
+ break;
+
+ case 22050:
+ ssrcParams.SSRC_Fs_In = LVM_FS_22050;
+ break;
+
+ case 24000:
+ ssrcParams.SSRC_Fs_In = LVM_FS_24000;
+ break;
+
+ case 32000:
+ ssrcParams.SSRC_Fs_In = LVM_FS_32000;
+ break;
+
+ case 44100:
+ ssrcParams.SSRC_Fs_In = LVM_FS_44100;
+ break;
+
+ case 48000:
+ ssrcParams.SSRC_Fs_In = LVM_FS_48000;
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen: invalid added clip sampling frequency (%d Hz),\
+ returning M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM",
+ pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency);
+ return M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM;
+ }
+
+ if( 1 == pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+ {
+ ssrcParams.SSRC_NrOfChannels = LVM_MONO;
+ }
+ else
+ {
+ ssrcParams.SSRC_NrOfChannels = LVM_STEREO;
+ }
+
+ switch( pC->ewc.uiSamplingFrequency )
+ {
+ case 8000:
+ ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
+ break;
+
+ case 16000:
+ ssrcParams.SSRC_Fs_Out = LVM_FS_16000;
+ break;
+
+ case 22050:
+ ssrcParams.SSRC_Fs_Out = LVM_FS_22050;
+ break;
+
+ case 24000:
+ ssrcParams.SSRC_Fs_Out = LVM_FS_24000;
+ break;
+
+ case 32000:
+ ssrcParams.SSRC_Fs_Out = LVM_FS_32000;
+ break;
+
+ case 44100:
+ ssrcParams.SSRC_Fs_Out = LVM_FS_44100;
+ break;
+
+ case 48000:
+ ssrcParams.SSRC_Fs_Out = LVM_FS_48000;
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen: invalid output sampling frequency (%d Hz),\
+ returning M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED",
+ pC->ewc.uiSamplingFrequency);
+ return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+ break;
+ }
+ ReturnStatus = 0;
+
+ switch (ssrcParams.SSRC_Fs_In){
+ case LVM_FS_8000:
+ ssrcParams.NrSamplesIn = 320;
+ break;
+ case LVM_FS_11025:
+ ssrcParams.NrSamplesIn =441;
+ break;
+ case LVM_FS_12000:
+ ssrcParams.NrSamplesIn = 480;
+ break;
+ case LVM_FS_16000:
+ ssrcParams.NrSamplesIn = 640;
+ break;
+ case LVM_FS_22050:
+ ssrcParams.NrSamplesIn = 882;
+ break;
+ case LVM_FS_24000:
+ ssrcParams.NrSamplesIn = 960;
+ break;
+ case LVM_FS_32000:
+ ssrcParams.NrSamplesIn = 1280;
+ break;
+ case LVM_FS_44100:
+ ssrcParams.NrSamplesIn = 1764;
+ break;
+ case LVM_FS_48000:
+ ssrcParams.NrSamplesIn = 1920;
+ break;
+ default:
+ ReturnStatus = -1;
+ break;
+ }
+
+ switch (ssrcParams.SSRC_Fs_Out){
+ case LVM_FS_8000:
+ ssrcParams.NrSamplesOut= 320;
+ break;
+ case LVM_FS_11025:
+ ssrcParams.NrSamplesOut =441;
+ break;
+ case LVM_FS_12000:
+ ssrcParams.NrSamplesOut= 480;
+ break;
+ case LVM_FS_16000:
+ ssrcParams.NrSamplesOut= 640;
+ break;
+ case LVM_FS_22050:
+ ssrcParams.NrSamplesOut= 882;
+ break;
+ case LVM_FS_24000:
+ ssrcParams.NrSamplesOut= 960;
+ break;
+ case LVM_FS_32000:
+ ssrcParams.NrSamplesOut = 1280;
+ break;
+ case LVM_FS_44100:
+ ssrcParams.NrSamplesOut= 1764;
+ break;
+ case LVM_FS_48000:
+ ssrcParams.NrSamplesOut = 1920;
+ break;
+ default:
+ ReturnStatus = -1;
+ break;
+ }
+ if( ReturnStatus != SSRC_OK )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen:\
+ Error code %d returned by the SSRC_GetNrSamples function",
+ ReturnStatus);
+ return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+ }
+
+ NrSamplesMin =
+ (LVM_INT16)((ssrcParams.NrSamplesIn > ssrcParams.NrSamplesOut)
+ ? ssrcParams.NrSamplesOut : ssrcParams.NrSamplesIn);
+
+ while( NrSamplesMin < M4VSS_SSRC_MINBLOCKSIZE )
+ { /* Don't take blocks smaller that the minimal block size */
+ ssrcParams.NrSamplesIn = (LVM_INT16)(ssrcParams.NrSamplesIn << 1);
+ ssrcParams.NrSamplesOut = (LVM_INT16)(ssrcParams.NrSamplesOut << 1);
+ NrSamplesMin = (LVM_INT16)(NrSamplesMin << 1);
+ }
+ pC->iSsrcNbSamplIn = (LVM_INT16)(
+ ssrcParams.
+ NrSamplesIn); /* multiplication by NrOfChannels is done below */
+ pC->iSsrcNbSamplOut = (LVM_INT16)(ssrcParams.NrSamplesOut);
+
+ numerator =
+ pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+ * pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels;
+ denominator =
+ pC->pInputClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+ * pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+
+ if( numerator % denominator == 0 )
+ {
+ ratioBuffer = (M4OSA_UInt32)(numerator / denominator);
+ }
+ else
+ {
+ ratioBuffer = (M4OSA_UInt32)(numerator / denominator) + 1;
+ }
+
+ ratio =
+ (M4OSA_UInt32)(( pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+ * ratioBuffer) / (pC->iSsrcNbSamplIn * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels));
+
+ if( ratio == 0 )
+ {
+ /* It means that the input size of SSRC bufferIn is bigger than the asked buffer */
+ pC->minimumBufferIn = pC->iSsrcNbSamplIn * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels;
+ }
+ else
+ {
+ ratio++; /* We use the immediate superior integer */
+ pC->minimumBufferIn = ratio * (pC->iSsrcNbSamplIn * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ }
+
+ /**
+ * Allocate buffer for the input of the SSRC */
+ pC->pSsrcBufferIn =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->minimumBufferIn
+ + pC->pAddedClipCtxt->
+ AudioDecBufferOut.
+ m_bufferSize,
+ M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferIn");
+
+ if( M4OSA_NULL == pC->pSsrcBufferIn )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+ /**
+ * Allocate buffer for the output of the SSRC */
+ /* The "3" value below should be optimized ... one day ... */
+ pC->pSsrcBufferOut =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(3 * pC->iSsrcNbSamplOut * sizeof(short)
+ * pC->ewc.uiNbChannels, M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
+
+ if( M4OSA_NULL == pC->pSsrcBufferOut )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
+
+ /**
+ * Allocate temporary buffer needed in case of channel conversion */
+ if( pC->ChannelConversion > 0 )
+ {
+ /* The "3" value below should be optimized ... one day ... */
+ pC->pTempBuffer =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(3 * pC->iSsrcNbSamplOut
+ * sizeof(short) * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels, M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
+
+ if( M4OSA_NULL == pC->pTempBuffer )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ unable to allocate pTempBuffer, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->pPosInTempBuffer = pC->pTempBuffer;
+ }
+ }
+ else if( pC->ChannelConversion > 0 )
+ {
+ pC->minimumBufferIn =
+ pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+
+ /**
+ * Allocate buffer for the input of the SSRC */
+ pC->pSsrcBufferIn =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->minimumBufferIn
+ + pC->pAddedClipCtxt->
+ AudioDecBufferOut.
+ m_bufferSize,
+ M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferIn");
+
+ if( M4OSA_NULL == pC->pSsrcBufferIn )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen(): \
+ unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+ /**
+ * Allocate buffer for the output of the SSRC */
+ /* The "3" value below should be optimized ... one day ... */
+ pC->pSsrcBufferOut = (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize,
+ M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
+
+ if( M4OSA_NULL == pC->pSsrcBufferOut )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen():\
+ unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
+ }
+ else if( (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3)||
+ (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM))
+ {
+ M4OSA_UInt32 minbuffer = 0;
+
+ if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+ {
+ pC->minimumBufferIn = 2048 * pC->ewc.uiNbChannels;
+ minbuffer = pC->minimumBufferIn;
+ }
+ else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+ {
+ pC->minimumBufferIn = 320;
+
+ if( pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize > 320 )
+ {
+ minbuffer = pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+ }
+ else
+ {
+ minbuffer = pC->minimumBufferIn; /* Not really possible ...*/
+ }
+ }
+ else
+ {
+ M4OSA_TRACE1_0("Bad output audio format, in case of MP3 replacing");
+ return M4ERR_PARAMETER;
+ }
+
+ /**
+ * Allocate buffer for the input of the SSRC */
+ pC->pSsrcBufferIn =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(2 * minbuffer, M4VSS3GPP,
+ (M4OSA_Char *)"pSsrcBufferIn");
+
+ if( M4OSA_NULL == pC->pSsrcBufferIn )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingOpen(): unable to allocate pSsrcBufferIn,\
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+ pC->pPosInSsrcBufferOut = pC->pPosInSsrcBufferIn;
+ pC->pSsrcBufferOut = pC->pSsrcBufferIn;
+ }
+
+ /**
+ * Check if audio encoder is needed to do audio mixing or audio resampling */
+ if( M4OSA_TRUE == pC->bAudioMixingIsNeeded || M4VIDEOEDITING_kPCM
+ == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+ || M4VIDEOEDITING_kMP3
+ == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+ || pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+ != pSettings->outputAudioFormat
+ || pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+ != outputASF
+ || pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels
+ != pSettings->outputNBChannels )
+ {
+ /**
+ * Init the audio encoder */
+ err = M4VSS3GPP_intCreateAudioEncoder(&pC->ewc, &pC->ShellAPI,
+ pC->ewc.uiAudioBitrate);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intCreateAudioEncoder() returns 0x%x",
+ err);
+ return err;
+ }
+
+ /* In case of PCM, MP3 or audio replace with reencoding, use encoder DSI */
+ if( pC->ewc.uiAudioOutputDsiSize == 0 && (M4VIDEOEDITING_kPCM
+ == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+ || M4VIDEOEDITING_kMP3 == pC->pAddedClipCtxt->pSettings->
+ ClipProperties.AudioStreamType
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.AudioStreamType
+ != pSettings->outputAudioFormat
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiSamplingFrequency != outputASF
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels
+ != pSettings->outputNBChannels) )
+ {
+ pC->ewc.uiAudioOutputDsiSize =
+ (M4OSA_UInt16)pC->ewc.pAudioEncDSI.infoSize;
+ pC->ewc.pAudioOutputDsi = pC->ewc.pAudioEncDSI.pInfo;
+ }
+ }
+
+ /**
+ * Init the output 3GPP file */
+ /*11/12/2008 CR3283 add the max output file size for the MMS use case in VideoArtist*/
+ err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+ pC->pOsaFileWritPtr, pSettings->pOutputClipFile,
+ pC->pOsaFileReadPtr, pSettings->pTemporaryFile, 0);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intCreate3GPPOutputFile() returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingOpen(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence()
+ * @brief Write an audio silence frame into the writer
+ * @note Mainly used when padding with silence
+ * @param pC (IN) VSS audio mixing internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence(
+ M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+ M4OSA_ERR err;
+
+ /* Ask the 3GP writer for an empty audio access unit (AU) to fill */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingWriteSilence:\
+ pWriterDataFcts->pStartAU(audio) returns 0x%x!", err);
+ return err;
+ }
+
+ M4OSA_TRACE2_0("A #### silence AU");
+
+ /* Copy the pre-built, codec-specific silence frame into the AU payload.
+ NOTE(review): assumes the writer AU buffer holds at least
+ uiSilenceFrameSize bytes -- no capacity check is done here; confirm
+ against the writer's pStartAU contract. */
+ memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
+ (void *)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+
+ pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+ /* Convert the audio time cursor dATo to timestamp units via scale_audio;
+ the +0.5 rounds to the nearest integer CTS */
+ pC->ewc.WriterAudioAU.CTS =
+ (M4OSA_Time)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+ M4OSA_TRACE2_2("B ---- write : cts = %ld [ 0x%x ]",
+ (M4OSA_Int32)(pC->ewc.dATo), pC->ewc.WriterAudioAU.size);
+
+ /* Commit the filled silence AU to the writer */
+ err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingWriteSilence:\
+ pWriterDataFcts->pProcessAU(silence) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /* Advance the audio time cursor by one silence frame duration,
+ converted back from timestamp units to the dATo time base */
+ pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief Perform one step of video.
+ * @note
+ * @param pC (IN) VSS audio mixing internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(
+ M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+ M4OSA_ERR err;
+ M4OSA_UInt16 offset;
+
+ M4OSA_TRACE2_3(" VIDEO step : dVTo = %f state = %d offset = %ld",
+ pC->ewc.dOutputVidCts, pC->State, pC->pInputClipCtxt->iVoffset);
+
+ /**
+ * Read the input video AU */
+ err = pC->pInputClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+ pC->pInputClipCtxt->pReaderContext,
+ (M4_StreamHandler *)pC->pInputClipCtxt->pVideoStream,
+ &pC->pInputClipCtxt->VideoAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE3_1(
+ "M4VSS3GPP_intAudioMixingStepVideo(): m_pFctGetNextAu(video) returns 0x%x",
+ err);
+ return err;
+ }
+
+ M4OSA_TRACE2_3("C .... read : cts = %.0f + %ld [ 0x%x ]",
+ pC->pInputClipCtxt->VideoAU.m_CTS, pC->pInputClipCtxt->iVoffset,
+ pC->pInputClipCtxt->VideoAU.m_size);
+
+ /**
+ * Get the output AU to write into */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &pC->ewc.WriterVideoAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepVideo: pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ offset = 0;
+ /* for h.264 stream do not read the 1st 4 bytes as they are header indicators */
+ if( pC->pInputClipCtxt->pVideoStream->m_basicProperties.m_streamType
+ == M4DA_StreamTypeVideoMpeg4Avc )
+ {
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intAudioMixingStepVideo(): input stream type H264");
+ offset = 4;
+ }
+ pC->pInputClipCtxt->VideoAU.m_size -= offset;
+ /**
+ * Check that the video AU is not larger than expected */
+ if( pC->pInputClipCtxt->VideoAU.m_size > pC->ewc.uiVideoMaxAuSize )
+ {
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_intAudioMixingStepVideo: AU size greater than MaxAuSize (%d>%d)!\
+ returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
+ pC->pInputClipCtxt->VideoAU.m_size, pC->ewc.uiVideoMaxAuSize);
+ return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
+ }
+
+ /**
+ * Copy the input AU payload to the output AU */
+ memcpy((void *)pC->ewc.WriterVideoAU.dataAddress,
+ (void *)(pC->pInputClipCtxt->VideoAU.m_dataAddress + offset),
+ (pC->pInputClipCtxt->VideoAU.m_size));
+
+ /**
+ * Copy the input AU parameters to the output AU */
+ pC->ewc.WriterVideoAU.size = pC->pInputClipCtxt->VideoAU.m_size;
+ pC->ewc.WriterVideoAU.CTS =
+ (M4OSA_UInt32)(pC->pInputClipCtxt->VideoAU.m_CTS + 0.5);
+ pC->ewc.WriterVideoAU.attribute = pC->pInputClipCtxt->VideoAU.m_attribute;
+
+ /**
+ * Write the AU */
+ M4OSA_TRACE2_2("D ---- write : cts = %lu [ 0x%x ]",
+ pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);
+
+ err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &pC->ewc.WriterVideoAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepVideo: pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingStepVideo(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief Perform one step of audio.
+ * @note
+ * @param pC (IN) VSS audio mixing internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(
+ M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_3(" AUDIO mix : dATo = %f state = %d offset = %ld",
+ pC->ewc.dATo, pC->State, pC->pInputClipCtxt->iAoffset);
+
+ switch( pC->State )
+ {
+ /**********************************************************/
+ case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+ {
+ err = M4VSS3GPP_intAudioMixingCopyOrig(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix:\
+ M4VSS3GPP_intAudioMixingCopyOrig(1) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Check if we reached the AddCts */
+ if( pC->ewc.dATo >= pC->iAddCts )
+ {
+ /**
+ * First segment is over, state transition to second and return OK */
+ pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+
+ /* Transition from reading state to encoding state */
+ err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix(): pre-encode fails err = 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Return with no error so the step function will be called again */
+ pC->pAddedClipCtxt->iAoffset =
+ (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingStepAudioMix(): returning M4NO_ERROR (1->2)");
+
+ return M4NO_ERROR;
+ }
+ }
+ break;
+
+ /**********************************************************/
+ case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+ {
+ if( M4OSA_TRUE == pC->bAudioMixingIsNeeded ) /**< Mix */
+ {
+ /**
+ * Read the added audio AU */
+ if( pC->ChannelConversion > 0 || pC->b_SSRCneeded == M4OSA_TRUE
+ || pC->pAddedClipCtxt->pSettings->
+ ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 )
+ {
+ /* In case of sampling freq conversion and/or channel conversion,
+ the read next AU will be called by the
+ M4VSS3GPP_intAudioMixingDoMixing function */
+ }
+ else
+ {
+ err =
+ M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+
+ M4OSA_TRACE2_3("E .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pAddedClipCtxt->iAudioFrameCts
+ / pC->pAddedClipCtxt->scale_audio,
+ pC->pAddedClipCtxt->iAoffset
+ / pC->pAddedClipCtxt->scale_audio,
+ pC->pAddedClipCtxt->uiAudioFrameSize);
+
+ if( M4WAR_NO_MORE_AU == err )
+ {
+ /**
+ * Decide what to do when audio is over */
+ if( pC->uiEndLoop > 0 )
+ {
+ /**
+ * Jump at the Begin loop time */
+ M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+ err = pC->pAddedClipCtxt->ShellAPI.m_pReader->
+ m_pFctJump(
+ pC->pAddedClipCtxt->pReaderContext,
+ (M4_StreamHandler
+ *)pC->pAddedClipCtxt->pAudioStream,
+ &time);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ m_pReader->m_pFctJump(audio returns 0x%x",
+ err);
+ return err;
+ }
+ }
+ else
+ {
+ /* Transition from encoding state to reading state */
+ err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ pre-encode fails err = 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Second segment is over, state transition to third and
+ return OK */
+ pC->State =
+ M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+ /**
+ * Return with no error so the step function will be
+ called again */
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ returning M4NO_ERROR (2->3) a");
+ return M4NO_ERROR;
+ }
+ }
+ else if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ m_pFctGetNextAu(audio) returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Read the original audio AU */
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+ M4OSA_TRACE2_3("F .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pInputClipCtxt->iAudioFrameCts
+ / pC->pInputClipCtxt->scale_audio,
+ pC->pInputClipCtxt->iAoffset
+ / pC->pInputClipCtxt->scale_audio,
+ pC->pInputClipCtxt->uiAudioFrameSize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE3_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ m_pFctGetNextAu(audio) returns 0x%x",
+ err);
+ return err;
+ }
+
+ if( pC->ChannelConversion == 0
+ && pC->b_SSRCneeded == M4OSA_FALSE
+ && pC->pAddedClipCtxt->pSettings->
+ ClipProperties.AudioStreamType != M4VIDEOEDITING_kMP3 )
+ {
+ /**
+ * Get the output AU to write into */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+ pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+ &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix:\
+ pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Perform the audio mixing */
+ err = M4VSS3GPP_intAudioMixingDoMixing(pC);
+
+ if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+ {
+ return M4NO_ERROR;
+ }
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix:\
+ M4VSS3GPP_intAudioMixingDoMixing returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+ else /**< No mix, just copy added audio */
+ {
+ err = M4VSS3GPP_intAudioMixingCopyAdded(pC);
+
+ if( M4WAR_NO_MORE_AU == err )
+ {
+ /**
+ * Decide what to do when audio is over */
+ if( pC->uiEndLoop > 0 )
+ {
+ /**
+ * Jump at the Begin loop time */
+ M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+ err =
+ pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+ pC->pAddedClipCtxt->pReaderContext,
+ (M4_StreamHandler
+ *)pC->pAddedClipCtxt->pAudioStream,
+ &time);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ m_pReader->m_pFctJump(audio returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * 'BZZZ' bug fix:
+ * add a silence frame */
+ err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Return with no error so the step function will be called again to
+ read audio data */
+ pC->pAddedClipCtxt->iAoffset =
+ (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio
+ + 0.5);
+
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ returning M4NO_ERROR (loop)");
+ return M4NO_ERROR;
+ }
+ else
+ {
+ /* Transition to begin cut */
+ err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ pre-encode fails err = 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Second segment is over, state transition to third */
+ pC->State =
+ M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+ /**
+ * Return with no error so the step function will be called again */
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ returning M4NO_ERROR (2->3) b");
+ return M4NO_ERROR;
+ }
+ }
+ else if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ M4VSS3GPP_intAudioMixingCopyOrig(2) returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Check if we reached the end of the video */
+ if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+ {
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intAudioMixingStepAudioMix(): Video duration reached,\
+ returning M4WAR_NO_MORE_AU");
+ return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+ }
+ }
+ break;
+
+ /**********************************************************/
+ case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
+ {
+ err = M4VSS3GPP_intAudioMixingCopyOrig(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix:\
+ M4VSS3GPP_intAudioMixingCopyOrig(3) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Check if we reached the end of the video */
+ if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+ {
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ Video duration reached, returning M4WAR_NO_MORE_AU");
+ return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intAudioMixingStepAudioMix(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief Perform one step of audio.
+ * @note
+ * @param pC (IN) VSS audio mixing internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(
+ M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+ M4OSA_ERR err;
+
+ M4OSA_TRACE2_3(" AUDIO repl : dATo = %f state = %d offset = %ld",
+ pC->ewc.dATo, pC->State, pC->pInputClipCtxt->iAoffset);
+
+ switch( pC->State )
+ {
+ /**********************************************************/
+ case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+ {
+ /**
+ * Replace the SID (silence) payload in the writer AU */
+ err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Check if we reached the AddCts */
+ if( pC->ewc.dATo >= pC->iAddCts )
+ {
+ /**
+ * First segment is over, state transition to second and return OK */
+ pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+
+ /**
+ * Return with no error so the step function will be called again */
+ pC->pAddedClipCtxt->iAoffset =
+ (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+ M4OSA_TRACE2_0("M4VSS3GPP_intAudioMixingStepAudioReplace():\
+ returning M4NO_ERROR (1->2)");
+ return M4NO_ERROR;
+ }
+ }
+ break;
+
+ /**********************************************************/
+ case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+ {
+ err = M4VSS3GPP_intAudioMixingCopyAdded(pC);
+
+ if( M4WAR_NO_MORE_AU == err )
+ {
+ /**
+ * Decide what to do when audio is over */
+
+ if( pC->uiEndLoop > 0 )
+ {
+ /**
+ * Jump at the Begin loop time */
+ M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+ err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+ pC->pAddedClipCtxt->pReaderContext,
+ (M4_StreamHandler
+ *)pC->pAddedClipCtxt->pAudioStream, &time);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+ m_pReader->m_pFctJump(audio returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * 'BZZZ' bug fix:
+ * add a silence frame */
+ err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Return with no error so the step function will be called again to
+ read audio data */
+ pC->pAddedClipCtxt->iAoffset =
+ (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+ returning M4NO_ERROR (loop)");
+
+ return M4NO_ERROR;
+ }
+ else if( M4OSA_TRUE == pC->bSupportSilence )
+ {
+ /**
+ * Second segment is over, state transition to third and return OK */
+ pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+ /**
+ * Return with no error so the step function will be called again */
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+ returning M4NO_ERROR (2->3)");
+ return M4NO_ERROR;
+ }
+ else
+ {
+ /**
+ * The third segment (silence) is only done if supported.
+ * In other case, we finish here. */
+ pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+
+ /**
+ * Return with no error so the step function will be called again */
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+ returning M4NO_ERROR (2->F)");
+ return M4NO_ERROR;
+ }
+ }
+ else if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+ M4VSS3GPP_intAudioMixingCopyOrig(2) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Check if we reached the end of the clip */
+ if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+ {
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intAudioMixingStepAudioReplace(): Clip duration reached,\
+ returning M4WAR_NO_MORE_AU");
+ return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+ }
+ }
+ break;
+
+ /**********************************************************/
+ case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
+ {
+ /**
+ * Replace the SID (silence) payload in the writer AU */
+ err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix():\
+ M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Check if we reached the end of the video */
+ if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+ {
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+ Video duration reached, returning M4WAR_NO_MORE_AU");
+ return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intAudioMixingStepAudioReplace(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief Read one AU from the original audio file and write it to the output
+ * @note
+ * @param pC (IN) VSS audio mixing internal context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(
+ M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+ M4OSA_ERR err;
+
+ /**
+ * Read the input original audio AU */
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+ M4OSA_TRACE2_3("G .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pInputClipCtxt->iAudioFrameCts / pC->pInputClipCtxt->scale_audio,
+ pC->pInputClipCtxt->iAoffset / pC->pInputClipCtxt->scale_audio,
+ pC->pInputClipCtxt->uiAudioFrameSize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE3_1(
+ "M4VSS3GPP_intAudioMixingCopyOrig(): m_pFctGetNextAu(audio) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Get the output AU to write into */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingCopyOrig: pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Copy the input AU properties to the output AU */
+ pC->ewc.WriterAudioAU.size = pC->pInputClipCtxt->uiAudioFrameSize;
+ pC->ewc.WriterAudioAU.CTS =
+ pC->pInputClipCtxt->iAudioFrameCts + pC->pInputClipCtxt->iAoffset;
+
+ /**
+ * Copy the AU itself */
+ memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
+ (void *)pC->pInputClipCtxt->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+
+ /**
+ * Write the mixed AU */
+ M4OSA_TRACE2_2("H ---- write : cts = %ld [ 0x%x ]",
+ (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+ pC->ewc.WriterAudioAU.size);
+
+ err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingCopyOrig: pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Increment the audio CTS for the next step */
+ pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingCopyOrig(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief Read one AU from the added audio file and write it to the output
+ * @note
+ * @param pC (IN) VSS audio mixing internal context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(
+ M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+ M4OSA_ERR err;
+
+ if(pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 ||
+ pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM ||
+ pC->b_SSRCneeded == M4OSA_TRUE ||
+ pC->ChannelConversion > 0)
+ {
+ M4ENCODER_AudioBuffer pEncInBuffer; /**< Encoder input buffer for api */
+ M4ENCODER_AudioBuffer
+ pEncOutBuffer; /**< Encoder output buffer for api */
+ M4OSA_Time
+ frameTimeDelta; /**< Duration of the encoded (then written) data */
+ M4OSA_MemAddr8 tempPosBuffer;
+
+ err = M4VSS3GPP_intAudioMixingConvert(pC);
+
+ if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+ {
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingCopyAdded:\
+ M4VSS3GPP_intAudioMixingConvert end of added file");
+ return M4NO_ERROR;
+ }
+ else if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingCopyAdded:\
+ M4VSS3GPP_intAudioMixingConvert returned 0x%x", err);
+ return err;
+ }
+
+ /**
+ * Get the output AU to write into */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix:\
+ pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /* [Mono] or [Stereo interleaved] : all is in one buffer */
+ pEncInBuffer.pTableBuffer[0] = pC->pSsrcBufferOut;
+ pEncInBuffer.pTableBufferSize[0] =
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+ pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+ pEncInBuffer.pTableBufferSize[1] = 0;
+
+ /* Time in ms from data size, because it is PCM16 samples */
+ frameTimeDelta = pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+ / pC->ewc.uiNbChannels;
+
+ /**
+ * Prepare output buffer */
+ pEncOutBuffer.pTableBuffer[0] =
+ (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+ pEncOutBuffer.pTableBufferSize[0] = 0;
+
+ M4OSA_TRACE2_0("K **** blend AUs");
+ /**
+ * Encode the PCM audio */
+
+ err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+ pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingDoMixing():\
+ pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Set AU cts and size */
+ pC->ewc.WriterAudioAU.size =
+ pEncOutBuffer.
+ pTableBufferSize[0]; /**< Get the size of encoded data */
+ pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+ /* Update decoded buffer here */
+ if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0 )
+ {
+ tempPosBuffer = pC->pSsrcBufferOut
+ + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+ memmove((void *)pC->pSsrcBufferOut, (void *)tempPosBuffer,
+ pC->pPosInSsrcBufferOut - tempPosBuffer);
+ pC->pPosInSsrcBufferOut -=
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+ }
+ else
+ {
+ tempPosBuffer = pC->pSsrcBufferIn + pC->minimumBufferIn;
+ memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
+ pC->pPosInSsrcBufferIn - tempPosBuffer);
+ pC->pPosInSsrcBufferIn -= pC->minimumBufferIn;
+ }
+
+ /**
+ * Write the mixed AU */
+ M4OSA_TRACE2_2("J ---- write : cts = %ld [ 0x%x ]",
+ (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+ pC->ewc.WriterAudioAU.size);
+
+ err =
+ pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingCopyAdded:\
+ pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Increment the audio CTS for the next step */
+ pC->ewc.dATo += frameTimeDelta / pC->ewc.scale_audio;
+ }
+ else
+ {
+ /**
+ * Read the added audio AU */
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+
+ M4OSA_TRACE2_3("I .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pAddedClipCtxt->iAudioFrameCts
+ / pC->pAddedClipCtxt->scale_audio,
+ pC->pAddedClipCtxt->iAoffset / pC->pAddedClipCtxt->scale_audio,
+ pC->pAddedClipCtxt->uiAudioFrameSize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE3_1(
+ "M4VSS3GPP_intAudioMixingCopyAdded(): m_pFctGetNextAu(audio) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Get the output AU to write into */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingCopyAdded:\
+ pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Copy the input AU properties to the output AU */
+
+ /** THE CHECK BELOW IS ADDED TO PREVENT ISSUES LINKED TO PRE-ALLOCATED MAX AU SIZE
+ max AU size is set based on M4VSS3GPP_AUDIO_MAX_AU_SIZE defined in file
+ M4VSS3GPP_InternalConfig.h, If this error occurs increase the limit set in this file
+ */
+ if( pC->pAddedClipCtxt->uiAudioFrameSize > pC->ewc.WriterAudioAU.size )
+ {
+ M4OSA_TRACE1_2(
+ "ERROR: audio AU size (%d) to copy larger than allocated one (%d) => abort",
+ pC->pAddedClipCtxt->uiAudioFrameSize,
+ pC->ewc.WriterAudioAU.size);
+ M4OSA_TRACE1_0(
+ "PLEASE CONTACT SUPPORT TO EXTEND MAX AU SIZE IN THE PRODUCT LIBRARY");
+ err = M4ERR_UNSUPPORTED_MEDIA_TYPE;
+ return err;
+ }
+ pC->ewc.WriterAudioAU.size = pC->pAddedClipCtxt->uiAudioFrameSize;
+ pC->ewc.WriterAudioAU.CTS =
+ pC->pAddedClipCtxt->iAudioFrameCts + pC->pAddedClipCtxt->iAoffset;
+
+ /**
+ * Copy the AU itself */
+ memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
+ (void *)pC->pAddedClipCtxt->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+
+ /**
+ * Write the mixed AU */
+ M4OSA_TRACE2_2("J ---- write : cts = %ld [ 0x%x ]",
+ (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+ pC->ewc.WriterAudioAU.size);
+
+ err =
+ pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+ M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingCopyAdded:\
+ pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Increment the audio CTS for the next step */
+ pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingCopyAdded(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief Convert PCM of added track to the right ASF / nb of Channels
+ * @note
+ * @param pC (IN) VSS audio mixing internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(
+ M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+ M4OSA_ERR err;
+ int ssrcErr; /**< Error while ssrc processing */
+ M4OSA_UInt32 uiChannelConvertorNbSamples =
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize / sizeof(short)
+ / pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+ M4OSA_MemAddr8 tempPosBuffer;
+
+ M4OSA_UInt32 outFrameCount = uiChannelConvertorNbSamples;
+ /* Do we need to feed SSRC buffer In ? */
+ /**
+ * RC: This is not really optimum (memmove). We should handle this with linked list. */
+ while( pC->pPosInSsrcBufferIn - pC->pSsrcBufferIn < (M4OSA_Int32)pC->minimumBufferIn )
+ {
+ /* We need to get more PCM data */
+ if (pC->bNoLooping == M4OSA_TRUE)
+ {
+ err = M4WAR_NO_MORE_AU;
+ }
+ else
+ {
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+ }
+ if(pC->bjumpflag)
+ {
+ /**
+ * Jump at the Begin loop time */
+ M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+ err =
+ pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump\
+ (pC->pAddedClipCtxt->pReaderContext,
+ (M4_StreamHandler*)pC->pAddedClipCtxt->pAudioStream, &time);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingConvert():\
+ m_pReader->m_pFctJump(audio returns 0x%x", err);
+ return err;
+ }
+ pC->bjumpflag = M4OSA_FALSE;
+ }
+ M4OSA_TRACE2_3("E .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pAddedClipCtxt->iAudioFrameCts / pC->pAddedClipCtxt->scale_audio,
+ pC->pAddedClipCtxt->iAoffset / pC->pAddedClipCtxt->scale_audio,
+ pC->pAddedClipCtxt->uiAudioFrameSize);
+ if( M4WAR_NO_MORE_AU == err )
+ {
+ if(pC->bNoLooping == M4OSA_TRUE)
+ {
+ pC->uiEndLoop =0; /* Value 0 means no looping is required */
+ }
+ /**
+ * Decide what to do when audio is over */
+ if( pC->uiEndLoop > 0 )
+ {
+ /**
+ * Jump at the Begin loop time */
+ M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+ err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+ pC->pAddedClipCtxt->pReaderContext,
+ (M4_StreamHandler *)pC->pAddedClipCtxt->
+ pAudioStream, &time);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingConvert():\
+ m_pReader->m_pFctJump(audio returns 0x%x",
+ err);
+ return err;
+ }
+ }
+ else
+ {
+ /* Transition from encoding state to reading state */
+ err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingStepAudioMix(): pre-encode fails err = 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Second segment is over, state transition to third and return OK */
+ pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+ /**
+ * Return with no error so the step function will be called again */
+ M4OSA_TRACE2_0(
+ "M4VSS3GPP_intAudioMixingConvert():\
+ returning M4VSS3GPP_WAR_END_OF_ADDED_AUDIO (2->3) a");
+ return M4VSS3GPP_WAR_END_OF_ADDED_AUDIO;
+ }
+ }
+ else if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingConvert(): m_pFctGetNextAu(audio) returns 0x%x",
+ err);
+ return err;
+ }
+
+ err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pAddedClipCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingDoMixing:\
+ M4VSS3GPP_intClipDecodeCurrentAudioFrame(added) returns 0x%x",
+ err);
+ return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+ }
+
+ /* Copy decoded data into SSRC buffer in */
+ memcpy((void *)pC->pPosInSsrcBufferIn,
+ (void *)pC->pAddedClipCtxt->AudioDecBufferOut.m_dataAddress,
+ pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize);
+ /* Update position pointer into SSRC buffer In */
+
+ pC->pPosInSsrcBufferIn +=
+ pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+ }
+
+ /* Do the resampling / channel conversion if needed (=feed buffer out) */
+ if( pC->b_SSRCneeded == M4OSA_TRUE )
+ {
+ pC->ChannelConversion = 0;
+ if( pC->ChannelConversion > 0 )
+ {
+ while( pC->pPosInTempBuffer - pC->pTempBuffer
+ < (M4OSA_Int32)(pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+ *pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels)
+ / pC->ChannelConversion )
+ /* We use ChannelConversion variable because in case 2, we need twice less data */
+ {
+ ssrcErr = 0;
+ memset((void *)pC->pPosInTempBuffer,0,
+ (pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels));
+
+ LVAudioresample_LowQuality((short*)pC->pPosInTempBuffer,
+ (short*)pC->pSsrcBufferIn,
+ pC->iSsrcNbSamplOut,
+ pC->pLVAudioResampler);
+ if( 0 != ssrcErr )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingConvert: SSRC_Process returns 0x%x, returning ",
+ ssrcErr);
+ return ssrcErr;
+ }
+
+ pC->pPosInTempBuffer += pC->iSsrcNbSamplOut * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels;
+
+ /* Update SSRC bufferIn */
+ tempPosBuffer =
+ pC->pSsrcBufferIn + (pC->iSsrcNbSamplIn * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
+ pC->pPosInSsrcBufferIn - tempPosBuffer);
+ pC->pPosInSsrcBufferIn -= pC->iSsrcNbSamplIn * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels;
+ }
+ }
+ else
+ {
+ while( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+ < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+ {
+ ssrcErr = 0;
+ memset((void *)pC->pPosInSsrcBufferOut,0,
+ (pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels));
+
+ LVAudioresample_LowQuality((short*)pC->pPosInSsrcBufferOut,
+ (short*)pC->pSsrcBufferIn,
+ pC->iSsrcNbSamplOut,
+ pC->pLVAudioResampler);
+ if( 0 != ssrcErr )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioMixingConvert: SSRC_Process returns 0x%x, returning ",
+ ssrcErr);
+ return ssrcErr;
+ }
+ pC->pPosInSsrcBufferOut +=
+ pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels;
+
+ /* Update SSRC bufferIn */
+ tempPosBuffer =
+ pC->pSsrcBufferIn + (pC->iSsrcNbSamplIn * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
+ pC->pPosInSsrcBufferIn - tempPosBuffer);
+ pC->pPosInSsrcBufferIn -= pC->iSsrcNbSamplIn * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels;
+ }
+ }
+
+ /* Convert Stereo<->Mono */
+ switch( pC->ChannelConversion )
+ {
+ case 0: /* No channel conversion */
+ break;
+
+ case 1: /* stereo to mono */
+ if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+ < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+ {
+ From2iToMono_16((short *)pC->pTempBuffer,
+ (short *)pC->pSsrcBufferOut,
+ (short)(uiChannelConvertorNbSamples));
+ /* Update pTempBuffer */
+ tempPosBuffer = pC->pTempBuffer
+ + (uiChannelConvertorNbSamples * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.
+ uiNbChannels); /* Buffer is in bytes */
+ memmove((void *)pC->pTempBuffer, (void *)tempPosBuffer,
+ pC->pPosInTempBuffer - tempPosBuffer);
+ pC->pPosInTempBuffer -=
+ (uiChannelConvertorNbSamples * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ pC->pPosInSsrcBufferOut +=
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+ }
+ break;
+
+ case 2: /* mono to stereo */
+ if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+ < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+ {
+ MonoTo2I_16((short *)pC->pTempBuffer,
+ (short *)pC->pSsrcBufferOut,
+ (short)uiChannelConvertorNbSamples);
+ tempPosBuffer = pC->pTempBuffer
+ + (uiChannelConvertorNbSamples * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ memmove((void *)pC->pTempBuffer, (void *)tempPosBuffer,
+ pC->pPosInTempBuffer - tempPosBuffer);
+ pC->pPosInTempBuffer -=
+ (uiChannelConvertorNbSamples * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ pC->pPosInSsrcBufferOut +=
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+ }
+ break;
+ }
+ }
+ else if( pC->ChannelConversion > 0 )
+ {
+ //M4OSA_UInt32 uiChannelConvertorNbSamples =
+ // pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize / sizeof(short) /
+ // pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+ /* Convert Stereo<->Mono */
+ switch( pC->ChannelConversion )
+ {
+ case 0: /* No channel conversion */
+ break;
+
+ case 1: /* stereo to mono */
+ if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+ < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+ {
+ From2iToMono_16((short *)pC->pSsrcBufferIn,
+ (short *)pC->pSsrcBufferOut,
+ (short)(uiChannelConvertorNbSamples));
+ /* Update pTempBuffer */
+ tempPosBuffer = pC->pSsrcBufferIn
+ + (uiChannelConvertorNbSamples * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.
+ uiNbChannels); /* Buffer is in bytes */
+ memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
+ pC->pPosInSsrcBufferIn - tempPosBuffer);
+ pC->pPosInSsrcBufferIn -=
+ (uiChannelConvertorNbSamples * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ pC->pPosInSsrcBufferOut +=
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+ }
+ break;
+
+ case 2: /* mono to stereo */
+ if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+ < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+ {
+ MonoTo2I_16((short *)pC->pSsrcBufferIn,
+ (short *)pC->pSsrcBufferOut,
+ (short)uiChannelConvertorNbSamples);
+ tempPosBuffer = pC->pSsrcBufferIn
+ + (uiChannelConvertorNbSamples * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
+ pC->pPosInSsrcBufferIn - tempPosBuffer);
+ pC->pPosInSsrcBufferIn -=
+ (uiChannelConvertorNbSamples * sizeof(short)
+ * pC->pAddedClipCtxt->pSettings->
+ ClipProperties.uiNbChannels);
+ pC->pPosInSsrcBufferOut +=
+ pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+ }
+ break;
+ }
+ }
+ else
+ {
+ /* No channel conversion nor sampl. freq. conversion needed, just buffer management */
+ pC->pPosInSsrcBufferOut = pC->pPosInSsrcBufferIn;
+ }
+
+ return M4NO_ERROR;
+}
+
+M4OSA_Int32 M4VSS3GPP_getDecibelSound( M4OSA_UInt32 value )
+ {
+    /**
+     * Map a peak PCM amplitude onto an approximate sound level in dB.
+     * The amplitude range (0, 0x8000] is split into bands; each table entry
+     * holds the exclusive lower bound of a band and the dB value returned
+     * for it. Bands are scanned from the loudest downwards, so the first
+     * bound the value exceeds decides the result.
+     * Silence (0) and peaks above 16-bit full scale (> 0x8000) map to 0.
+     * Note: the (0x0007, 0x0010] region deliberately yields 24 dB twice,
+     * and the lower bounds 0x0007/0x0003 are not powers of two — this
+     * reproduces the original mapping exactly.
+     */
+    static const struct
+    {
+        M4OSA_UInt32 lowBound; /* band is (lowBound, previous entry's bound] */
+        M4OSA_Int32  dbValue;  /* dB figure reported for that band */
+    } dbTable[] =
+    {
+        { 0x4000, 90 }, /* (16384, 32768] */
+        { 0x2000, 84 }, /* ( 8192, 16384] */
+        { 0x1000, 78 }, /* ( 4096,  8192] */
+        { 0x0800, 72 }, /* ( 2048,  4096] */
+        { 0x0400, 66 }, /* ( 1024,  2048] */
+        { 0x0200, 60 }, /* (  512,  1024] */
+        { 0x0100, 54 }, /* (  256,   512] */
+        { 0x0080, 48 }, /* (  128,   256] */
+        { 0x0040, 42 }, /* (   64,   128] */
+        { 0x0020, 36 }, /* (   32,    64] */
+        { 0x0010, 30 }, /* (   16,    32] */
+        { 0x0008, 24 }, /* (    8,    16] */
+        { 0x0007, 24 }, /* (    7,     8] */
+        { 0x0003, 18 }, /* (    3,     7] */
+        { 0x0001, 12 }, /* (    1,     3] */
+        { 0x0000,  6 }  /* (    0,     1] */
+    };
+    M4OSA_UInt32 band;
+
+    /* Silence, or a peak beyond the representable 16-bit range: 0 dB */
+    if( value == 0 || value > 0x8000 )
+        return 0;
+
+    for ( band = 0; band < sizeof(dbTable) / sizeof(dbTable[0]); band++ )
+    {
+        if( value > dbTable[band].lowBound )
+            return dbTable[band].dbValue;
+    }
+
+    /* Unreachable: any value in (0, 0x8000] exceeds the final bound of 0 */
+    return 0;
+ }
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief Mix the current audio AUs (decoder, mix, encode)
+ * @note
+ * @param pC (IN) VSS audio mixing internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int16 *pPCMdata1;   /**< Cursor over the decoded original-track PCM */
+    M4OSA_Int16 *pPCMdata2;   /**< Cursor over the decoded added-track PCM */
+    M4OSA_UInt32 uiPCMsize;   /**< Number of 16-bit samples left to mix */
+
+    M4ENCODER_AudioBuffer pEncInBuffer; /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+    M4OSA_Time
+        frameTimeDelta; /**< Duration of the encoded (then written) data */
+    M4OSA_MemAddr8 tempPosBuffer;
+    /* ducking variable */
+    M4OSA_UInt16 loopIndex = 0;
+    M4OSA_Int16 *pPCM16Sample = M4OSA_NULL; /**< 16-bit view of original PCM for peak search */
+    M4OSA_Int32 peakDbValue = 0;
+    M4OSA_Int32 previousDbValue = 0;
+    M4OSA_UInt32 i; /* NOTE(review): declared but not used in this function */
+
+    /**
+    * Decode original audio track AU */
+
+    err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pInputClipCtxt);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing:\
+            M4VSS3GPP_intClipDecodeCurrentAudioFrame(orig) returns 0x%x",
+            err);
+        return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+    }
+
+    /* When resampling, channel conversion or MP3 decoding is involved, the
+       added track is decoded+converted by M4VSS3GPP_intAudioMixingConvert()
+       and its PCM ends up in pSsrcBufferOut; otherwise decode it directly. */
+    if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        == M4VIDEOEDITING_kMP3 )
+    {
+        err = M4VSS3GPP_intAudioMixingConvert(pC);
+
+        if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+        {
+            /* Propagate "added track finished" so the caller can switch state */
+            return err;
+        }
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDoMixing: M4VSS3GPP_intAudioMixingConvert returned 0x%x",
+                err);
+            return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+        }
+
+        /**
+        * Get the output AU to write into */
+        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /* Converted added-track PCM lives in the SSRC output buffer */
+        pPCMdata2 = (M4OSA_Int16 *)pC->pSsrcBufferOut;
+    }
+    else
+    {
+        /**
+        * Decode added audio track AU */
+        err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pAddedClipCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDoMixing:\
+                M4VSS3GPP_intClipDecodeCurrentAudioFrame(added) returns 0x%x",
+                err);
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+
+        /**
+        * Check both clips decoded the same amount of PCM samples */
+        if( pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+            != pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingDoMixing:\
+                both clips AU must have the same decoded PCM size!");
+            return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+        }
+        pPCMdata2 = (M4OSA_Int16 *)pC->pAddedClipCtxt->AudioDecBufferOut.m_dataAddress;
+    }
+
+    /**
+    * Mix the two decoded PCM audios */
+    pPCMdata1 =
+        (M4OSA_Int16 *)pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+    uiPCMsize = pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+        / 2; /*buffer size (bytes) to number of sample (int16)*/
+
+    if( pC->b_DuckingNeedeed )
+    {
+        /* Ducking path: measure the primary-track loudness over a sliding
+           window, then fade the background track in/out accordingly. */
+        loopIndex = 0;
+        peakDbValue = 0;
+        previousDbValue = peakDbValue;
+
+        pPCM16Sample = (M4OSA_Int16 *)pC->pInputClipCtxt->
+            AudioDecBufferOut.m_dataAddress;
+
+        //Calculate the peak value (max absolute sample amplitude of this AU)
+        while( loopIndex
+            < pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+            / sizeof(M4OSA_Int16) )
+        {
+            if( pPCM16Sample[loopIndex] >= 0 )
+            {
+                peakDbValue = previousDbValue > pPCM16Sample[loopIndex]
+                    ? previousDbValue : pPCM16Sample[loopIndex];
+                previousDbValue = peakDbValue;
+            }
+            else
+            {
+                peakDbValue = previousDbValue > -pPCM16Sample[loopIndex]
+                    ? previousDbValue : -pPCM16Sample[loopIndex];
+                previousDbValue = peakDbValue;
+            }
+            loopIndex++;
+        }
+
+        /* Store this AU's loudness (in dB) in the sliding window */
+        pC->audioVolumeArray[pC->audVolArrIndex] =
+            M4VSS3GPP_getDecibelSound(peakDbValue);
+
+        /* WINDOW_SIZE is 10 by default and check for threshold is done after 10 cycles */
+        if( pC->audVolArrIndex >= WINDOW_SIZE - 1 )
+        {
+            pC->bDoDucking =
+                M4VSS3GPP_isThresholdBreached((M4OSA_Int32 *)&(pC->audioVolumeArray),
+                pC->audVolArrIndex, pC->InDucking_threshold);
+
+            pC->audVolArrIndex = 0;
+        }
+        else
+        {
+            pC->audVolArrIndex++;
+        }
+
+        /*
+        *Below logic controls the mixing weightage for Background Track and Primary Track
+        *for the duration of window under analysis to give fade-out for Background and fade-in
+        *for primary
+        *
+        *Current fading factor is distributed in equal range over the defined window size.
+        *
+        *For a window size = 25 (500 ms (window under analysis) / 20 ms (sample duration))
+        *
+        */
+
+        if( pC->bDoDucking )
+        {
+            if( pC->duckingFactor
+                > pC->InDucking_lowVolume ) // FADE OUT BG Track
+            {
+                // decrement ducking factor in steps of the low-volume level
+                // until the background reaches that low volume
+                pC->duckingFactor -= (pC->InDucking_lowVolume);
+            }
+            else
+            {
+                pC->duckingFactor = pC->InDucking_lowVolume;
+            }
+        }
+        else
+        {
+            if( pC->duckingFactor < 1.0 ) // FADE IN BG Track
+            {
+                // increment ducking factor in steps of the low-volume level
+                // until the background is back to its original volume (1.0)
+                pC->duckingFactor += (pC->InDucking_lowVolume);
+            }
+            else
+            {
+                pC->duckingFactor = 1.0;
+            }
+        }
+        /* endif - ducking_enable */
+
+        /* Mixing Logic */
+
+        while( uiPCMsize-- > 0 )
+        {
+            M4OSA_Int32 temp;
+
+            /* set vol factor for BT and PT */
+            *pPCMdata2 = (M4OSA_Int16)(*pPCMdata2 * pC->fBTVolLevel);
+
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata1 * pC->fPTVolLevel);
+
+            /* mix the two samples: apply ducking to BG, then average */
+
+            *pPCMdata2 = (M4OSA_Int16)(( *pPCMdata2) * (pC->duckingFactor));
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata2 / 2 + *pPCMdata1 / 2);
+
+
+            if( *pPCMdata1 < 0 )
+            {
+                temp = -( *pPCMdata1)
+                    * 2; // bring to same Amplitude level as it was original
+
+                if( temp > 32767 )
+                {
+                    *pPCMdata1 = -32766; // less then max allowed value
+                }
+                else
+                {
+                    *pPCMdata1 = (M4OSA_Int16)(-temp);
+                }
+            }
+            else
+            {
+                temp = ( *pPCMdata1)
+                    * 2; // bring to same Amplitude level as it was original
+
+                /* NOTE(review): this branch tests > 32768 while the negative
+                   branch tests > 32767; temp == 32768 is therefore cast to
+                   M4OSA_Int16 and overflows — confirm whether the bound
+                   should be 32767 here as well. */
+                if( temp > 32768 )
+                {
+                    *pPCMdata1 = 32767; // less than max allowed value
+                }
+                else
+                {
+                    *pPCMdata1 = (M4OSA_Int16)temp;
+                }
+            }
+
+            pPCMdata2++;
+            pPCMdata1++;
+        }
+    }
+    else
+    {
+        /* No ducking: simple weighted sum of the two tracks */
+        while( uiPCMsize-- > 0 )
+        {
+            /* mix the two samples */
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata1 * pC->fOrigFactor * pC->fPTVolLevel
+                + *pPCMdata2 * pC->fAddedFactor * pC->fBTVolLevel );
+
+            pPCMdata1++;
+            pPCMdata2++;
+        }
+    }
+
+    /* Update pC->pSsrcBufferOut buffer: drop the samples just consumed by
+       shifting the remaining data to the front of the buffer */
+
+    if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0 )
+    {
+        tempPosBuffer = pC->pSsrcBufferOut
+            + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        memmove((void *)pC->pSsrcBufferOut, (void *)tempPosBuffer,
+            pC->pPosInSsrcBufferOut - tempPosBuffer);
+        pC->pPosInSsrcBufferOut -=
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    }
+    else if( pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        == M4VIDEOEDITING_kMP3 )
+    {
+        /* MP3 without SSRC: the consumed data sits in the SSRC *input* buffer */
+        tempPosBuffer = pC->pSsrcBufferIn
+            + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        memmove((void *)pC->pSsrcBufferIn, (void *)tempPosBuffer,
+            pC->pPosInSsrcBufferIn - tempPosBuffer);
+        pC->pPosInSsrcBufferIn -=
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    }
+
+    /* [Mono] or [Stereo interleaved] : all is in one buffer */
+    pEncInBuffer.pTableBuffer[0] =
+        pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+    pEncInBuffer.pTableBufferSize[0] =
+        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+    pEncInBuffer.pTableBufferSize[1] = 0;
+
+    /* Time in ms from data size, because it is PCM16 samples */
+    frameTimeDelta =
+        pEncInBuffer.pTableBufferSize[0] / sizeof(short) / pC->ewc.uiNbChannels;
+
+    /**
+    * Prepare output buffer */
+    pEncOutBuffer.pTableBuffer[0] =
+        (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+    pEncOutBuffer.pTableBufferSize[0] = 0;
+
+    M4OSA_TRACE2_0("K **** blend AUs");
+
+    /**
+    * Encode the PCM audio */
+    err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(pC->ewc.pAudioEncCtxt,
+        &pEncInBuffer, &pEncOutBuffer);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing(): pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Set AU cts and size */
+    pC->ewc.WriterAudioAU.size =
+        pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+    pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+    /**
+    * Write the AU */
+    M4OSA_TRACE2_2("L ---- write : cts = %ld [ 0x%x ]",
+        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+        pC->ewc.WriterAudioAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing: pWriterDataFcts->pProcessAU returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Increment the audio CTS for the next step */
+    pC->ewc.dATo += frameTimeDelta / pC->ewc.scale_audio;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingDoMixing(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief Decode/encode a few AU backward to initiate the encoder for later Mix segment.
+ * @note
+ * @param pC (IN) VSS audio mixing internal context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4ENCODER_AudioBuffer pEncInBuffer; /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+    M4OSA_Time
+        frameTimeDelta = 0; /**< Duration of the encoded (then written) data */
+
+    M4OSA_Int32 iTargetCts, iCurrentCts;
+
+    /**
+    * 'BZZZ' bug fix:
+    * add a silence frame */
+    err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingTransition():\
+            M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* Current audio time in stream timescale, rounded to nearest */
+    iCurrentCts = (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+    /* Do not do pre-encode step if there is no mixing (remove, 100 %, or not editable) */
+    if( M4OSA_FALSE == pC->bAudioMixingIsNeeded )
+    {
+        /**
+        * Advance in the original audio stream to reach the current time
+        * (We don't want iAudioCTS to be modified by the jump function,
+        * so we have to use a local variable). */
+        err = M4VSS3GPP_intClipJumpAudioAt(pC->pInputClipCtxt, &iCurrentCts);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingTransition:\
+                M4VSS3GPP_intClipJumpAudioAt() returns 0x%x!", err);
+            return err;
+        }
+    }
+    else
+    {
+        /**< don't try to pre-decode if clip is at its beginning... */
+        if( iCurrentCts > 0 )
+        {
+            /**
+            * Get the output AU to write into */
+            err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                &pC->ewc.WriterAudioAU);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition:\
+                    pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Jump a few AUs backward so the encoder can be "warmed up"
+            * on real data before the actual mix segment starts */
+            iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+                * pC->ewc.iSilenceFrameDuration;
+
+            if( iTargetCts < 0 )
+            {
+                iTargetCts = 0; /**< Sanity check */
+            }
+
+            err = M4VSS3GPP_intClipJumpAudioAt(pC->pInputClipCtxt, &iTargetCts);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                    M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Decode/encode up to the wanted position */
+            while( pC->pInputClipCtxt->iAudioFrameCts < iCurrentCts )
+            {
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+                M4OSA_TRACE2_3("M .... read : cts = %.0f + %.0f [ 0x%x ]",
+                    pC->pInputClipCtxt->iAudioFrameCts
+                    / pC->pInputClipCtxt->scale_audio,
+                    pC->pInputClipCtxt->iAoffset
+                    / pC->pInputClipCtxt->scale_audio,
+                    pC->pInputClipCtxt->uiAudioFrameSize);
+
+                /* Warnings (e.g. end of stream) are tolerated here;
+                   only true errors abort the prefetch */
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                        M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(
+                    pC->pInputClipCtxt);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                pEncInBuffer.pTableBuffer[0] =
+                    pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+                pEncInBuffer.pTableBufferSize[0] =
+                    pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                pEncInBuffer.pTableBufferSize[1] = 0;
+
+                /* Time in ms from data size, because it is PCM16 samples */
+                frameTimeDelta =
+                    pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                    / pC->ewc.uiNbChannels;
+
+                /**
+                * Prepare output buffer */
+                pEncOutBuffer.pTableBuffer[0] =
+                    (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                M4OSA_TRACE2_0("N **** pre-encode");
+
+                /**
+                * Encode the PCM audio */
+                err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                    pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition():\
+                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                        err);
+                    return err;
+                }
+            }
+
+            /**
+            * Set AU cts and size (only the LAST pre-encoded AU is written;
+            * size/frameTimeDelta come from the final loop iteration) */
+            pC->ewc.WriterAudioAU.size = pEncOutBuffer.pTableBufferSize[
+                0]; /**< Get the size of encoded data */
+            pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+            /**
+            * Write the AU */
+            M4OSA_TRACE2_2("O ---- write : cts = %ld [ 0x%x ]",
+                (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                pC->ewc.WriterAudioAU.size);
+
+            err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                &pC->ewc.WriterAudioAU);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition:\
+                    pWriterDataFcts->pProcessAU returns 0x%x!", err);
+                return err;
+            }
+
+            /**
+            * Increment the audio CTS for the next step */
+            pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder()
+ * @brief Creates the video encoder
+ * @note
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_AdvancedParams EncParams; /**< Encoder settings built from VSS state */
+
+    /**
+    * Simulate a writer interface with our specific function */
+    pC->ewc.OurWriterDataInterface.pProcessAU =
+        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
+                                but it follow the writer interface */
+    pC->ewc.OurWriterDataInterface.pStartAU =
+        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
+                              but it follow the writer interface */
+    pC->ewc.OurWriterDataInterface.pWriterContext =
+        (M4WRITER_Context)
+        pC; /**< We give the internal context as writer context */
+
+    /**
+    * Get the encoder interface, if not already done */
+    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
+    {
+        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
+            pC->ewc.VideoStreamType);
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder: setCurrentEncoder returns 0x%x",
+            err);
+        M4ERR_CHECK_RETURN(err);
+    }
+
+    /**
+    * Set encoder shell parameters according to VSS settings */
+
+    /* Common parameters */
+    EncParams.InputFormat = M4ENCODER_kIYUV420;
+    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
+    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
+    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;
+    EncParams.videoProfile = pC->ewc.outputVideoProfile;
+    EncParams.videoLevel = pC->ewc.outputVideoLevel;
+
+    /* No strict regulation in video editor */
+    /* Because of the effects and transitions we should allow more flexibility */
+    /* Also it prevents to drop important frames
+    (with a bad result on sheduling and block effetcs) */
+    EncParams.bInternalRegulation = M4OSA_FALSE;
+    EncParams.FrameRate = M4ENCODER_kVARIABLE_FPS;
+
+    /**
+    * Other encoder settings (defaults) */
+    EncParams.uiHorizontalSearchRange = 0;     /* use default */
+    EncParams.uiVerticalSearchRange = 0;       /* use default */
+    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+    EncParams.uiIVopPeriod = 0;                /* use default */
+    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
+    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
+    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+
+    /* Codec-specific overrides of the defaults above */
+    switch( pC->ewc.VideoStreamType )
+    {
+        case M4SYS_kH263:
+
+            EncParams.Format = M4ENCODER_kH263;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            break;
+
+        case M4SYS_kMPEG_4:
+
+            EncParams.Format = M4ENCODER_kMPEG4;
+
+            EncParams.uiStartingQuantizerValue = 8;
+            EncParams.uiRateFactor = 1;
+
+            /* Mirror the data-partitioning setting of the output stream */
+            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
+            {
+                EncParams.bErrorResilience = M4OSA_FALSE;
+                EncParams.bDataPartitioning = M4OSA_FALSE;
+            }
+            else
+            {
+                EncParams.bErrorResilience = M4OSA_TRUE;
+                EncParams.bDataPartitioning = M4OSA_TRUE;
+            }
+            break;
+
+        case M4SYS_kH264:
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder: M4SYS_H264");
+
+            EncParams.Format = M4ENCODER_kH264;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder: Unknown videoStreamType 0x%x",
+                pC->ewc.VideoStreamType);
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
+
+    /* Keep the input clip's video bitrate for the re-encode */
+    EncParams.Bitrate =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoBitrate;
+
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctInit");
+    /**
+    * Init the video encoder (advanced settings version of the encoder Open function) */
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
+        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
+        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
+        pC->ShellAPI.pCurrentVideoEncoderUserData);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+            pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* Encoder state machine: Closed -> Stopped -> Running,
+       tracked so the destroy path knows how far to unwind */
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctOpen");
+    M4OSA_TRACE1_2("vss: audio mix encoder open profile :%d, level %d",
+        EncParams.videoProfile, EncParams.videoLevel);
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
+        &pC->ewc.WriterVideoAU, &EncParams);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+            pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctStart");
+
+    /* pFctStart is optional in the encoder interface */
+    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
+    {
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+                pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder()
+ * @brief Destroy the video encoder
+ * @note
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err = M4NO_ERROR; /* last error seen; teardown continues on failure */
+
+    if( M4OSA_NULL != pC->ewc.pEncContext )
+    {
+        /* Unwind the encoder state machine step by step:
+           Running -> Stopped -> Closed -> (cleanup) NoEncoder.
+           Errors are traced but do not abort the teardown. */
+        if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
+        {
+            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+            {
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
+                    pC->ewc.pEncContext);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
+                        err);
+                }
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+        }
+
+        /* Has the encoder actually been opened? Don't close it if that's not the case. */
+        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
+                pC->ewc.pEncContext);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
+                    err);
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+        }
+
+        /* Cleanup is attempted unconditionally once a context exists */
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+        }
+
+        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
+        /**
+        * Reset variable */
+        pC->ewc.pEncContext = M4OSA_NULL;
+    }
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_intAudioMixingDestroyVideoEncoder: returning 0x%x", err);
+    return err;
+}
+
+M4OSA_Bool M4VSS3GPP_isThresholdBreached( M4OSA_Int32 *averageValue,
+    M4OSA_Int32 storeCount, M4OSA_Int32 thresholdValue )
+{
+    /**
+     * Return M4OSA_TRUE when the mean of the first storeCount entries of
+     * averageValue exceeds thresholdValue, M4OSA_FALSE otherwise.
+     * Used by the ducking logic to decide whether the primary track is
+     * loud enough to fade the background track out.
+     *
+     * @param averageValue   (IN) Array of per-AU loudness values (dB)
+     * @param storeCount     (IN) Number of valid entries in averageValue
+     * @param thresholdValue (IN) Ducking threshold (dB)
+     * @return M4OSA_TRUE if the average loudness exceeds the threshold
+     */
+    M4OSA_Int32 i;
+    M4OSA_Int32 finalValue = 0;
+
+    /* Guard against an empty window (or a missing array): nothing measured
+       means nothing breached. This also prevents a division by zero that
+       the original code allowed when storeCount == 0. */
+    if( M4OSA_NULL == averageValue || storeCount <= 0 )
+        return M4OSA_FALSE;
+
+    for ( i = 0; i < storeCount; i++ )
+        finalValue += averageValue[i];
+
+    finalValue = finalValue / storeCount;
+
+    return ( finalValue > thresholdValue ) ? M4OSA_TRUE : M4OSA_FALSE;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Clip.c b/libvideoeditor/vss/src/M4VSS3GPP_Clip.c
new file mode 100755
index 0000000..a79128d
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Clip.c
@@ -0,0 +1,2100 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_Clip.c
+ * @brief Implementation of functions related to input clip management.
+ * @note All functions in this file are static, i.e. non public
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /* OSAL memory management */
+#include "M4OSA_Debug.h" /* OSAL debug management */
+
+
+/**
+ * Common headers (for aac) */
+#include "M4_Common.h"
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+/* OSAL header files */
+#include "M4OSA_CharStar.h"
+
+/**
+ ******************************************************************************
+ * define Static function prototypes
+ ******************************************************************************
+ */
+
+static M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder(
+ M4VSS3GPP_ClipContext *pClipCtxt );
+
+static M4OSA_ERR M4VSS3GPP_intCheckAndGetCodecAacProperties(
+ M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipOpen()
+ * @brief Open a clip. Creates a clip context.
+ * @note This header documents M4VSS3GPP_intClipOpen(), defined further below;
+ * the function immediately following (M4VSS3GPP_intClipInit) only
+ * allocates and initializes the clip context.
+ * @param hClipCtxt (OUT) Return the internal clip context
+ * @param pClipSettings (IN) Edit settings of this clip. The module will keep a
+ * reference to this pointer
+ * @param pFileReadPtrFct (IN) Pointer to OSAL file reader functions
+ * @param bSkipAudioTrack (IN) If true, do not open the audio
+ * @param bFastOpenMode (IN) If true, use the fast mode of the 3gpp reader
+ * (only the first AU is read)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_intClipInit( M4VSS3GPP_ClipContext ** hClipCtxt,
+        M4OSA_FileReadPointer *pFileReadPtrFct )
+{
+    M4VSS3GPP_ClipContext *pCtxt;
+    M4OSA_ERR err;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == hClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipInit: hClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipInit: pFileReadPtrFct is M4OSA_NULL");
+
+    /* Allocate the clip context and hand it back to the caller */
+    pCtxt = (M4VSS3GPP_ClipContext *)M4OSA_32bitAlignedMalloc(
+        sizeof(M4VSS3GPP_ClipContext), M4VSS3GPP,
+        (M4OSA_Char *)"M4VSS3GPP_ClipContext");
+    *hClipCtxt = pCtxt;
+
+    if( M4OSA_NULL == pCtxt )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipInit(): unable to allocate M4VSS3GPP_ClipContext,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipInit(): clipCtxt=0x%x", *hClipCtxt);
+
+    /* Start from a fully zeroed context */
+    memset((void *)pCtxt, 0, sizeof(M4VSS3GPP_ClipContext));
+
+    /* Settings are provided later, at open time */
+    pCtxt->pSettings = M4OSA_NULL;
+
+    /* Time offsets and clip read states */
+    pCtxt->iVoffset = 0;
+    pCtxt->iAoffset = 0;
+    pCtxt->Vstatus = M4VSS3GPP_kClipStatus_READ;
+    pCtxt->Astatus = M4VSS3GPP_kClipStatus_READ;
+
+    /* Reader, streams and access units */
+    pCtxt->pReaderContext = M4OSA_NULL;
+    pCtxt->pVideoStream = M4OSA_NULL;
+    pCtxt->pAudioStream = M4OSA_NULL;
+    pCtxt->VideoAU.m_dataAddress = M4OSA_NULL;
+    pCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+
+    /* Video decoding state */
+    pCtxt->pViDecCtxt = M4OSA_NULL;
+    pCtxt->iVideoDecCts = 0;
+    pCtxt->iVideoRenderCts = 0;
+    pCtxt->lastDecodedPlane = M4OSA_NULL;
+    pCtxt->iActualVideoBeginCut = 0;
+    pCtxt->iActualAudioBeginCut = 0;
+    pCtxt->bVideoAuAvailable = M4OSA_FALSE;
+    pCtxt->bFirstAuWritten = M4OSA_FALSE;
+    pCtxt->bMpeg4GovState = M4OSA_FALSE;
+
+    /* Audio decoding state */
+    pCtxt->bAudioFrameAvailable = M4OSA_FALSE;
+    pCtxt->pAudioFramePtr = M4OSA_NULL;
+    pCtxt->iAudioFrameCts = 0;
+    pCtxt->pAudioDecCtxt = 0;
+    pCtxt->AudioDecBufferOut.m_bufferSize = 0;
+    pCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+
+    /* File access and YUV work planes */
+    pCtxt->pFileReadPtrFct = pFileReadPtrFct;
+    pCtxt->pPlaneYuv = M4OSA_NULL;
+    pCtxt->pPlaneYuvWithEffect = M4OSA_NULL;
+    pCtxt->m_pPreResizeFrame = M4OSA_NULL;
+    pCtxt->bGetYuvDataFromDecoder = M4OSA_TRUE;
+
+    /* Reset the media/codec interface tables ... */
+    err = M4VSS3GPP_clearInterfaceTables(&pCtxt->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /* ... then subscribe the available media formats and codecs */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pCtxt->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    return M4NO_ERROR;
+}
+
+/* Note: if the clip is opened in fast mode, it can only be used for analysis and nothing else. */
+M4OSA_ERR M4VSS3GPP_intClipOpen( M4VSS3GPP_ClipContext *pClipCtxt,
+    M4VSS3GPP_ClipSettings *pClipSettings, M4OSA_Bool bSkipAudioTrack,
+    M4OSA_Bool bFastOpenMode, M4OSA_Bool bAvoidOpeningVideoDec )
+{
+    M4OSA_ERR err;
+    M4READER_MediaFamily mediaFamily;
+    M4_StreamHandler *pStreamHandler;
+    M4_StreamHandler dummyStreamHandler;
+    M4OSA_Int32 iDuration;
+    M4OSA_Void *decoderUserData;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4DECODER_MPEG4_DecoderConfigInfo dummy;
+    M4DECODER_VideoSize videoSizeFromDSI;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    M4DECODER_OutputFilter FilterOption;
+    /* NOTE(review): fixed-size scratch buffer for the PCM temp-file name built
+       below; a pFile longer than ~93 bytes would overflow it once the
+       "_<freq>_<ch>.pcm" suffix is appended -- TODO confirm that callers bound
+       the clip path length. */
+    M4OSA_Char pTempFile[100];
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipOpen: pClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipOpen: pClipSettings is M4OSA_NULL");
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intClipOpen: called with pClipCtxt: 0x%x, bAvoidOpeningVideoDec=0x%x",
+        pClipCtxt, bAvoidOpeningVideoDec);
+    /**
+    * Keep a pointer to the clip settings. Remember that we don't possess it! */
+    pClipCtxt->pSettings = pClipSettings;
+    /* Still-image (ARGB8888) clips take a short path: no file reader is set
+       up; a dedicated video decoder is created on a dummy stream handler and
+       the function returns immediately. */
+    if(M4VIDEOEDITING_kFileType_ARGB8888 == pClipCtxt->pSettings->FileType) {
+        M4OSA_TRACE3_0("M4VSS3GPP_intClipOpen: Image stream; set current vid dec");
+        err = M4VSS3GPP_setCurrentVideoDecoder(
+            &pClipCtxt->ShellAPI, M4DA_StreamTypeVideoARGB8888);
+        M4ERR_CHECK_RETURN(err);
+
+        decoderUserData = M4OSA_NULL;
+
+        err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctCreate(
+            &pClipCtxt->pViDecCtxt,
+            &dummyStreamHandler,
+            pClipCtxt->ShellAPI.m_pReader,
+            pClipCtxt->ShellAPI.m_pReaderDataIt,
+            &pClipCtxt->VideoAU,
+            decoderUserData);
+
+        if (M4NO_ERROR != err) {
+            M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen: \
+                m_pVideoDecoder->m_pFctCreate returns 0x%x", err);
+            return err;
+        }
+        M4OSA_TRACE3_1("M4VSS3GPP_intClipOpen: \
+            Vid dec started; pViDecCtxt=0x%x", pClipCtxt->pViDecCtxt);
+
+        return M4NO_ERROR;
+    }
+
+    /**
+    * Get the correct reader interface */
+    err = M4VSS3GPP_setCurrentReader(&pClipCtxt->ShellAPI,
+        pClipCtxt->pSettings->FileType);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Init the 3GPP or MP3 reader */
+    err =
+        pClipCtxt->ShellAPI.m_pReader->m_pFctCreate(&pClipCtxt->pReaderContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctCreate returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Link the reader interface to the reader context (used by the decoder to know the reader) */
+    pClipCtxt->ShellAPI.m_pReaderDataIt->m_readerContext =
+        pClipCtxt->pReaderContext;
+
+    /**
+    * Set the OSAL read function set */
+    err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+        pClipCtxt->pReaderContext,
+        M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+        (M4OSA_DataOption)(pClipCtxt->pFileReadPtrFct));
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctSetOption returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Set the fast open mode if asked (3GPP only) */
+    if( M4VIDEOEDITING_kFileType_3GPP == pClipCtxt->pSettings->FileType )
+    {
+        if( M4OSA_TRUE == bFastOpenMode )
+        {
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+                pClipCtxt->pReaderContext,
+                M4READER_3GP_kOptionID_FastOpenMode, M4OSA_NULL);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen():\
+                    m_pReader->m_pFctSetOption(FastOpenMode) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+        /**
+        * Set the skip audio option if asked */
+        if( M4OSA_TRUE == bSkipAudioTrack )
+        {
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+                pClipCtxt->pReaderContext,
+                M4READER_3GP_kOptionID_VideoOnly, M4OSA_NULL);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctSetOption(VideoOnly) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+    }
+    /* PCM clips are read from a side file whose name encodes the clip
+       properties: "<pFile>_<samplingFrequency>_<nbChannels>.pcm".
+       Build that name here, then open it instead of pFile itself. */
+    if(pClipCtxt->pSettings->FileType == M4VIDEOEDITING_kFileType_PCM)
+    {
+
+        /* NOTE(review): assumes M4OSA_chrNCopy NUL-terminates the destination
+           when copying strlen(pFile) characters -- confirm against the OSAL
+           CharStar implementation, otherwise the strncat calls below would
+           append after garbage. */
+        M4OSA_chrNCopy(pTempFile,pClipSettings->pFile,strlen(pClipSettings->pFile));
+
+        /* Append the "_<samplingFrequency>" part of the suffix */
+        switch (pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency)
+        {
+            case 8000:
+                strncat((char *)pTempFile,(const char *)"_8000",6);
+                break;
+            case 11025:
+                strncat((char *)pTempFile,(const char *)"_11025",6);
+                break;
+            case 12000:
+                strncat((char *)pTempFile,(const char *)"_12000",6);
+                break;
+            case 16000:
+                strncat((char *)pTempFile,(const char *)"_16000",6);
+                break;
+            case 22050:
+                strncat((char *)pTempFile,(const char *)"_22050",6);
+                break;
+            case 24000:
+                strncat((char *)pTempFile,(const char *)"_24000",6);
+                break;
+            case 32000:
+                strncat((char *)pTempFile,(const char *)"_32000",6);
+                break;
+            case 44100:
+                strncat((char *)pTempFile,(const char *)"_44100",6);
+                break;
+            case 48000:
+                strncat((char *)pTempFile,(const char *)"_48000",6);
+                break;
+            default:
+                M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen: invalid input for BG tracksampling \
+                    frequency (%d Hz), returning M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY"\
+                    ,pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency );
+                return M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY;
+        }
+
+
+
+        //M4OSA_chrNCat(pTempFile,
+        //    itoa(pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency),5);
+        /* Append the "_<nbChannels>.pcm" part of the suffix (mono or stereo only) */
+        switch(pClipCtxt->pSettings->ClipProperties.uiNbChannels)
+        {
+            case 1:
+                strncat((char *)pTempFile,(const char *)"_1.pcm",6);
+                break;
+            case 2:
+                strncat((char *)pTempFile,(const char *)"_2.pcm",6);
+                break;
+            default:
+                M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen: invalid input for BG track no.\
+                    of channels (%d ), returning M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS",\
+                    pClipCtxt->pSettings->ClipProperties.uiNbChannels);
+                return M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS;
+        }
+        //M4OSA_chrNCat(pTempFile,itoa(pClipCtxt->pSettings->ClipProperties.uiNbChannels),1);
+
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctOpen( pClipCtxt->pReaderContext, pTempFile);
+
+    }
+    else
+    {
+        /**
+        * Open the 3GPP/MP3 clip file */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctOpen( pClipCtxt->pReaderContext,
+            pClipSettings->pFile);
+    }
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_UInt32 uiDummy, uiCoreId;
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctOpen returns 0x%x", err);
+
+        /**
+        * If the error is from the core reader, we change it to a public VSS3GPP error */
+        M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
+
+        if( M4MP4_READER == uiCoreId )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intClipOpen(): returning M4VSS3GPP_ERR_INVALID_3GPP_FILE");
+            return M4VSS3GPP_ERR_INVALID_3GPP_FILE;
+        }
+        return err;
+    }
+
+    /**
+    * Get the audio and video streams.
+    * Iterate over all streams in the file, keeping the first supported video
+    * stream and the first supported audio stream; unknown stream types are
+    * skipped, anything else marks the stream as not OK. */
+    while( err == M4NO_ERROR )
+    {
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetNextStream(
+            pClipCtxt->pReaderContext, &mediaFamily, &pStreamHandler);
+
+        /*in case we found a BIFS stream or something else...*/
+        if( ( err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
+            || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)) )
+        {
+            err = M4NO_ERROR;
+            continue;
+        }
+
+        if( M4NO_ERROR == err ) /**< One stream found */
+        {
+            /**
+            * Found a video stream */
+            if( ( mediaFamily == M4READER_kMediaFamilyVideo)
+                && (M4OSA_NULL == pClipCtxt->pVideoStream) )
+            {
+                if( ( M4DA_StreamTypeVideoH263 == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeVideoMpeg4
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeVideoMpeg4Avc
+                    == pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipOpen():\
+                        Found a H263 or MPEG-4 or H264 video stream in input 3gpp clip; %d",
+                        pStreamHandler->m_streamType);
+
+                    /**
+                    * Keep pointer to the video stream */
+                    pClipCtxt->pVideoStream =
+                        (M4_VideoStreamHandler *)pStreamHandler;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pVideoStream);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctReset(video) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctFillAuStruct(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pVideoStream,
+                        &pClipCtxt->VideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen():\
+                            m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Not H263 or MPEG-4 (H264, etc.) */
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS_editClipOpen():\
+                        Found an unsupported video stream (0x%x) in input 3gpp clip",
+                        pStreamHandler->m_streamType);
+
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+            /**
+            * Found an audio stream */
+            else if( ( mediaFamily == M4READER_kMediaFamilyAudio)
+                && (M4OSA_NULL == pClipCtxt->pAudioStream) )
+            {
+                if( ( M4DA_StreamTypeAudioAmrNarrowBand
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioAac == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioMp3
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioEvrc
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioPcm
+                    == pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipOpen(): \
+                        Found an AMR-NB or AAC or MP3 audio stream in input clip; %d",
+                        pStreamHandler->m_streamType);
+
+                    /**
+                    * Keep pointer to the audio stream */
+                    pClipCtxt->pAudioStream =
+                        (M4_AudioStreamHandler *)pStreamHandler;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pAudioStream);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctReset(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctFillAuStruct(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                        &pClipCtxt->AudioAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen():\
+                            m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Not AMR-NB or AAC (AMR-WB...) */
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipOpen():\
+                        Found an unsupported audio stream (0x%x) in input 3gpp/mp3 clip",
+                        pStreamHandler->m_streamType);
+
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+        }
+        else if( M4OSA_ERR_IS_ERROR(err) )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctGetNextStream() returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Init Video decoder */
+    if( M4OSA_NULL != pClipCtxt->pVideoStream )
+    {
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+ /* If external decoders are possible, it's best to avoid opening the decoder if the clip is only
+ going to be used for analysis, as we're not going to use it for the analysis in the case of a
+ possible external decoder anyway, and either there could be no decoder at this point or the HW
+ decoder could be present, which we want to avoid opening for that. See comments in
+ intBuildAnalysis for more details. */
+
+ /* CHANGEME Temporarily only do this for MPEG4, since for now only MPEG4 external decoders are
+ supported, and the following wouldn't work for H263 so a release where external decoders are
+ possible, but not used, wouldn't work with H263 stuff. */
+
+        if( bAvoidOpeningVideoDec && M4DA_StreamTypeVideoMpeg4
+            == pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+        {
+            /* Oops! The mere act of opening the decoder also results in the image size being
+            filled in the video stream! Compensate for this by using ParseVideoDSI to fill
+            this info. */
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipOpen: Mpeg4 stream; vid dec not started");
+            err = M4DECODER_EXTERNAL_ParseVideoDSI(pClipCtxt->pVideoStream->
+                m_basicProperties.m_pDecoderSpecificInfo,
+                pClipCtxt->pVideoStream->
+                m_basicProperties.m_decoderSpecificInfoSize,
+                &dummy, &videoSizeFromDSI);
+
+            pClipCtxt->pVideoStream->m_videoWidth = videoSizeFromDSI.m_uiWidth;
+            pClipCtxt->pVideoStream->m_videoHeight =
+                videoSizeFromDSI.m_uiHeight;
+        }
+        else
+        {
+
+#endif
+
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipOpen: Mp4/H263/H264 stream; set current vid dec");
+            err = M4VSS3GPP_setCurrentVideoDecoder(&pClipCtxt->ShellAPI,
+                pClipCtxt->pVideoStream->m_basicProperties.m_streamType);
+            M4ERR_CHECK_RETURN(err);
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+            decoderUserData =
+                pClipCtxt->ShellAPI.m_pCurrentVideoDecoderUserData;
+
+#else
+
+            decoderUserData = M4OSA_NULL;
+
+#endif
+
+            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctCreate(
+                &pClipCtxt->pViDecCtxt,
+                &pClipCtxt->pVideoStream->m_basicProperties,
+                pClipCtxt->ShellAPI.m_pReader,
+                pClipCtxt->ShellAPI.m_pReaderDataIt,
+                &pClipCtxt->VideoAU, decoderUserData);
+
+            if( ( ((M4OSA_UInt32)M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err)
+                || (((M4OSA_UInt32)M4ERR_DECODER_H263_NOT_BASELINE) == err) )
+            {
+                /**
+                * Our decoder is not compatible with H263 profile other than 0.
+                * So it returns this internal error code.
+                * We translate it to our own error code */
+                return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctCreate returns 0x%x",
+                    err);
+                return err;
+            }
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipOpen: Vid dec started; pViDecCtxt=0x%x",
+                pClipCtxt->pViDecCtxt);
+
+            /* For H264, install a resize filter on the decoder output */
+            if( M4DA_StreamTypeVideoMpeg4Avc
+                == pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+            {
+                FilterOption.m_pFilterFunction =
+                    (M4OSA_Void *) &M4VIFI_ResizeBilinearYUV420toYUV420;
+                FilterOption.m_pFilterUserData = M4OSA_NULL;
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                    pClipCtxt->pViDecCtxt, M4DECODER_kOptionID_OutputFilter,
+                    (M4OSA_DataOption) &FilterOption);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctSetOption returns 0x%x",
+                        err);
+                    return err;
+                }
+                else
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctSetOption\
+                        M4DECODER_kOptionID_OutputFilter OK");
+                }
+            }
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+        }
+
+#endif
+
+    }
+
+    /**
+    * Init Audio decoder */
+    if( M4OSA_NULL != pClipCtxt->pAudioStream )
+    {
+        err = M4VSS3GPP_intClipPrepareAudioDecoder(pClipCtxt);
+        M4ERR_CHECK_RETURN(err);
+        M4OSA_TRACE3_1("M4VSS3GPP_intClipOpen: Audio dec started; context=0x%x",
+            pClipCtxt->pAudioDecCtxt);
+    }
+    else
+    {
+        /* No audio track: reset the audio AU so later reads see an empty unit */
+        pClipCtxt->AudioAU.m_streamID = 0;
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+        pClipCtxt->AudioAU.m_size = 0;
+        pClipCtxt->AudioAU.m_CTS = 0;
+        pClipCtxt->AudioAU.m_DTS = 0;
+        pClipCtxt->AudioAU.m_attribute = 0;
+        pClipCtxt->AudioAU.m_maxsize = 0;
+        pClipCtxt->AudioAU.m_structSize = sizeof(pClipCtxt->AudioAU);
+    }
+
+    /**
+    * Get the duration of the longest stream */
+    if( M4OSA_TRUE == pClipCtxt->pSettings->ClipProperties.bAnalysed )
+    {
+        /* If already calculated set it to previous value */
+        /* Because fast open and full open can return a different value,
+        it can mismatch user settings */
+        /* Video track is more important than audio track (if video track is shorter than
+        audio track, it can led to cut larger than expected) */
+        iDuration = pClipCtxt->pSettings->ClipProperties.uiClipVideoDuration;
+
+        if( iDuration == 0 )
+        {
+            iDuration = pClipCtxt->pSettings->ClipProperties.uiClipDuration;
+        }
+    }
+    else
+    {
+        /* Else compute it from streams */
+        iDuration = 0;
+
+        if( M4OSA_NULL != pClipCtxt->pVideoStream )
+        {
+            iDuration = (M4OSA_Int32)(
+                pClipCtxt->pVideoStream->m_basicProperties.m_duration);
+        }
+
+        /* Note: because of the "iDuration == 0" clause below, the audio
+           duration is only used when no video duration is available. */
+        if( ( M4OSA_NULL != pClipCtxt->pAudioStream) && ((M4OSA_Int32)(
+            pClipCtxt->pAudioStream->m_basicProperties.m_duration)
+            > iDuration) && iDuration == 0 )
+        {
+            iDuration = (M4OSA_Int32)(
+                pClipCtxt->pAudioStream->m_basicProperties.m_duration);
+        }
+    }
+
+    /**
+    * If end time is not used, we set it to the video track duration */
+    if( 0 == pClipCtxt->pSettings->uiEndCutTime )
+    {
+        pClipCtxt->pSettings->uiEndCutTime = (M4OSA_UInt32)iDuration;
+    }
+
+    pClipCtxt->iEndTime = (M4OSA_Int32)pClipCtxt->pSettings->uiEndCutTime;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipOpen(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
+ * @brief Delete the audio track. Clip will be like if it had no audio track
+ * @note
+ * @param pClipCtxt (IN) Internal clip context
+ ******************************************************************************
+ */
+M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    /* Simply detach the audio stream; the reader owns it and will release
+     * it when the reader itself is closed. */
+    pClipCtxt->pAudioStream = M4OSA_NULL;
+
+    /* From now on every audio read will return the pre-computed constant
+     * silence AMR AU; install it once here instead of on each read. */
+    pClipCtxt->pAudioFramePtr = (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+    pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+
+    /* Drop the decoded-audio output buffer; it may have to be re-allocated
+     * later to hold a decoded silence frame. */
+    if( pClipCtxt->AudioDecBufferOut.m_dataAddress != M4OSA_NULL )
+    {
+        free(pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCurrentTime()
+ * @brief Jump to the previous RAP and decode up to the current video time
+ * @param pClipCtxt (IN) Internal clip context
+ * @param iCts (IN) Target CTS
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts( M4VSS3GPP_ClipContext *pClipCtxt,
+    M4OSA_Int32 iCts )
+{
+    M4OSA_Int32 iRapCts, iClipCts;
+    M4_MediaTime dDecodeTime;
+    M4OSA_Bool bClipJump = M4OSA_FALSE;
+    M4OSA_ERR err;
+
+    /**
+    * Compute the time in the clip base */
+    iClipCts = iCts - pClipCtxt->iVoffset;
+
+    /**
+    * If we were reading the clip, we must jump to the previous RAP
+    * to decode from that point. */
+    if( M4VSS3GPP_kClipStatus_READ == pClipCtxt->Vstatus )
+    {
+        /**
+        * The decoder must be told to jump */
+        bClipJump = M4OSA_TRUE;
+        pClipCtxt->iVideoDecCts = iClipCts;
+
+        /**
+        * Remember the clip reading state */
+        pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_DECODE_UP_TO;
+    }
+
+    /**
+    * If we are in decodeUpTo() process, check if we need to do
+    one more step or if decoding is finished */
+    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pClipCtxt->Vstatus )
+    {
+        /* Do a step of 500 ms decoding */
+        pClipCtxt->iVideoDecCts += 500;
+
+        if( pClipCtxt->iVideoDecCts > iClipCts )
+        {
+            /* Target time reached, we switch back to DECODE mode */
+            pClipCtxt->iVideoDecCts = iClipCts;
+            pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_DECODE;
+        }
+
+        M4OSA_TRACE2_1("c ,,,, decode up to : %ld", pClipCtxt->iVideoDecCts);
+    }
+    else
+    {
+        /* Just decode at current clip cts */
+        pClipCtxt->iVideoDecCts = iClipCts;
+
+        M4OSA_TRACE2_1("d ,,,, decode up to : %ld", pClipCtxt->iVideoDecCts);
+    }
+
+    /**
+    * Decode up to the target.
+    * Bug fix: dDecodeTime is now assigned BEFORE the trace that prints it;
+    * the original code traced it while still uninitialized, which is
+    * undefined behavior (and printed garbage in the debug log). */
+    dDecodeTime = (M4OSA_Double)pClipCtxt->iVideoDecCts;
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intClipDecodeVideoUpToCts: Decoding upTo CTS %.3f, pClipCtxt=0x%x",
+        dDecodeTime, pClipCtxt);
+
+    pClipCtxt->isRenderDup = M4OSA_FALSE;
+    err =
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDecode(pClipCtxt->pViDecCtxt,
+        &dDecodeTime, bClipJump, 0);
+
+    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
+        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipDecodeVideoUpToCts: m_pFctDecode returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /* The renderer found no new frame: remember it so the caller re-uses
+     * the previously decoded plane. */
+    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
+    {
+        pClipCtxt->isRenderDup = M4OSA_TRUE;
+    }
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipDecodeVideoUpToCts: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame()
+ * @brief Read one AU frame in the clip
+ * @note
+ * @param pClipCtxt (IN) Internal clip context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /* ------------------------------ */
+    /* ---------- NO AUDIO ---------- */
+    /* ------------------------------ */
+
+    if( M4OSA_NULL == pClipCtxt->pAudioStream )
+    {
+        /* If there is no audio track, we return silence AUs */
+        pClipCtxt->pAudioFramePtr =
+            (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+        pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+        pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+        M4OSA_TRACE2_0("b #### blank track");
+    }
+
+    /* ---------------------------------- */
+    /* ---------- AMR-NB, EVRC ---------- */
+    /* ---------------------------------- */
+
+    /* For AMR-NB/EVRC one AU can hold several audio frames; the context
+       keeps a cursor (pAudioFramePtr / bAudioFrameAvailable) that walks
+       the frames inside the current AU across successive calls. */
+    else if( ( M4VIDEOEDITING_kAMR_NB
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_kEVRC
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+    {
+        if( M4OSA_FALSE == pClipCtxt->bAudioFrameAvailable )
+        {
+            /**
+            * No AU available, so we must must read one from the original track reader */
+            err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                pClipCtxt->pReaderContext,
+                (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                &pClipCtxt->AudioAU);
+
+            if( M4NO_ERROR == err )
+            {
+                /**
+                * Set the current AMR frame position at the beginning of the read AU */
+                pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+
+                /**
+                * Set the AMR frame CTS */
+                pClipCtxt->iAudioFrameCts =
+                    (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS
+                    * pClipCtxt->scale_audio + 0.5);
+            }
+            else if( ( M4WAR_NO_MORE_AU == err) && (M4VIDEOEDITING_kAMR_NB
+                == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+            {
+                /**
+                * If there is less audio than the stream duration indicated,
+                * we return silence at the end of the stream. */
+                pClipCtxt->pAudioFramePtr =
+                    (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+                pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+                pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+                M4OSA_TRACE2_0("a #### silence AU");
+
+                /**
+                * Return with M4WAR_NO_MORE_AU */
+                M4OSA_TRACE3_0(
+                    "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: \
+                    returning M4WAR_NO_MORE_AU (silence)");
+                return M4WAR_NO_MORE_AU;
+            }
+            else /**< fatal error (or no silence in EVRC) */
+            {
+                M4OSA_TRACE3_1(
+                    "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: m_pFctGetNextAu() returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+        else /* bAudioFrameAvailable */
+        {
+            /**
+            * Go to the next AMR frame in the AU */
+            pClipCtxt->pAudioFramePtr += pClipCtxt->uiAudioFrameSize;
+
+            /**
+            * Increment CTS: one AMR frame is 20 ms long */
+            pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+        }
+
+        /**
+        * Get the size of the pointed AMR frame
+        * (derived from the frame-type byte by the codec-specific helper) */
+        switch( pClipCtxt->pSettings->ClipProperties.AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pClipCtxt->uiAudioFrameSize =
+                    (M4OSA_UInt16)M4VSS3GPP_intGetFrameSize_AMRNB(
+                    pClipCtxt->pAudioFramePtr);
+                break;
+
+            case M4VIDEOEDITING_kEVRC:
+                pClipCtxt->uiAudioFrameSize =
+                    (M4OSA_UInt16)M4VSS3GPP_intGetFrameSize_EVRC(
+                    pClipCtxt->pAudioFramePtr);
+                break;
+            default:
+                break;
+        }
+
+        /* A zero or oversized frame means the AU payload is corrupted */
+        if( 0 == pClipCtxt->uiAudioFrameSize )
+        {
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: AU frame size == 0,\
+                returning M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AMR_AU");
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+        else if( pClipCtxt->uiAudioFrameSize > pClipCtxt->AudioAU.m_size )
+        {
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: AU frame size greater than AU size!,\
+                returning M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AMR_AU");
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+
+        /**
+        * Check if the end of the current AU has been reached or not */
+        if( ( pClipCtxt->pAudioFramePtr + pClipCtxt->uiAudioFrameSize)
+            < (pClipCtxt->AudioAU.m_dataAddress + pClipCtxt->AudioAU.m_size) )
+        {
+            pClipCtxt->bAudioFrameAvailable = M4OSA_TRUE;
+        }
+        else
+        {
+            pClipCtxt->bAudioFrameAvailable =
+                M4OSA_FALSE; /**< will be used for next call */
+        }
+    }
+
+    /* ------------------------- */
+    /* ---------- AAC ---------- */
+    /* ------------------------- */
+
+    /* For AAC one AU is exactly one frame, so no intra-AU cursor is needed */
+    else if( ( M4VIDEOEDITING_kAAC
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_kAACplus
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_keAACplus
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+    {
+        err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+            pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pAudioStream,
+            &pClipCtxt->AudioAU);
+
+        if( M4NO_ERROR == err )
+        {
+            pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+            pClipCtxt->uiAudioFrameSize =
+                (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+            pClipCtxt->iAudioFrameCts =
+                (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+                + 0.5);
+
+            /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+            /* (cts is not an integer with frequency 24 kHz for example) */
+            /* Snap the CTS to the nearest multiple of the frame duration */
+            pClipCtxt->iAudioFrameCts = ( ( pClipCtxt->iAudioFrameCts
+                + pClipCtxt->iSilenceFrameDuration / 2)
+                / pClipCtxt->iSilenceFrameDuration)
+                * pClipCtxt->iSilenceFrameDuration;
+        }
+        else if( M4WAR_NO_MORE_AU == err )
+        {
+            /**
+            * If there is less audio than the stream duration indicated,
+            * we return silence at the end of the stream. */
+            pClipCtxt->pAudioFramePtr =
+                (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+            pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+            pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+            M4OSA_TRACE2_0("a #### silence AU");
+
+            /**
+            * Return with M4WAR_NO_MORE_AU */
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AAC:\
+                returning M4WAR_NO_MORE_AU (silence)");
+            return M4WAR_NO_MORE_AU;
+        }
+        else /**< fatal error */
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AAC: m_pFctGetNextAu() returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /* --------------------------------- */
+    /* ---------- MP3, others ---------- */
+    /* --------------------------------- */
+
+    /* Note: unlike the AMR-NB and AAC paths, this path returns the reader
+       error as-is on M4WAR_NO_MORE_AU, with no trailing-silence handling */
+    else
+    {
+        err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+            pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pAudioStream,
+            &pClipCtxt->AudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-MP3: m_pFctGetNextAu() returns 0x%x",
+                err);
+            return err;
+        }
+
+        pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+        pClipCtxt->uiAudioFrameSize = (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+        pClipCtxt->iAudioFrameCts =
+            (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+            + 0.5);
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intClipReadNextAudioFrame(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder()
+ * @brief Creates and initialize the audio decoder for the clip.
+ * @note
+ * @param pClipCtxt (IN) internal clip context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4_StreamType audiotype;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    M4_AACType iAacType = 0;
+
+#endif
+
+    /**
+    * Set the proper audio decoder */
+
+    audiotype = pClipCtxt->pAudioStream->m_basicProperties.m_streamType;
+
+    //EVRC
+    /* NOTE: the 'if' below has no braces on purpose: only the
+     * setCurrentAudioDecoder() call is conditional. For EVRC no decoder is
+     * selected, and the unconditional M4ERR_CHECK_RETURN sees err still at
+     * its M4NO_ERROR initial value, so EVRC falls through without error. */
+    if( M4DA_StreamTypeAudioEvrc
+        != audiotype ) /* decoder not supported yet, but allow to do null encoding */
+
+        err = M4VSS3GPP_setCurrentAudioDecoder(&pClipCtxt->ShellAPI, audiotype);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Creates the audio decoder */
+    if( M4OSA_NULL == pClipCtxt->ShellAPI.m_pAudioDecoder )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipPrepareAudioDecoder(): Fails to initiate the audio decoder.");
+        return M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED;
+    }
+
+    /* Create the decoder context only once; a non-NULL pAudioDecCtxt means a
+     * previous call already set it up. */
+    if( M4OSA_NULL == pClipCtxt->pAudioDecCtxt )
+    {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        if( M4OSA_TRUE == pClipCtxt->ShellAPI.bAllowFreeingOMXCodecInterface )
+        {
+            /* Internal (shell-owned) codec path: for AAC, probe the codec to
+             * retrieve the real AAC properties; PCM needs no decoder at all. */
+            if( M4DA_StreamTypeAudioAac == audiotype ) {
+                err = M4VSS3GPP_intCheckAndGetCodecAacProperties(
+                       pClipCtxt);
+            } else if (M4DA_StreamTypeAudioPcm != audiotype) {
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                    &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                    M4OSA_NULL);
+            } else {
+                err = M4NO_ERROR;
+            }
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipPrepareAudioDecoder: m_pAudioDecoder->m_pFctCreateAudioDec\
+                    returns 0x%x", err);
+                return err;
+            }
+        }
+        else
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                Creating external audio decoder of type 0x%x", audiotype);
+            /* External OMX codecs are used*/
+            if( M4DA_StreamTypeAudioAac == audiotype )
+            {
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                    &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                    pClipCtxt->ShellAPI.pCurrentAudioDecoderUserData);
+
+                if( M4NO_ERROR == err )
+                {
+                    /* AAC properties*/
+                    /*get from Reader; temporary, till Audio decoder shell API
+                     available to get the AAC properties*/
+                    pClipCtxt->AacProperties.aNumChan =
+                        pClipCtxt->pAudioStream->m_nbChannels;
+                    pClipCtxt->AacProperties.aSampFreq =
+                        pClipCtxt->pAudioStream->m_samplingFrequency;
+
+                    err = pClipCtxt->ShellAPI.m_pAudioDecoder->
+                        m_pFctGetOptionAudioDec(pClipCtxt->pAudioDecCtxt,
+                        M4AD_kOptionID_StreamType,
+                        (M4OSA_DataOption) &iAacType);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                            m_pAudioDecoder->m_pFctGetOptionAudioDec returns err 0x%x", err);
+                        /* Non-fatal: fall back to plain AAC and continue */
+                        iAacType = M4_kAAC; //set to default
+                        err = M4NO_ERROR;
+                    }
+                    else {
+                        M4OSA_TRACE3_1(
+                            "M4VSS3GPP_intClipPrepareAudioDecoder: \
+                            m_pAudioDecoder->m_pFctGetOptionAudioDec returns streamType %d",
+                            iAacType);
+                    }
+                    /* Translate the detected AAC flavour into SBR/PS flags */
+                    switch( iAacType )
+                    {
+                        case M4_kAAC:
+                            pClipCtxt->AacProperties.aSBRPresent = 0;
+                            pClipCtxt->AacProperties.aPSPresent = 0;
+                            break;
+
+                        case M4_kAACplus:
+                            pClipCtxt->AacProperties.aSBRPresent = 1;
+                            pClipCtxt->AacProperties.aPSPresent = 0;
+                            pClipCtxt->AacProperties.aExtensionSampFreq =
+                                pClipCtxt->pAudioStream->m_samplingFrequency;
+                            break;
+
+                        case M4_keAACplus:
+                            pClipCtxt->AacProperties.aSBRPresent = 1;
+                            pClipCtxt->AacProperties.aPSPresent = 1;
+                            pClipCtxt->AacProperties.aExtensionSampFreq =
+                                pClipCtxt->pAudioStream->m_samplingFrequency;
+                            break;
+                          default:
+                            break;
+                    }
+                    M4OSA_TRACE3_2(
+                        "M4VSS3GPP_intClipPrepareAudioDecoder: AAC NBChans=%d, SamplFreq=%d",
+                        pClipCtxt->AacProperties.aNumChan,
+                        pClipCtxt->AacProperties.aSampFreq);
+                }
+            }
+            else
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                pClipCtxt->ShellAPI.pCurrentAudioDecoderUserData);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                    m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+#else
+        /* Trick, I use pUserData to retrieve aac properties,
+        waiting for some better implementation... */
+
+        if( M4DA_StreamTypeAudioAac == audiotype )
+            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt,
+                pClipCtxt->pAudioStream, &(pClipCtxt->AacProperties));
+        else
+            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                M4OSA_NULL /* to be changed with HW interfaces */);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+
+#endif
+
+    }
+
+    /* Per-codec decoder configuration */
+    if( M4DA_StreamTypeAudioAmrNarrowBand == audiotype ) {
+        /* AMR DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioEvrc == audiotype ) {
+        /* EVRC DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioMp3 == audiotype ) {
+        /* MP3 DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioAac == audiotype )
+    {
+        /* AAC DECODER CONFIGURATION */
+
+        /* Decode high quality aac but disable PS and SBR */
+        /* Because we have to mix different kind of AAC so we must take the lowest capability */
+        /* In MCS it was not needed because there is only one stream */
+        M4_AacDecoderConfig AacDecParam;
+
+        AacDecParam.m_AACDecoderProfile = AAC_kAAC;
+        AacDecParam.m_DownSamplingMode = AAC_kDS_OFF;
+
+        if( M4ENCODER_kMono == pClipCtxt->pAudioStream->m_nbChannels )
+        {
+            AacDecParam.m_OutputMode = AAC_kMono;
+        }
+        else
+        {
+            AacDecParam.m_OutputMode = AAC_kStereo;
+        }
+
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
+            pClipCtxt->pAudioDecCtxt,
+            M4AD_kOptionID_UserParam, (M4OSA_DataOption) &AacDecParam);
+    }
+
+    /* Hand the reader interface and the AU holder to the decoder so it can
+     * pull access units itself (return values intentionally ignored:
+     * optional feature, not all decoders support these options) */
+    if( M4OSA_NULL != pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec ) {
+      pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
+       pClipCtxt->pAudioDecCtxt, M4AD_kOptionID_3gpReaderInterface,
+       (M4OSA_DataOption) pClipCtxt->ShellAPI.m_pReaderDataIt);
+
+      pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
+       pClipCtxt->pAudioDecCtxt, M4AD_kOptionID_AudioAU,
+       (M4OSA_DataOption) &pClipCtxt->AudioAU);
+    }
+
+    if( M4OSA_NULL != pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec )
+    {
+        /* Not implemented in all decoders */
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                m_pAudioDecoder->m_pFctStartAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Allocate output buffer for the audio decoder */
+    /* m_bufferSize counts 16-bit samples (frame length x sample size x
+     * channels); the malloc below scales by sizeof(M4OSA_Int16) accordingly */
+    pClipCtxt->AudioDecBufferOut.m_bufferSize =
+        pClipCtxt->pAudioStream->m_byteFrameLength
+        * pClipCtxt->pAudioStream->m_byteSampleSize
+        * pClipCtxt->pAudioStream->m_nbChannels;
+    pClipCtxt->AudioDecBufferOut.m_dataAddress =
+        (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pClipCtxt->AudioDecBufferOut.m_bufferSize
+        * sizeof(M4OSA_Int16),
+        M4VSS3GPP, (M4OSA_Char *)"AudioDecBufferOut.m_bufferSize");
+
+    if( M4OSA_NULL == pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipPrepareAudioDecoder():\
+            unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame()
+ * @brief Decode the current AUDIO frame.
+ * @note
+ * @param pClipCtxt (IN) internal clip context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Silence mode */
+    /* Silence is detected by pointer identity: the reader path stores the
+     * static silence frame address into pAudioFramePtr when it runs out of
+     * real AUs, so no decode is needed -- just output zeroed PCM. */
+    if( pClipCtxt->pSilenceFrameData
+        == (M4OSA_UInt8 *)pClipCtxt->pAudioFramePtr )
+    {
+        if( pClipCtxt->AudioDecBufferOut.m_dataAddress == M4OSA_NULL )
+        {
+            /**
+            * Allocate output buffer for the audio decoder */
+            pClipCtxt->AudioDecBufferOut.m_bufferSize =
+                pClipCtxt->uiSilencePcmSize;
+            pClipCtxt->AudioDecBufferOut.m_dataAddress =
+                (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(
+                pClipCtxt->AudioDecBufferOut.m_bufferSize
+                * sizeof(M4OSA_Int16),
+                M4VSS3GPP,(M4OSA_Char *) "AudioDecBufferOut.m_bufferSize");
+
+            if( M4OSA_NULL == pClipCtxt->AudioDecBufferOut.m_dataAddress )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intClipDecodeCurrentAudioFrame():\
+                    unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
+                return M4ERR_ALLOC;
+            }
+        }
+
+        /* Fill it with 0 (= pcm silence) */
+        memset(pClipCtxt->AudioDecBufferOut.m_dataAddress,0,
+             pClipCtxt->AudioDecBufferOut.m_bufferSize * sizeof(M4OSA_Int16));
+    }
+    /* PCM input needs no decoding: pass the frame straight through.
+     * NOTE(review): assumes AudioDecBufferOut was already allocated large
+     * enough by a previous prepare step -- confirm against callers. */
+    else if (pClipCtxt->pSettings->FileType == M4VIDEOEDITING_kFileType_PCM)
+    {
+        pClipCtxt->AudioDecBufferIn.m_dataAddress = (M4OSA_MemAddr8) pClipCtxt->pAudioFramePtr;
+        pClipCtxt->AudioDecBufferIn.m_bufferSize  = pClipCtxt->uiAudioFrameSize;
+
+        memcpy((void *)pClipCtxt->AudioDecBufferOut.m_dataAddress,
+            (void *)pClipCtxt->AudioDecBufferIn.m_dataAddress, pClipCtxt->AudioDecBufferIn.m_bufferSize);
+        pClipCtxt->AudioDecBufferOut.m_bufferSize = pClipCtxt->AudioDecBufferIn.m_bufferSize;
+        /**
+        * Return with no error */
+
+        M4OSA_TRACE3_0("M4VSS3GPP_intClipDecodeCurrentAudioFrame(): returning M4NO_ERROR");
+        return M4NO_ERROR;
+    }
+    /**
+    * Standard decoding mode */
+    else
+    {
+        /**
+        * Decode current AMR frame */
+        if ( pClipCtxt->pAudioFramePtr != M4OSA_NULL ) {
+            pClipCtxt->AudioDecBufferIn.m_dataAddress =
+                (M4OSA_MemAddr8)pClipCtxt->pAudioFramePtr;
+            pClipCtxt->AudioDecBufferIn.m_bufferSize =
+                pClipCtxt->uiAudioFrameSize;
+            pClipCtxt->AudioDecBufferIn.m_timeStampUs =
+                (int64_t) (pClipCtxt->iAudioFrameCts * 1000LL);
+
+            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStepAudioDec(
+                pClipCtxt->pAudioDecCtxt,
+                &pClipCtxt->AudioDecBufferIn, &pClipCtxt->AudioDecBufferOut,
+                M4OSA_FALSE);
+        } else {
+            // Pass Null input buffer
+            // Reader invoked from Audio decoder source
+            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStepAudioDec(
+                pClipCtxt->pAudioDecCtxt,
+                M4OSA_NULL, &pClipCtxt->AudioDecBufferOut,
+                M4OSA_FALSE);
+        }
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipDecodeCurrentAudioFrame():\
+                m_pAudioDecoder->m_pFctStepAudio returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intClipDecodeCurrentAudioFrame(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt()
+ * @brief Jump in the audio track of the clip.
+ * @note
+ * @param pClipCtxt (IN) internal clip context
+ * @param pJumpCts (IN/OUT) in:target CTS, out: reached CTS
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt( M4VSS3GPP_ClipContext *pClipCtxt,
+                                       M4OSA_Int32 *pJumpCts )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iTargetCts;
+    M4OSA_Int32 iJumpCtsMs;
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipJumpAudioAt: pClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pJumpCts), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipJumpAudioAt: pJumpCts is M4OSA_NULL");
+
+    iTargetCts = *pJumpCts;
+
+    /**
+    * If there is no audio stream, we simulate a jump at the target jump CTS */
+    if( M4OSA_NULL == pClipCtxt->pAudioStream )
+    {
+        /**
+        * the target CTS will be reached at next ReadFrame call
+        * (hence we step back one silence frame duration) */
+        *pJumpCts = iTargetCts - pClipCtxt->iSilenceFrameDuration;
+
+        /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+        /* (cts is not an integer with frequency 24 kHz for example) */
+        /* Round to the nearest multiple of the silence frame duration */
+        *pJumpCts = ( ( *pJumpCts + pClipCtxt->iSilenceFrameDuration / 2)
+            / pClipCtxt->iSilenceFrameDuration)
+            * pClipCtxt->iSilenceFrameDuration;
+        pClipCtxt->iAudioFrameCts =
+            *
+            pJumpCts; /* simulate a read at jump position for later silence AUs */
+    }
+    else
+    {
+        M4OSA_Int32 current_time = 0;
+        M4OSA_Int32 loop_counter = 0;
+
+        if( (M4DA_StreamTypeAudioMp3
+            == pClipCtxt->pAudioStream->m_basicProperties.m_streamType) )
+        {
+            /* MP3 has no jump support in the reader: walk AUs forward until
+             * the target CTS, bounded by a max AU count to avoid an endless
+             * loop on corrupt streams */
+            while( ( loop_counter < M4VSS3GPP_MP3_JUMPED_AU_NUMBER_MAX)
+                && (current_time < iTargetCts) )
+            {
+                err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                    pClipCtxt->pReaderContext,
+                    (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                    &pClipCtxt->AudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipJumpAudioAt: m_pFctGetNextAu() returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                current_time = (M4OSA_Int32)pClipCtxt->AudioAU.m_CTS;
+                loop_counter++;
+            }
+
+            /**
+            * The current AU is stored */
+            pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+            pClipCtxt->uiAudioFrameSize =
+                (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+            pClipCtxt->iAudioFrameCts =
+                (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+                + 0.5);
+
+            *pJumpCts = pClipCtxt->iAudioFrameCts;
+        }
+        else
+        {
+            /**
+            * Jump in the audio stream */
+            /* Convert from the internal timescale to milliseconds for the reader */
+            iJumpCtsMs =
+                (M4OSA_Int32)(*pJumpCts / pClipCtxt->scale_audio + 0.5);
+
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                pClipCtxt->pReaderContext,
+                (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                &iJumpCtsMs);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipJumpAudioAt(): m_pFctJump() returns 0x%x",
+                    err);
+                return err;
+            }
+
+            *pJumpCts =
+                (M4OSA_Int32)(iJumpCtsMs * pClipCtxt->scale_audio + 0.5);
+
+            /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+            /* (cts is not an integer with frequency 24 kHz for example) */
+            *pJumpCts = ( ( *pJumpCts + pClipCtxt->iSilenceFrameDuration / 2)
+                / pClipCtxt->iSilenceFrameDuration)
+                * pClipCtxt->iSilenceFrameDuration;
+            pClipCtxt->iAudioFrameCts = 0; /* No frame read yet */
+
+            /**
+            * To detect some may-be bugs, I prefer to reset all these after a jump */
+            pClipCtxt->bAudioFrameAvailable = M4OSA_FALSE;
+            pClipCtxt->pAudioFramePtr = M4OSA_NULL;
+
+            /**
+            * In AMR, we have to manage multi-framed AUs,
+            but also in AAC the jump can be 1 AU too much backward */
+            if( *pJumpCts < iTargetCts )
+            {
+                /**
+                * Jump doesn't read any AU, we must read at least one */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pClipCtxt);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipJumpAudioAt():\
+                        M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Read AU frames as long as we reach the AU before the target CTS
+                * (so the target will be reached when the user call ReadNextAudioFrame). */
+                while( pClipCtxt->iAudioFrameCts
+                    < (iTargetCts - pClipCtxt->iSilenceFrameDuration) )
+                {
+                    err = M4VSS3GPP_intClipReadNextAudioFrame(pClipCtxt);
+
+                    if( M4OSA_ERR_IS_ERROR(err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipJumpAudioAt():\
+                            M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Return the CTS that will be reached at next ReadFrame */
+                *pJumpCts = pClipCtxt->iAudioFrameCts
+                    + pClipCtxt->iSilenceFrameDuration;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipJumpAudioAt(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipClose()
+ * @brief Close a clip. Destroy the context.
+ * @note
+ * @param pClipCtxt (IN) Internal clip context
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipClose( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipClose: pClipCtxt is M4OSA_NULL");
+
+    /**
+    * Free the video decoder context */
+    if( M4OSA_NULL != pClipCtxt->pViDecCtxt )
+    {
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDestroy(
+            pClipCtxt->pViDecCtxt);
+        pClipCtxt->pViDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the audio decoder context  */
+    if( M4OSA_NULL != pClipCtxt->pAudioDecCtxt )
+    {
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctDestroyAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pClipCtxt->pAudioDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the decoded audio buffer */
+    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        free(pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    /**
+    * Audio AU is allocated by reader.
+    * If no audio track, audio AU is set at 'silent' (SID) by VSS.
+    * As a consequence, if audio AU is set to 'silent' (static)
+    it can't be free unless it is set to NULL */
+    /* Clearing the pointer here prevents the reader's destroy below from
+     * freeing a static silence buffer it does not own */
+    if( ( (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048
+        == pClipCtxt->AudioAU.m_dataAddress)
+        || ((M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData
+        == pClipCtxt->AudioAU.m_dataAddress) )
+    {
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pClipCtxt->pReaderContext )
+    {
+        /**
+        * Close the 3GPP or MP3 reader */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctClose(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose(): m_pReader->m_pFctClose returns 0x%x",
+                err);
+        }
+
+        /**
+        * Destroy the 3GPP or MP3 reader context */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctDestroy(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose(): m_pReader->m_pFctDestroy returns 0x%x",
+                err);
+        }
+
+        pClipCtxt->pReaderContext = M4OSA_NULL;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipClose(Ctxt=0x%x): returning M4NO_ERROR",
+        pClipCtxt);
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipCleanUp()
+ * @brief   Free all resources held by the clip context, then the context
+ *          itself. Continues through every teardown step even when one of
+ *          them fails, and reports the FIRST error encountered.
+ * @param   pClipCtxt   (IN) Internal clip context (freed on return)
+ * @return  M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipCleanUp( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err = M4NO_ERROR, err2;
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipCleanUp: pClipCtxt is M4OSA_NULL");
+
+    /**
+    * Free the video decoder context */
+    if( M4OSA_NULL != pClipCtxt->pViDecCtxt )
+    {
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDestroy(
+            pClipCtxt->pViDecCtxt);
+        pClipCtxt->pViDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the audio decoder context  */
+    if( M4OSA_NULL != pClipCtxt->pAudioDecCtxt )
+    {
+        err2 = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctDestroyAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+                err2);
+            /**< don't return, we still have stuff to free */
+            /* Keep the first error only */
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        pClipCtxt->pAudioDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the decoded audio buffer */
+    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        free(pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    /**
+    * Audio AU is allocated by reader.
+    * If no audio track, audio AU is set at 'silent' (SID) by VSS.
+    * As a consequence, if audio AU is set to 'silent' (static)
+    it can't be free unless it is set to NULL */
+    /* Clear the pointer so the reader destroy below cannot free a static
+     * silence buffer it does not own */
+    if( ( (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048
+        == pClipCtxt->AudioAU.m_dataAddress)
+        || ((M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData
+        == pClipCtxt->AudioAU.m_dataAddress) )
+    {
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pClipCtxt->pReaderContext )
+    {
+        /**
+        * Close the 3GPP or MP3 reader */
+        err2 = pClipCtxt->ShellAPI.m_pReader->m_pFctClose(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp(): m_pReader->m_pFctClose returns 0x%x",
+                err2);
+
+            /* Keep the first error only */
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        /**
+        * Destroy the 3GPP or MP3 reader context */
+        err2 = pClipCtxt->ShellAPI.m_pReader->m_pFctDestroy(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp(): m_pReader->m_pFctDestroy returns 0x%x",
+                err2);
+
+            /* Keep the first error only */
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        pClipCtxt->pReaderContext = M4OSA_NULL;
+    }
+
+    /* Free the YUV planes (plane 0 owns the pixel data for all planes) */
+    if(pClipCtxt->pPlaneYuv != M4OSA_NULL) {
+        if(pClipCtxt->pPlaneYuv[0].pac_data != M4OSA_NULL) {
+            free(pClipCtxt->pPlaneYuv[0].pac_data);
+            pClipCtxt->pPlaneYuv[0].pac_data = M4OSA_NULL;
+        }
+        free(pClipCtxt->pPlaneYuv);
+        pClipCtxt->pPlaneYuv = M4OSA_NULL;
+    }
+
+    if(pClipCtxt->pPlaneYuvWithEffect != M4OSA_NULL) {
+        if(pClipCtxt->pPlaneYuvWithEffect[0].pac_data != M4OSA_NULL) {
+            free(pClipCtxt->pPlaneYuvWithEffect[0].pac_data);
+            pClipCtxt->pPlaneYuvWithEffect[0].pac_data = M4OSA_NULL;
+        }
+        free(pClipCtxt->pPlaneYuvWithEffect);
+        pClipCtxt->pPlaneYuvWithEffect = M4OSA_NULL;
+    }
+    /**
+    * Free the shells interfaces */
+    M4VSS3GPP_unRegisterAllWriters(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllEncoders(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllReaders(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllDecoders(&pClipCtxt->ShellAPI);
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipCleanUp: pClipCtxt=0x%x", pClipCtxt);
+    /**
+    * Free the clip context */
+    free(pClipCtxt);
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB()
+ * @brief Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
+ * @note
+ * @param pAudioFrame (IN) AMRNB frame
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB( M4OSA_MemAddr8 pAudioFrame )
+{
+    /* Payload size in BITS for each 4-bit AMR-NB frame type (the FT field
+     * in bits 3..6 of the ToC byte). The sentinel 0xFFFFFFFF marks frame
+     * types that are invalid in AMR-NB storage format. */
+    static const M4OSA_UInt32 kAmrNbFrameBits[16] =
+    {
+        95,         /* FT 0:  4750 bps */
+        103,        /* FT 1:  5150 bps */
+        118,        /* FT 2:  5900 bps */
+        134,        /* FT 3:  6700 bps */
+        148,        /* FT 4:  7400 bps */
+        159,        /* FT 5:  7950 bps */
+        204,        /* FT 6: 10200 bps */
+        244,        /* FT 7: 12000 bps */
+        39,         /* FT 8: SID (Silence) */
+        0xFFFFFFFF, /* FT 9:  invalid */
+        0xFFFFFFFF, /* FT 10: invalid */
+        0xFFFFFFFF, /* FT 11: invalid */
+        0xFFFFFFFF, /* FT 12: invalid */
+        0xFFFFFFFF, /* FT 13: invalid */
+        0xFFFFFFFF, /* FT 14: invalid */
+        0           /* FT 15: No data */
+    };
+
+    M4OSA_UInt32 uiFrameType = ( ( *pAudioFrame) &(0xF << 3)) >> 3;
+    M4OSA_UInt32 uiFrameBits = kAmrNbFrameBits[uiFrameType];
+
+    if( 0xFFFFFFFF == uiFrameBits )
+    {
+        M4OSA_TRACE3_0(
+            "M4VSS3GPP_intGetFrameSize_AMRNB(): Corrupted AMR frame! returning 0.");
+        return 0;
+    }
+
+    /* One ToC byte, plus the payload rounded up to a whole number of bytes */
+    return (1 + (( uiFrameBits + 7) / 8));
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC()
+ * @brief Return the length, in bytes, of the EVRC frame contained in the given buffer
+ * @note
+ * 0 1 2 3
+ * +-+-+-+-+
+ * |fr type| RFC 3558
+ * +-+-+-+-+
+ *
+ * Frame Type: 4 bits
+ * The frame type indicates the type of the corresponding codec data
+ * frame in the RTP packet.
+ *
+ * For EVRC and SMV codecs, the frame type values and size of the
+ * associated codec data frame are described in the table below:
+ *
+ * Value Rate Total codec data frame size (in octets)
+ * ---------------------------------------------------------
+ * 0 Blank 0 (0 bit)
+ * 1 1/8 2 (16 bits)
+ * 2 1/4 5 (40 bits; not valid for EVRC)
+ * 3 1/2 10 (80 bits)
+ * 4 1 22 (171 bits; 5 padded at end with zeros)
+ * 5 Erasure 0 (SHOULD NOT be transmitted by sender)
+ *
+ * @param pCpAudioFrame (IN) EVRC frame
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC( M4OSA_MemAddr8 pAudioFrame )
+{
+    /* Payload size in BITS for each EVRC frame type (low nibble of the
+     * first byte, per RFC 3558). The sentinel 0xFFFFFFFF marks invalid
+     * frame type values. */
+    static const M4OSA_UInt32 kEvrcFrameBits[16] =
+    {
+        0,          /* 0: blank */
+        16,         /* 1: 1/8 rate */
+        40,         /* 2: 1/4 rate */
+        80,         /* 3: 1/2 rate */
+        171,        /* 4: full rate */
+        0,          /* 5: erasure */
+        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
+    };
+
+    M4OSA_UInt32 uiFrameType = ( *pAudioFrame) &0x0F;
+    M4OSA_UInt32 uiFrameBits = kEvrcFrameBits[uiFrameType];
+
+    if( 0xFFFFFFFF == uiFrameBits )
+    {
+        M4OSA_TRACE3_0(
+            "M4VSS3GPP_intGetFrameSize_EVRC(): Corrupted EVRC frame! returning 0.");
+        return 0;
+    }
+
+    /* One frame-type byte, plus the payload rounded up to whole bytes */
+    return (1 + (( uiFrameBits + 7) / 8));
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckAndGetCodecAacProperties()
+ * @brief   Decode the first audio frame of the clip to retrieve the real AAC
+ *          properties (channel count, sampling frequency) from the codec,
+ *          then reset the stream reader back to the beginning.
+ * @param   pClipCtxt   (IN) internal clip context
+ * @return  M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCheckAndGetCodecAacProperties(
+       M4VSS3GPP_ClipContext *pClipCtxt) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4AD_Buffer outputBuffer;
+    uint32_t optionValue =0;
+
+    /* Make the output buffer state well-defined: if m_bufferSize computes to
+     * zero below, no allocation happens, and the unconditional free() at the
+     * end must not see an uninitialized pointer (free(NULL) is a no-op). */
+    outputBuffer.m_dataAddress = M4OSA_NULL;
+    outputBuffer.m_bufferSize = 0;
+
+    // Decode first audio frame from clip to get properties from codec
+
+    err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                    &pClipCtxt->pAudioDecCtxt,
+                    pClipCtxt->pAudioStream, &(pClipCtxt->AacProperties));
+
+    /* Don't touch the decoder context if its creation failed */
+    if( M4NO_ERROR != err ) {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCheckAndGetCodecAacProperties: \
+             m_pFctCreateAudioDec returns 0x%x", err);
+        return err;
+    }
+
+    /* Hand the reader interface and AU holder to the decoder so it can pull
+     * access units itself (optional options: return values ignored) */
+    pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
+        pClipCtxt->pAudioDecCtxt, M4AD_kOptionID_3gpReaderInterface,
+        (M4OSA_DataOption) pClipCtxt->ShellAPI.m_pReaderDataIt);
+
+    pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
+        pClipCtxt->pAudioDecCtxt, M4AD_kOptionID_AudioAU,
+        (M4OSA_DataOption) &pClipCtxt->AudioAU);
+
+    if( pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec != M4OSA_NULL ) {
+
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+        if( M4NO_ERROR != err ) {
+
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckAndGetCodecAacProperties: \
+                 m_pFctStartAudioDec returns 0x%x", err);
+            return err;
+        }
+    }
+
+    /**
+    * Allocate output buffer for the audio decoder */
+    /* Size in 16-bit samples: frame length x sample size x channels */
+    outputBuffer.m_bufferSize =
+        pClipCtxt->pAudioStream->m_byteFrameLength
+        * pClipCtxt->pAudioStream->m_byteSampleSize
+        * pClipCtxt->pAudioStream->m_nbChannels;
+
+    if( outputBuffer.m_bufferSize > 0 ) {
+
+        outputBuffer.m_dataAddress =
+            (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(outputBuffer.m_bufferSize \
+            *sizeof(short), M4VSS3GPP, (M4OSA_Char *)"outputBuffer.m_bufferSize");
+
+        if( M4OSA_NULL == outputBuffer.m_dataAddress ) {
+
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intCheckAndGetCodecAacProperties():\
+                 unable to allocate outputBuffer.m_dataAddress");
+            return M4ERR_ALLOC;
+        }
+    }
+
+    err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStepAudioDec(
+            pClipCtxt->pAudioDecCtxt, M4OSA_NULL, &outputBuffer, M4OSA_FALSE);
+
+    /* The first decode step is expected to report a format change carrying
+     * the codec's real channel count and sampling frequency */
+    if ( err == M4WAR_INFO_FORMAT_CHANGE ) {
+
+        // Get the properties from codec node
+        pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctGetOptionAudioDec(
+            pClipCtxt->pAudioDecCtxt,
+           M4AD_kOptionID_AudioNbChannels, (M4OSA_DataOption) &optionValue);
+
+        pClipCtxt->AacProperties.aNumChan = optionValue;
+        // Reset Reader structure value also
+        pClipCtxt->pAudioStream->m_nbChannels = optionValue;
+
+        pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctGetOptionAudioDec(
+            pClipCtxt->pAudioDecCtxt,
+            M4AD_kOptionID_AudioSampFrequency, (M4OSA_DataOption) &optionValue);
+
+        pClipCtxt->AacProperties.aSampFreq = optionValue;
+        // Reset Reader structure value also
+        pClipCtxt->pAudioStream->m_samplingFrequency = optionValue;
+
+    } else if( err != M4NO_ERROR) {
+        M4OSA_TRACE1_1("M4VSS3GPP_intCheckAndGetCodecAacProperties:\
+            m_pFctStepAudioDec returns err = 0x%x", err);
+    }
+
+    /* Safe even when no allocation took place: pointer is NULL then */
+    free(outputBuffer.m_dataAddress);
+
+    // Reset the stream reader
+    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
+        pClipCtxt->pReaderContext,
+        (M4_StreamHandler *)pClipCtxt->pAudioStream);
+
+    if (M4NO_ERROR != err) {
+        M4OSA_TRACE1_1("M4VSS3GPP_intCheckAndGetCodecAacProperties\
+            Error in reseting reader: 0x%x", err);
+    }
+
+    return err;
+
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c b/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c
new file mode 100755
index 0000000..e2c6d7a
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c
@@ -0,0 +1,1032 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_ClipAnalysis.c
+ * @brief Implementation of functions related to analysis of input clips
+ * @note All functions in this file are static, i.e. non public
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VD_EXTERNAL_Interface.h"
+
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /* OSAL memory management */
+#include "M4OSA_Debug.h" /* OSAL debug management */
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editAnalyseClip()
+ * @brief This function allows checking if a clip is compatible with VSS 3GPP editing
+ * @note It also fills a ClipAnalysis structure, which can be used to check if two
+ * clips are compatible
+ * @param pClip (IN) File descriptor of the input 3GPP/MP3 clip file.
+ * @param pClipProperties (IN) Pointer to a valid ClipProperties structure.
+ * @param FileType (IN) Type of the input file (.3gp, .amr, .mp3)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED
+ * @return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED
+ * @return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE
+ * @return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE
+ * @return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC
+ * @return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ * @return M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE
+ * @return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT
+ * @return M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editAnalyseClip( M4OSA_Void *pClip,
+ M4VIDEOEDITING_FileType FileType,
+ M4VIDEOEDITING_ClipProperties *pClipProperties,
+ M4OSA_FileReadPointer *pFileReadPtrFct )
+{
+ M4OSA_ERR err;
+ M4VSS3GPP_ClipContext *pClipContext;
+ M4VSS3GPP_ClipSettings ClipSettings;
+
+ M4OSA_TRACE3_2(
+ "M4VSS3GPP_editAnalyseClip called with pClip=0x%x, pClipProperties=0x%x",
+ pClip, pClipProperties);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pClip), M4ERR_PARAMETER,
+ "M4VSS3GPP_editAnalyseClip: pClip is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pClipProperties), M4ERR_PARAMETER,
+ "M4VSS3GPP_editAnalyseClip: pClipProperties is M4OSA_NULL");
+
+ /**
+ * Build dummy clip settings, in order to use the editClipOpen function */
+ /* Begin/end cut times are both zero here: no cut is applied, the whole
+ * clip is analysed */
+ ClipSettings.pFile = pClip;
+ ClipSettings.FileType = FileType;
+ ClipSettings.uiBeginCutTime = 0;
+ ClipSettings.uiEndCutTime = 0;
+
+ /* Clip properties not build yet, set at least this flag */
+ ClipSettings.ClipProperties.bAnalysed = M4OSA_FALSE;
+
+ /**
+ * Open the clip in fast open mode */
+ err = M4VSS3GPP_intClipInit(&pClipContext, pFileReadPtrFct);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intClipInit() returns 0x%x!",
+ err);
+
+ /**
+ * Free the clip */
+ if( M4OSA_NULL != pClipContext )
+ {
+ M4VSS3GPP_intClipCleanUp(pClipContext);
+ }
+ return err;
+ }
+
+ /* NOTE(review): the meaning of the three boolean arguments is not visible
+ * from this file -- confirm against the M4VSS3GPP_intClipOpen prototype */
+ err = M4VSS3GPP_intClipOpen(pClipContext, &ClipSettings, M4OSA_FALSE,
+ M4OSA_TRUE, M4OSA_TRUE);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intClipOpen() returns 0x%x!",
+ err);
+
+ M4VSS3GPP_intClipCleanUp(pClipContext);
+
+ /**
+ * Here it is better to return the Editing specific error code */
+ if( ( ((M4OSA_UInt32)M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err)
+ || (((M4OSA_UInt32)M4ERR_DECODER_H263_NOT_BASELINE) == err) )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editAnalyseClip:\
+ M4VSS3GPP_intClipOpen() returns M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED");
+ return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED;
+ }
+ return err;
+ }
+
+ /**
+ * Analyse the clip */
+ /* ARGB8888 inputs skip stream analysis -- presumably still-image inputs
+ * with no 3GPP streams to inspect; confirm with callers */
+ if(M4VIDEOEDITING_kFileType_ARGB8888 != pClipContext->pSettings->FileType) {
+ err = M4VSS3GPP_intBuildAnalysis(pClipContext, pClipProperties);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intBuildAnalysis() returns 0x%x!",
+ err);
+
+ /**
+ * Free the clip */
+ M4VSS3GPP_intClipCleanUp(pClipContext);
+ return err;
+ }
+ }
+ /**
+ * Free the clip */
+ /* Close first, then clean up: both are needed on the success path, and
+ * CleanUp alone is used on the error paths above */
+ err = M4VSS3GPP_intClipClose(pClipContext);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editAnalyseClip: M4VSS_intClipClose() returns 0x%x!",
+ err);
+ M4VSS3GPP_intClipCleanUp(pClipContext);
+ return err;
+ }
+
+ M4VSS3GPP_intClipCleanUp(pClipContext);
+
+ /**
+ * Check the clip is compatible with VSS editing */
+ if(M4VIDEOEDITING_kFileType_ARGB8888 != ClipSettings.FileType) {
+ err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(pClipProperties);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editAnalyseClip:\
+ M4VSS3GPP_intCheckClipCompatibleWithVssEditing() returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_editAnalyseClip(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility()
+ * @brief This function allows checking if two clips are compatible with each other for
+ * VSS 3GPP editing assembly feature.
+ * @note
+ * @param pClip1Properties (IN) Clip analysis of the first clip
+ * @param pClip2Properties (IN) Clip analysis of the second clip
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT
+ * @return M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE
+ * @return M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE
+ * @return M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING
+ * @return M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY
+ * @return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility( M4VIDEOEDITING_ClipProperties *pClip1Properties,
+ M4VIDEOEDITING_ClipProperties *pClip2Properties )
+{
+ /* NOTE(review): 'err' is never used and 'video_err' is never assigned a
+ * failure anywhere in this function (the check at analysis_done can never
+ * fire) -- both are candidates for removal in a real code change */
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_ERR video_err = M4NO_ERROR;
+ M4OSA_ERR audio_err = M4NO_ERROR;
+
+ M4OSA_Bool bClip1IsAAC = M4OSA_FALSE;
+ M4OSA_Bool bClip2IsAAC = M4OSA_FALSE;
+
+ M4OSA_TRACE3_2("M4VSS3GPP_editCheckClipCompatibility called with pClip1Analysis=0x%x,\
+ pClip2Analysis=0x%x", pClip1Properties, pClip2Properties);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pClip1Properties), M4ERR_PARAMETER,
+ "M4VSS3GPP_editCheckClipCompatibility: pClip1Properties is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pClip2Properties), M4ERR_PARAMETER,
+ "M4VSS3GPP_editCheckClipCompatibility: pClip2Properties is M4OSA_NULL");
+
+ if( ( M4VIDEOEDITING_kFileType_MP3 == pClip1Properties->FileType)
+ || (M4VIDEOEDITING_kFileType_AMR == pClip1Properties->FileType) )
+ {
+ /* Pointer identity: the caller passes the SAME properties structure
+ * twice when cutting a single clip (splitter mode); two distinct
+ * structures mean a real assembly of two clips, which MP3/AMR do not
+ * support */
+ if( pClip1Properties != pClip2Properties )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editCheckClipCompatibility: MP3 CAN ONLY BE CUT,\
+ returning M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY");
+ return M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY;
+ }
+ else
+ {
+ /* We are in VSS Splitter mode */
+ /* The label immediately follows, so this goto is effectively a
+ * fall-through; kept to make the convergence point explicit */
+ goto audio_analysis;
+ }
+ }
+
+ /********** Audio ************/
+
+audio_analysis:
+ if( M4VIDEOEDITING_kNoneAudio != pClip1Properties->
+ AudioStreamType ) /**< if there is an audio stream */
+ {
+ /**
+ * Check audio format is AAC */
+ switch( pClip1Properties->AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ bClip1IsAAC = M4OSA_TRUE;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if( M4VIDEOEDITING_kNoneAudio != pClip2Properties->
+ AudioStreamType ) /**< if there is an audio stream */
+ {
+ /**
+ * Check audio format is AAC */
+ switch( pClip2Properties->AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ bClip2IsAAC = M4OSA_TRUE;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /**
+ * If there is no audio, the clips are compatibles ... */
+ if( ( pClip1Properties->AudioStreamType != M4VIDEOEDITING_kNoneAudio)
+ && (pClip2Properties->AudioStreamType != M4VIDEOEDITING_kNoneAudio) )
+ {
+ /**
+ * Check both clips have same audio stream type
+ * And let_s say AAC, AAC+ and eAAC+ are mixable */
+ if( ( pClip1Properties->AudioStreamType
+ != pClip2Properties->AudioStreamType)
+ && (( M4OSA_FALSE == bClip1IsAAC) || (M4OSA_FALSE == bClip2IsAAC)) )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editCheckClipCompatibility:\
+ Clips don't have the same Audio Stream Type");
+
+ audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE;
+ goto analysis_done;
+ }
+
+ /**
+ * Check both clips have same number of channels */
+ if( pClip1Properties->uiNbChannels != pClip2Properties->uiNbChannels )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same Nb of Channels");
+ audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS;
+ goto analysis_done;
+ }
+
+ /**
+ * Check both clips have same sampling frequency */
+ if( pClip1Properties->uiSamplingFrequency
+ != pClip2Properties->uiSamplingFrequency )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editCheckClipCompatibility:\
+ Clips don't have the same Sampling Frequency");
+ audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY;
+ goto analysis_done;
+ }
+ }
+
+ /* Only reached when all audio checks passed (or at least one clip has
+ * no audio): mark clip 2 as audio-compatible with the master clip */
+ pClip2Properties->bAudioIsCompatibleWithMasterClip = M4OSA_TRUE;
+
+ /**
+ * Return with no error */
+
+analysis_done:
+ if( video_err != M4NO_ERROR )
+ return video_err;
+
+ if( audio_err != M4NO_ERROR )
+ return audio_err;
+
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_editCheckClipCompatibility(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intBuildAnalysis()
+ * @brief Get video and audio properties from the clip streams
+ * @note This function must return fatal errors only (errors that should not happen
+ * in the final integrated product).
+ * @param pClipCtxt (IN) internal clip context
+ * @param pClipProperties (OUT) Pointer to a valid ClipProperties structure.
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intBuildAnalysis( M4VSS3GPP_ClipContext *pClipCtxt,
+ M4VIDEOEDITING_ClipProperties *pClipProperties )
+{
+ M4OSA_ERR err;
+ M4DECODER_MPEG4_DecoderConfigInfo DecConfigInfo;
+ M4DECODER_VideoSize dummySize;
+ /* NOTE(review): AVCProfle is declared but never used in this function
+ * (and the name is a typo for "AVCProfile") -- candidate for removal */
+ M4DECODER_AVCProfileLevel AVCProfle;
+
+ pClipProperties->bAnalysed = M4OSA_FALSE;
+
+ /**
+ * Reset video characteristics */
+ pClipProperties->VideoStreamType = M4VIDEOEDITING_kNoneVideo;
+ pClipProperties->uiClipVideoDuration = 0;
+ pClipProperties->uiVideoBitrate = 0;
+ pClipProperties->uiVideoMaxAuSize = 0;
+ pClipProperties->uiVideoWidth = 0;
+ pClipProperties->uiVideoHeight = 0;
+ pClipProperties->uiVideoTimeScale = 0;
+ pClipProperties->fAverageFrameRate = 0.0;
+ pClipProperties->uiVideoProfile =
+ M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
+ pClipProperties->uiVideoLevel =
+ M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
+ pClipProperties->bMPEG4dataPartition = M4OSA_FALSE;
+ pClipProperties->bMPEG4rvlc = M4OSA_FALSE;
+ pClipProperties->bMPEG4resynchMarker = M4OSA_FALSE;
+
+ memset((void *) &pClipProperties->ftyp,0,
+ sizeof(pClipProperties->ftyp));
+
+ /**
+ * Video Analysis */
+ if( M4OSA_NULL != pClipCtxt->pVideoStream )
+ {
+ pClipProperties->uiVideoWidth = pClipCtxt->pVideoStream->m_videoWidth;
+ pClipProperties->uiVideoHeight = pClipCtxt->pVideoStream->m_videoHeight;
+ pClipProperties->fAverageFrameRate =
+ pClipCtxt->pVideoStream->m_averageFrameRate;
+
+ switch( pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+ {
+ case M4DA_StreamTypeVideoMpeg4:
+
+ pClipProperties->VideoStreamType = M4VIDEOEDITING_kMPEG4;
+
+ /* This issue is so incredibly stupid that it's depressing. Basically, a file can be analysed
+ outside of any context (besides that of the clip itself), so that for instance two clips can
+ be checked for compatibility before allocating an edit context for editing them. But this
+ means there is no way in heck to pass an external video decoder (to begin with) to this
+ function, as they work by being registered in an existing context; furthermore, it is actually
+ pretty overkill to use a full decoder for that, moreso a HARDWARE decoder just to get the
+ clip config info. In fact, the hardware itself doesn't provide this service, in the case of a
+ HW decoder, the shell builds the config info itself, so we don't need the actual decoder, only
+ a detached functionality of it. So in case HW/external decoders may be present, we instead use
+ directly the DSI parsing function of the shell HW decoder (which we know to be present, since
+ HW decoders are possible) to get the config info. Notice this function is used even if the
+ software decoder is actually present and even if it will end up being actually used: figuring
+ out the config does not involve actual decoding nor the particularities of a specific decoder,
+ it's the fact that it's MPEG4 that matters, so it should not be functionally any different
+ from the way it was done before (and it's light enough for performance not to be any problem
+ whatsoever). */
+
+ err = M4DECODER_EXTERNAL_ParseVideoDSI(pClipCtxt->pVideoStream->
+ m_basicProperties.m_pDecoderSpecificInfo,
+ pClipCtxt->pVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
+ &DecConfigInfo, &dummySize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intBuildAnalysis():\
+ M4DECODER_EXTERNAL_ParseVideoDSI returns 0x%08X", err);
+ return err;
+ }
+
+ pClipProperties->uiVideoTimeScale =
+ DecConfigInfo.uiTimeScale;
+ pClipProperties->bMPEG4dataPartition =
+ DecConfigInfo.bDataPartition;
+ pClipProperties->bMPEG4rvlc =
+ DecConfigInfo.bUseOfRVLC;
+ pClipProperties->bMPEG4resynchMarker =
+ DecConfigInfo.uiUseOfResynchMarker;
+ err = getMPEG4ProfileAndLevel(DecConfigInfo.uiProfile,
+ &(pClipProperties->uiVideoProfile),
+ &(pClipProperties->uiVideoLevel));
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1("M4VSS3GPP_intBuildAnalysis(): \
+ getMPEG4ProfileAndLevel returns 0x%08X", err);
+ return err;
+ }
+ break;
+
+ case M4DA_StreamTypeVideoH263:
+
+ pClipProperties->VideoStreamType = M4VIDEOEDITING_kH263;
+ /* H263 time scale is always 30000 */
+ pClipProperties->uiVideoTimeScale = 30000;
+
+ err = getH263ProfileAndLevel(pClipCtxt->pVideoStream->
+ m_basicProperties.m_pDecoderSpecificInfo,
+ pClipCtxt->pVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
+ &pClipProperties->uiVideoProfile,
+ &pClipProperties->uiVideoLevel);
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1("M4VSS3GPP_intBuildAnalysis(): \
+ getH263ProfileAndLevel returns 0x%08X", err);
+ return err;
+ }
+ break;
+
+ case M4DA_StreamTypeVideoMpeg4Avc:
+
+ pClipProperties->VideoStreamType = M4VIDEOEDITING_kH264;
+ err = getAVCProfileAndLevel(pClipCtxt->pVideoStream->
+ m_basicProperties.m_pDecoderSpecificInfo,
+ pClipCtxt->pVideoStream->m_basicProperties.m_decoderSpecificInfoSize,
+ &pClipProperties->uiVideoProfile,
+ &pClipProperties->uiVideoLevel);
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1("M4VSS3GPP_intBuildAnalysis(): \
+ getAVCProfileAndLevel returns 0x%08X", err);
+ return err;
+ }
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intBuildAnalysis: unknown input video format (0x%x),\
+ returning M4NO_ERROR",
+ pClipCtxt->pVideoStream->m_basicProperties.m_streamType);
+
+ /** We do not return error here.
+ * The video format compatibility check will be done latter */
+ return M4NO_ERROR;
+ }
+
+ pClipProperties->uiClipVideoDuration =
+ (M4OSA_UInt32)pClipCtxt->pVideoStream->m_basicProperties.m_duration;
+ pClipProperties->uiVideoMaxAuSize =
+ pClipCtxt->pVideoStream->m_basicProperties.m_maxAUSize;
+
+ /* if video bitrate not available retrieve an estimation of the overall bitrate */
+ pClipProperties->uiVideoBitrate =
+ (M4OSA_UInt32)pClipCtxt->pVideoStream->
+ m_basicProperties.m_averageBitRate;
+
+ if( 0 == pClipProperties->uiVideoBitrate )
+ {
+ /* Return value deliberately ignored: a zero bitrate is tolerated */
+ pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+ pClipCtxt->pReaderContext, M4READER_kOptionID_Bitrate,
+ &pClipProperties->uiVideoBitrate);
+
+ if( M4OSA_NULL != pClipCtxt->pAudioStream )
+ {
+ /* we get the overall bitrate, substract the audio bitrate if any */
+ /* NOTE(review): this subtraction can wrap below zero on an
+ * unsigned value if the reported audio bitrate exceeds the
+ * overall bitrate -- confirm reader guarantees */
+ pClipProperties->uiVideoBitrate -=
+ pClipCtxt->pAudioStream->m_basicProperties.m_averageBitRate;
+ }
+ }
+ }
+
+ /**
+ * Reset audio characteristics */
+ pClipProperties->AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+ pClipProperties->uiClipAudioDuration = 0;
+ pClipProperties->uiAudioBitrate = 0;
+ pClipProperties->uiAudioMaxAuSize = 0;
+ pClipProperties->uiNbChannels = 0;
+ pClipProperties->uiSamplingFrequency = 0;
+ pClipProperties->uiExtendedSamplingFrequency = 0;
+ pClipProperties->uiDecodedPcmSize = 0;
+
+ /**
+ * Audio Analysis */
+ if( M4OSA_NULL != pClipCtxt->pAudioStream )
+ {
+ switch( pClipCtxt->pAudioStream->m_basicProperties.m_streamType )
+ {
+ case M4DA_StreamTypeAudioAmrNarrowBand:
+
+ pClipProperties->AudioStreamType = M4VIDEOEDITING_kAMR_NB;
+ break;
+
+ case M4DA_StreamTypeAudioAac:
+
+ pClipProperties->AudioStreamType = M4VIDEOEDITING_kAAC;
+ break;
+
+ case M4DA_StreamTypeAudioMp3:
+
+ pClipProperties->AudioStreamType = M4VIDEOEDITING_kMP3;
+ break;
+
+ case M4DA_StreamTypeAudioEvrc:
+
+ pClipProperties->AudioStreamType = M4VIDEOEDITING_kEVRC;
+ break;
+
+ case M4DA_StreamTypeAudioPcm:
+
+ pClipProperties->AudioStreamType = M4VIDEOEDITING_kPCM;
+ break;
+
+ default:
+
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intBuildAnalysis: unknown input audio format (0x%x),\
+ returning M4NO_ERROR!",
+ pClipCtxt->pAudioStream->m_basicProperties.m_streamType);
+ return
+ M4NO_ERROR; /**< We do not return error here.
+ The audio format compatibility check will be done latter */
+ }
+
+ pClipProperties->uiAudioMaxAuSize =
+ pClipCtxt->pAudioStream->m_basicProperties.m_maxAUSize;
+ pClipProperties->uiClipAudioDuration =
+ (M4OSA_UInt32)pClipCtxt->pAudioStream->m_basicProperties.m_duration;
+
+ pClipProperties->uiNbChannels = pClipCtxt->pAudioStream->m_nbChannels;
+ pClipProperties->uiSamplingFrequency =
+ pClipCtxt->pAudioStream->m_samplingFrequency;
+ pClipProperties->uiDecodedPcmSize =
+ pClipCtxt->pAudioStream->m_byteFrameLength
+ * pClipCtxt->pAudioStream->m_byteSampleSize
+ * pClipCtxt->pAudioStream->m_nbChannels;
+
+ /**
+ * Bugfix P4ME00001128: With some IMTC files, the AMR bit rate is 0 kbps
+ according the GetProperties function */
+ pClipProperties->uiAudioBitrate =
+ (M4OSA_UInt32)pClipCtxt->pAudioStream->
+ m_basicProperties.m_averageBitRate;
+
+ if( 0 == pClipProperties->uiAudioBitrate )
+ {
+ if( M4VIDEOEDITING_kAMR_NB == pClipProperties->AudioStreamType )
+ {
+ /**
+ *Better returning a guessed 12.2 kbps value than a sure-to-be-false 0 kbps value!*/
+ pClipProperties->uiAudioBitrate = M4VSS3GPP_AMR_DEFAULT_BITRATE;
+ }
+ else if( M4VIDEOEDITING_kEVRC == pClipProperties->AudioStreamType )
+ {
+ /**
+ *Better returning a guessed 9.2 kbps value than a sure-to-be-false 0 kbps value!*/
+ pClipProperties->uiAudioBitrate =
+ M4VSS3GPP_EVRC_DEFAULT_BITRATE;
+ }
+ else
+ {
+ pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+ pClipCtxt->pReaderContext, M4READER_kOptionID_Bitrate,
+ &pClipProperties->uiAudioBitrate);
+
+ if( M4OSA_NULL != pClipCtxt->pVideoStream )
+ {
+ /* we get the overall bitrate, substract the video bitrate if any */
+ pClipProperties->uiAudioBitrate -= pClipCtxt->pVideoStream->
+ m_basicProperties.m_averageBitRate;
+ }
+ }
+ }
+
+ /* New aac properties */
+ /* For AAC the decoder-reported channel count / sampling frequency
+ * override the reader values, and SBR / PS presence upgrades the
+ * stream type to AAC+ / eAAC+ respectively */
+ if( M4DA_StreamTypeAudioAac
+ == pClipCtxt->pAudioStream->m_basicProperties.m_streamType )
+ {
+ pClipProperties->uiNbChannels = pClipCtxt->AacProperties.aNumChan;
+ pClipProperties->uiSamplingFrequency =
+ pClipCtxt->AacProperties.aSampFreq;
+
+ if( pClipCtxt->AacProperties.aSBRPresent )
+ {
+ pClipProperties->AudioStreamType = M4VIDEOEDITING_kAACplus;
+ pClipProperties->uiExtendedSamplingFrequency =
+ pClipCtxt->AacProperties.aExtensionSampFreq;
+ }
+
+ if( pClipCtxt->AacProperties.aPSPresent )
+ {
+ pClipProperties->AudioStreamType = M4VIDEOEDITING_keAACplus;
+ }
+ }
+ }
+
+ /* Get 'ftyp' atom */
+ /* NOTE(review): err is assigned here but never checked; ftyp was
+ * zero-initialised above, so a failure leaves it empty */
+ err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+ pClipCtxt->pReaderContext,
+ M4READER_kOptionID_3gpFtypBox, &pClipProperties->ftyp);
+
+ /**
+ * We write the VSS 3GPP version in the clip analysis to be sure the integrator doesn't
+ * mix older analysis results with newer libraries */
+ pClipProperties->Version[0] = M4VIDEOEDITING_VERSION_MAJOR;
+ pClipProperties->Version[1] = M4VIDEOEDITING_VERSION_MINOR;
+ pClipProperties->Version[2] = M4VIDEOEDITING_VERSION_REVISION;
+
+ pClipProperties->FileType = pClipCtxt->pSettings->FileType;
+
+ /* Overall clip duration is the longer of the two track durations */
+ if( pClipProperties->uiClipVideoDuration
+ > pClipProperties->uiClipAudioDuration )
+ pClipProperties->uiClipDuration = pClipProperties->uiClipVideoDuration;
+ else
+ pClipProperties->uiClipDuration = pClipProperties->uiClipAudioDuration;
+
+ /* Reset compatibility chart */
+ pClipProperties->bVideoIsEditable = M4OSA_FALSE;
+ pClipProperties->bAudioIsEditable = M4OSA_FALSE;
+ pClipProperties->bVideoIsCompatibleWithMasterClip = M4OSA_FALSE;
+ pClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+
+ /* Analysis successfully completed */
+ pClipProperties->bAnalysed = M4OSA_TRUE;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intBuildAnalysis(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing()
+ * @brief Check if the clip is compatible with VSS editing
+ * @note
+ * @param pClipCtxt (IN) internal clip context
+ * @param pClipProperties (OUT) Pointer to a valid ClipProperties structure.
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing(
+ M4VIDEOEDITING_ClipProperties *pClipProperties )
+{
+ /* Counts streams the editor can process; zero at the end means the file
+ * has nothing VSS can work with */
+ M4OSA_UInt32 uiNbOfValidStreams = 0;
+ M4OSA_ERR video_err = M4NO_ERROR;
+ M4OSA_ERR audio_err = M4NO_ERROR;
+ /********* file type *********/
+
+ if( M4VIDEOEDITING_kFileType_AMR == pClipProperties->FileType )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intCheckClipCompatibleWithVssEditing:\
+ returning M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED");
+ return M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED;
+ }
+
+ /* MP3 files bypass all stream checks: they are only ever cut, never
+ * assembled (see M4VSS3GPP_editCheckClipCompatibility) */
+ if( M4VIDEOEDITING_kFileType_MP3 == pClipProperties->FileType )
+ {
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+ }
+
+ /********* Video *********/
+
+ if( M4VIDEOEDITING_kNoneVideo
+ != pClipProperties->VideoStreamType ) /**< if there is a video stream */
+ {
+ /* Check video format is MPEG-4, H263 or H264 */
+ switch( pClipProperties->VideoStreamType )
+ {
+ case M4VIDEOEDITING_kH263:
+ case M4VIDEOEDITING_kMPEG4:
+ case M4VIDEOEDITING_kH264:
+ uiNbOfValidStreams++;
+ pClipProperties->bVideoIsEditable = M4OSA_TRUE;
+ break;
+
+ default: /*< KO, we return error */
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): unsupported video format");
+ video_err = M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+ break;
+ }
+ }
+ else
+ {
+ /**
+ * Audio only stream are currently not supported by the VSS editing feature
+ (unless in the MP3 case) */
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): No video stream in clip");
+ video_err = M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE;
+ }
+
+ /********* Audio *********/
+ if( M4VIDEOEDITING_kNoneAudio != pClipProperties->
+ AudioStreamType ) /**< if there is an audio stream */
+ {
+ /**
+ * Check audio format is AMR-NB, EVRC or AAC */
+ switch( pClipProperties->AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ uiNbOfValidStreams++;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ /* AAC is editable only at these sampling rates; other rates
+ * still count as a valid (pass-through) stream but leave
+ * bAudioIsEditable false */
+ switch( pClipProperties->uiSamplingFrequency )
+ {
+ case 8000:
+ case 16000:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ break;
+
+ default:
+ break;
+ }
+ uiNbOfValidStreams++;
+ break;
+
+ case M4VIDEOEDITING_kEVRC:
+ /*< OK, we proceed, no return */
+ uiNbOfValidStreams++;
+ break;
+
+ default: /*< KO, we return error */
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): unsupported audio format");
+ audio_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+ break;
+ }
+ }
+ else
+ {
+ /* Silence is always editable */
+ pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ }
+
+ /**
+ * Check there is at least one valid stream in the file... */
+ /* Video errors take precedence over audio errors */
+ if( video_err != M4NO_ERROR )
+ return video_err;
+
+ if( audio_err != M4NO_ERROR )
+ return audio_err;
+
+ if( 0 == uiNbOfValidStreams )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): File contains no supported stream,\
+ returning M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE");
+ return M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility()
+ * @brief This function allows checking if two clips are compatible with each other for
+ * VSS 3GPP audio mixing feature.
+ * @note
+ * @param pC (IN) Context of the audio mixer
+ * @param pInputClipProperties (IN) Clip analysis of the first clip
+ * @param pAddedClipProperties (IN) Clip analysis of the second clip
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP
+ * @return M4NO_ERROR
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_intAudioMixingCompatibility( M4VSS3GPP_InternalAudioMixingContext
+ *pC, M4VIDEOEDITING_ClipProperties *pInputClipProperties,
+ M4VIDEOEDITING_ClipProperties *pAddedClipProperties )
+{
+ M4OSA_Bool bClip1IsAAC = M4OSA_FALSE;
+ M4OSA_Bool bClip2IsAAC = M4OSA_FALSE;
+
+ /**
+ * Reset settings */
+ pInputClipProperties->bAudioIsEditable = M4OSA_FALSE;
+ pAddedClipProperties->bAudioIsEditable = M4OSA_FALSE;
+ pInputClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+ pAddedClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+
+ /**
+ * Check that analysis has been generated by this version of the VSS3GPP library */
+ if( ( pInputClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
+ || (pInputClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
+ || (pInputClipProperties->Version[2]
+ != M4VIDEOEDITING_VERSION_REVISION) )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingCompatibility: The clip analysis has been generated\
+ by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+ return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
+ }
+
+ if( ( pAddedClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
+ || (pAddedClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
+ || (pAddedClipProperties->Version[2]
+ != M4VIDEOEDITING_VERSION_REVISION) )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingCompatibility: The clip analysis has been generated\
+ by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+ return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
+ }
+
+ /********* input file type *********/
+
+ /* Only 3GPP containers can host the mixed output's original track */
+ if( M4VIDEOEDITING_kFileType_3GPP != pInputClipProperties->FileType )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingCompatibility:\
+ returning M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP");
+ return M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP;
+ }
+
+ /********* input audio *********/
+
+ if( M4VIDEOEDITING_kNoneAudio != pInputClipProperties->
+ AudioStreamType ) /**< if there is an audio stream */
+ {
+ /**
+ * Check audio format is AMR-NB or AAC */
+ switch( pInputClipProperties->AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ /* AAC is editable only at these sampling rates */
+ switch( pInputClipProperties->uiSamplingFrequency )
+ {
+ case 8000:
+ case 16000:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ break;
+
+ default:
+ break;
+ }
+ bClip1IsAAC = M4OSA_TRUE;
+ break;
+ default:
+ break;
+ }
+ }
+ else
+ {
+ /* Silence is always editable */
+ pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ }
+
+ /********* added audio *********/
+
+ if( M4VIDEOEDITING_kNoneAudio != pAddedClipProperties->
+ AudioStreamType ) /**< if there is an audio stream */
+ {
+ /**
+ * Check audio format is AMR-NB or AAC */
+ switch( pAddedClipProperties->AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+ M4OSA_TRUE; /* I use this field to know if silence supported */
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ switch( pAddedClipProperties->uiSamplingFrequency )
+ {
+ case 8000:
+ case 16000:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ break;
+
+ default:
+ break;
+ }
+ pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+ M4OSA_TRUE; /* I use this field to know if silence supported */
+ bClip2IsAAC = M4OSA_TRUE;
+ break;
+
+ case M4VIDEOEDITING_kEVRC:
+ break;
+
+ case M4VIDEOEDITING_kPCM:
+ pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+ M4OSA_TRUE; /* I use this field to know if silence supported */
+
+ /* NOTE(review): 16 kHz PCM is flagged via bClip2IsAAC even
+ * though it is not AAC -- the flag appears to be reused as
+ * "decodable added track"; confirm against the mixer code */
+ if( pAddedClipProperties->uiSamplingFrequency == 16000 )
+ {
+ bClip2IsAAC = M4OSA_TRUE;
+ }
+ break;
+
+ case M4VIDEOEDITING_kMP3: /*RC*/
+ pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+ M4OSA_TRUE; /* I use this field to know if silence supported */
+ break;
+
+ default:
+ /* The writer cannot write this into a 3gpp */
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intAudioMixingCompatibility:\
+ returning M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM");
+ return M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM;
+ }
+ }
+ else
+ {
+ /* Silence is always editable */
+ pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+ pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+ M4OSA_TRUE; /* I use this field to know if silence supported */
+ }
+
+ /* When the original audio track is kept (mixing, not replacing), the
+ * added track may need sample-rate and/or channel conversion to match it */
+ if( pC->bRemoveOriginal == M4OSA_FALSE )
+ {
+ if( pInputClipProperties->uiSamplingFrequency
+ != pAddedClipProperties->uiSamplingFrequency )
+ {
+ /* We need to call SSRC in order to align ASF and/or nb of channels */
+ /* Moreover, audio encoder may be needed in case of audio replacing... */
+ pC->b_SSRCneeded = M4OSA_TRUE;
+ }
+
+ if( pInputClipProperties->uiNbChannels
+ < pAddedClipProperties->uiNbChannels )
+ {
+ /* Stereo to Mono */
+ pC->ChannelConversion = 1;
+ }
+ else if( pInputClipProperties->uiNbChannels
+ > pAddedClipProperties->uiNbChannels )
+ {
+ /* Mono to Stereo */
+ pC->ChannelConversion = 2;
+ }
+ }
+
+ pInputClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_TRUE;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intAudioMixingCompatibility(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c b/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c
new file mode 100755
index 0000000..1ced937
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c
@@ -0,0 +1,1037 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file M4VSS3GPP_Codecs.c
+ * @brief VSS implementation
+ * @note This file contains all functions related to audio/video
+ * codec manipulations.
+ *************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_Debug.h" /**< Include for OSAL debug services */
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h" /**< Internal types of the VSS */
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_clearInterfaceTables()
+ * @brief Clear encoders, decoders, reader and writers interfaces tables
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: The context is null
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_clearInterfaceTables( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+ M4OSA_UInt8 i;
+
+ /* NOTE(review): this only nulls the table slots; nothing is freed here.
+ Any previously registered interface must be released elsewhere
+ (see the unRegisterAll* functions) or it leaks — confirm call order. */
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+
+ /* Initialisation that will allow to check if registering twice */
+ pC->pWriterGlobalFcts = M4OSA_NULL;
+ pC->pWriterDataFcts = M4OSA_NULL;
+ pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+ pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+ pC->pCurrentAudioEncoderUserData = M4OSA_NULL;
+ pC->pCurrentAudioDecoderUserData = M4OSA_NULL;
+
+ pC->pCurrentVideoEncoderExternalAPI = M4OSA_NULL;
+ pC->pCurrentVideoEncoderUserData = M4OSA_NULL;
+
+ /* Clear writer interface table */
+ for ( i = 0; i < M4WRITER_kType_NB; i++ )
+ {
+ pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+ pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+ }
+
+ /* Clear video encoder tables (interface, external API, user data) */
+ for ( i = 0; i < M4ENCODER_kVideo_NB; i++ )
+ {
+ pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+ pC->pVideoEncoderExternalAPITable[i] = M4OSA_NULL;
+ pC->pVideoEncoderUserDataTable[i] = M4OSA_NULL;
+ }
+
+ /* Clear audio encoder tables; flag FALSE marks slots as "internal" codec */
+ for ( i = 0; i < M4ENCODER_kAudio_NB; i++ )
+ {
+ pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+ pC->pAudioEncoderFlag[i] = M4OSA_FALSE;
+ pC->pAudioEncoderUserDataTable[i] = M4OSA_NULL;
+ }
+
+ /* Initialisation that will allow to check if registering twice */
+ pC->m_pReader = M4OSA_NULL;
+ pC->m_pReaderDataIt = M4OSA_NULL;
+ pC->m_uiNbRegisteredReaders = 0;
+
+ for ( i = 0; i < M4READER_kMediaType_NB; i++ )
+ {
+ pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+ pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+ }
+
+ pC->m_pVideoDecoder = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+ pC->m_pCurrentVideoDecoderUserData = M4OSA_NULL;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+ pC->m_uiNbRegisteredVideoDec = 0;
+
+ for ( i = 0; i < M4DECODER_kVideoType_NB; i++ )
+ {
+ pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+ pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+ }
+
+ pC->m_pAudioDecoder = M4OSA_NULL;
+
+ for ( i = 0; i < M4AD_kType_NB; i++ )
+ {
+ pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+ pC->m_pAudioDecoderFlagTable[i] = M4OSA_FALSE;
+ pC->pAudioDecoderUserDataTable[i] = M4OSA_NULL;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerWriter()
+ * @brief This function will register a specific file format writer.
+ * @note According to the Mediatype, this function will store in the internal
+ * context the writer context.
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext,pWtrGlobalInterface or pWtrDataInterface is M4OSA_NULL
+ * (debug only), or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerWriter( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4WRITER_OutputFileType MediaType,
+ M4WRITER_GlobalInterface *pWtrGlobalInterface,
+ M4WRITER_DataInterface *pWtrDataInterface )
+{
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+ "VSS: context is M4OSA_NULL in M4VSS3GPP_registerWriter");
+ M4OSA_DEBUG_IF2((pWtrGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+ "pWtrGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerWriter");
+ M4OSA_DEBUG_IF2((pWtrDataInterface == M4OSA_NULL), M4ERR_PARAMETER,
+ "pWtrDataInterface is M4OSA_NULL in M4VSS3GPP_registerWriter");
+
+ M4OSA_TRACE3_3(
+ "VSS: M4VSS3GPP_registerWriter called with pContext=0x%x, pWtrGlobalInterface=0x%x,\
+ pWtrDataInterface=0x%x",
+ pC, pWtrGlobalInterface, pWtrDataInterface);
+
+ /* Reject out-of-range media types before indexing WriterInterface[] */
+ if( ( MediaType == M4WRITER_kUnknown) || (MediaType >= M4WRITER_kType_NB) )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+ return M4ERR_PARAMETER;
+ }
+
+ /* Unlike the encoder/decoder registration functions, double registration
+ is an error here: the existing interface is NOT replaced or freed. */
+ if( pC->WriterInterface[MediaType].pGlobalFcts != M4OSA_NULL )
+ {
+ /* a writer corresponding to this media type has already been registered !*/
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+ "This media type has already been registered");
+ return M4ERR_PARAMETER;
+ }
+
+ /*
+ * Save writer interface in context */
+ pC->WriterInterface[MediaType].pGlobalFcts = pWtrGlobalInterface;
+ pC->WriterInterface[MediaType].pDataFcts = pWtrDataInterface;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerVideoEncoder()
+ * @brief This function will register a specific video encoder.
+ * @note According to the Mediatype, this function will store in the internal
+ * context the encoder context.
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
+ * or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerVideoEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4ENCODER_Format MediaType,
+ M4ENCODER_GlobalInterface *pEncGlobalInterface )
+{
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+ "VSS: context is M4OSA_NULL in M4VSS3GPP_registerVideoEncoder");
+ M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+ "pEncGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerVideoEncoder");
+
+ M4OSA_TRACE3_3(
+ "VSS: M4VSS3GPP_registerEncoder called with pContext=0x%x, pEncGlobalInterface=0x%x,\
+ MediaType=0x%x",
+ pC, pEncGlobalInterface, MediaType);
+
+ if( MediaType >= M4ENCODER_kVideo_NB )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+ "Invalid video encoder type");
+ return M4ERR_PARAMETER;
+ }
+
+ if( pC->pVideoEncoderInterface[MediaType] != M4OSA_NULL )
+ {
+ /* can be legitimate, in cases where we have one version that can use external encoders
+ but which still has the built-in one to be able to work without an external encoder; in
+ this case the new encoder simply replaces the old one (i.e. we unregister it first). */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ /* NOTE(review): when OMX support is compiled in and
+ bAllowFreeingOMXCodecInterface is FALSE, the old interface is
+ deliberately NOT freed here — presumably owned elsewhere; the
+ slot pointer is still dropped below. Verify ownership. */
+ if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+ {
+
+#endif
+
+ free(pC->pVideoEncoderInterface[MediaType]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ pC->pVideoEncoderInterface[MediaType] = M4OSA_NULL;
+ }
+
+ /*
+ * Save encoder interface in context */
+ pC->pVideoEncoderInterface[MediaType] = pEncGlobalInterface;
+ /* The actual userData and external API will be set by the registration function in the case
+ of an external encoder (add it as a parameter to this function in the long run?) */
+ pC->pVideoEncoderUserDataTable[MediaType] = M4OSA_NULL;
+ pC->pVideoEncoderExternalAPITable[MediaType] = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerAudioEncoder()
+ * @brief This function will register a specific audio encoder.
+ * @note According to the Mediatype, this function will store in the internal
+ * context the encoder context.
+ * @param pContext: (IN) Execution context.
+ * @param mediaType: (IN) The media type.
+ * @param pEncGlobalInterface: (OUT) the encoder interface functions.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerAudioEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4ENCODER_AudioFormat MediaType,
+ M4ENCODER_AudioGlobalInterface *pEncGlobalInterface )
+{
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+ "VSS: context is M4OSA_NULL in M4VSS3GPP_registerAudioEncoder");
+ M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+ "pEncGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerAudioEncoder");
+
+ M4OSA_TRACE3_3(
+ "VSS: M4VSS3GPP_registerAudioEncoder called pContext=0x%x, pEncGlobalInterface=0x%x,\
+ MediaType=0x%x",
+ pC, pEncGlobalInterface, MediaType);
+
+ if( MediaType >= M4ENCODER_kAudio_NB )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+ "Invalid audio encoder type");
+ return M4ERR_PARAMETER;
+ }
+
+ /* Re-registration replaces the previous interface; unlike the video
+ encoder path above, the free here is unconditional (no
+ M4VSS_SUPPORT_OMX_CODECS / bAllowFreeingOMXCodecInterface guard —
+ NOTE(review): confirm whether that asymmetry is intentional). */
+ if( pC->pAudioEncoderInterface[MediaType] != M4OSA_NULL )
+ {
+ free(pC->pAudioEncoderInterface[MediaType]);
+ pC->pAudioEncoderInterface[MediaType] = M4OSA_NULL;
+ }
+ /*
+ * Save encoder interface in context */
+ pC->pAudioEncoderInterface[MediaType] = pEncGlobalInterface;
+ pC->pAudioEncoderFlag[MediaType] = M4OSA_FALSE; /* internal encoder */
+ pC->pAudioEncoderUserDataTable[MediaType] = M4OSA_NULL;
+
+ M4OSA_TRACE3_2(
+ "M4VSS3GPP_registerAudioEncoder: pC->pAudioEncoderInterface[0x%x] = 0x%x",
+ MediaType, pC->pAudioEncoderInterface[MediaType]);
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerReader()
+ * @brief Register reader.
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerReader( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4READER_MediaType mediaType,
+ M4READER_GlobalInterface *pRdrGlobalInterface,
+ M4READER_DataInterface *pRdrDataInterface )
+{
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrGlobalInterface), M4ERR_PARAMETER,
+ "M4VSS3GPP_registerReader: invalid pointer on global interface");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrDataInterface), M4ERR_PARAMETER,
+ "M4VSS3GPP_registerReader: invalid pointer on data interface");
+
+ /* Range-check the media type before it is used as a table index */
+ if( mediaType == M4READER_kMediaTypeUnknown
+ || mediaType >= M4READER_kMediaType_NB )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+ return M4ERR_PARAMETER;
+ }
+
+ /* Double registration for a media type is rejected (not replaced) */
+ if( pC->m_pReaderGlobalItTable[mediaType] != M4OSA_NULL )
+ {
+ /* a reader corresponding to this media type has already been registered !*/
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+ "This media type has already been registered");
+ return M4ERR_PARAMETER;
+ }
+
+ pC->m_pReaderGlobalItTable[mediaType] = pRdrGlobalInterface;
+ pC->m_pReaderDataItTable[mediaType] = pRdrDataInterface;
+
+ pC->m_uiNbRegisteredReaders++;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerVideoDecoder()
+ * @brief Register video decoder
+ * @param pContext (IN/OUT) VSS context.
+ * @param decoderType (IN) Decoder type
+ * @param pDecoderInterface (IN) Decoder interface.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only),
+ * or the decoder type is invalid
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerVideoDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4DECODER_VideoType decoderType,
+ M4DECODER_VideoInterface *pDecoderInterface )
+{
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+ "M4VSS3GPP_registerVideoDecoder: invalid pointer on decoder interface");
+
+ if( decoderType >= M4DECODER_kVideoType_NB )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+ "Invalid video decoder type");
+ return M4ERR_PARAMETER;
+ }
+
+ /* NOTE(review): the braces inside this block pair up across #ifdef
+ boundaries (the `if(bAllowFreeingOMXCodecInterface) {` opens under
+ M4VSS_SUPPORT_OMX_CODECS and its `}` closes under the same guard).
+ Fragile but balanced in every preprocessor combination — do not
+ re-indent mechanically. */
+ if( pC->m_pVideoDecoderItTable[decoderType] != M4OSA_NULL )
+ {
+#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
+ /* a decoder corresponding to this media type has already been registered !*/
+
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+ "Decoder has already been registered");
+ return M4ERR_PARAMETER;
+
+#else /* external decoders are possible */
+ /* can be legitimate, in cases where we have one version that can use external decoders
+ but which still has the built-in one to be able to work without an external decoder; in
+ this case the new decoder simply replaces the old one (i.e. we unregister it first). */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+ {
+
+#endif
+
+ free(pC->m_pVideoDecoderItTable[decoderType]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ pC->m_pVideoDecoderItTable[decoderType] = M4OSA_NULL;
+ /* oh, and don't forget the user data, too. */
+ if( pC->m_pVideoDecoderUserDataTable[decoderType] != M4OSA_NULL )
+ {
+ free(pC->m_pVideoDecoderUserDataTable[decoderType]);
+ pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+ }
+#endif /* are external decoders possible? */
+
+ }
+
+ pC->m_pVideoDecoderItTable[decoderType] = pDecoderInterface;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+ pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+ /* The actual userData will be set by the registration function in the case
+ of an external decoder (add it as a parameter to this function in the long run?) */
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+ pC->m_uiNbRegisteredVideoDec++;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_registerAudioDecoder()
+ * @brief Register audio decoder
+ * @note This function is used internally by the VSS to register NXP audio decoders.
+ * @param context (IN/OUT) VSS context.
+ * @param decoderType (IN) Audio decoder type
+ * @param pDecoderInterface (IN) Audio decoder interface.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null, or the decoder type is invalid(in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerAudioDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4AD_Type decoderType, M4AD_Interface *pDecoderInterface)
+{
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+ "M4VSS3GPP_registerAudioDecoder: invalid pointer on decoder interface");
+
+ if( decoderType >= M4AD_kType_NB )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+ "Invalid audio decoder type");
+ return M4ERR_PARAMETER;
+ }
+
+ /* Re-registration replaces the previous interface: free the old one
+ first. (Fix: the original code re-tested the very same pointer it
+ had just set to M4OSA_NULL, a dead copy-paste branch that could
+ never execute; it has been removed. NOTE(review): if the intent
+ was to also release pAudioDecoderUserDataTable[decoderType],
+ that still needs a separate, deliberate change.) */
+ if(M4OSA_NULL != pC->m_pAudioDecoderItTable[decoderType])
+ {
+ free(pC->m_pAudioDecoderItTable[decoderType]);
+ pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
+ }
+
+ /* Store the new interface; flag FALSE marks it as an internal decoder */
+ pC->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
+ pC->m_pAudioDecoderFlagTable[decoderType] =
+ M4OSA_FALSE; /* internal decoder */
+ pC->pAudioDecoderUserDataTable[decoderType] = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_unRegisterAllWriters()
+ * @brief Unregister writer
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllWriters( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+ M4OSA_Int32 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+
+ /* Free every registered writer interface and clear its slot */
+ for ( i = 0; i < M4WRITER_kType_NB; i++ )
+ {
+ if( pC->WriterInterface[i].pGlobalFcts != M4OSA_NULL )
+ {
+ free(pC->WriterInterface[i].pGlobalFcts);
+ pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+ }
+
+ if( pC->WriterInterface[i].pDataFcts != M4OSA_NULL )
+ {
+ free(pC->WriterInterface[i].pDataFcts);
+ pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+ }
+ }
+
+ /* Also drop the "current writer" shortcuts, which aliased table entries */
+ pC->pWriterGlobalFcts = M4OSA_NULL;
+ pC->pWriterDataFcts = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_unRegisterAllEncoders()
+ * @brief Unregister the encoders
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllEncoders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+ M4OSA_Int32 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_TRACE3_1("M4VSS3GPP_unRegisterAllEncoders: pC=0x%x", pC);
+
+ /* Video encoders: free unless an OMX build forbids freeing the
+ interface (bAllowFreeingOMXCodecInterface == FALSE); the slot is
+ nulled either way, so ownership then rests with the OMX layer —
+ NOTE(review): confirm no leak in that configuration. */
+ for ( i = 0; i < M4ENCODER_kVideo_NB; i++ )
+ {
+ if( pC->pVideoEncoderInterface[i] != M4OSA_NULL )
+ {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+ {
+
+#endif
+
+ free(pC->pVideoEncoderInterface[i]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+ }
+ }
+
+ /* Audio encoders: additionally skip freeing external encoders
+ (pAudioEncoderFlag[i] == TRUE) — their interfaces are owned by
+ whoever registered them. */
+ for ( i = 0; i < M4ENCODER_kAudio_NB; i++ )
+ {
+ if( pC->pAudioEncoderInterface[i] != M4OSA_NULL )
+ {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+ {
+
+#endif
+ /*Don't free external audio encoders interfaces*/
+
+ if( M4OSA_FALSE == pC->pAudioEncoderFlag[i] )
+ {
+ free(pC->pAudioEncoderInterface[i]);
+ }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+ }
+ }
+
+ pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+ pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_unRegisterAllReaders()
+ * @brief Unregister reader
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllReaders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+ M4OSA_Int32 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+
+ /* Free every registered reader interface pair and clear its slot */
+ for ( i = 0; i < M4READER_kMediaType_NB; i++ )
+ {
+ if( pC->m_pReaderGlobalItTable[i] != M4OSA_NULL )
+ {
+ free(pC->m_pReaderGlobalItTable[i]);
+ pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+ }
+
+ if( pC->m_pReaderDataItTable[i] != M4OSA_NULL )
+ {
+ free(pC->m_pReaderDataItTable[i]);
+ pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+ }
+ }
+
+ /* Reset the count and the "current reader" aliases (now dangling) */
+ pC->m_uiNbRegisteredReaders = 0;
+ pC->m_pReader = M4OSA_NULL;
+ pC->m_pReaderDataIt = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_unRegisterAllDecoders()
+ * @brief Unregister the decoders
+ * @param pContext (IN/OUT) VSS context.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllDecoders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+ M4OSA_Int32 i;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_TRACE3_1("M4VSS3GPP_unRegisterAllDecoders: pC=0x%x", pC);
+
+ /* Video decoders: mirror of unRegisterAllEncoders — freeing is
+ skipped in OMX builds when bAllowFreeingOMXCodecInterface is FALSE,
+ but the slot is always nulled. */
+ for ( i = 0; i < M4DECODER_kVideoType_NB; i++ )
+ {
+ if( pC->m_pVideoDecoderItTable[i] != M4OSA_NULL )
+ {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+ {
+
+#endif
+
+ free(pC->m_pVideoDecoderItTable[i]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+
+ }
+ }
+
+ /* Audio decoders: external ones (flag TRUE) are never freed here */
+ for ( i = 0; i < M4AD_kType_NB; i++ )
+ {
+ if( pC->m_pAudioDecoderItTable[i] != M4OSA_NULL )
+ {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+ {
+
+#endif
+ /*Don't free external audio decoders interfaces*/
+
+ if( M4OSA_FALSE == pC->m_pAudioDecoderFlagTable[i] )
+ {
+ free(pC->m_pAudioDecoderItTable[i]);
+ }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+ }
+ }
+
+ pC->m_uiNbRegisteredVideoDec = 0;
+ pC->m_pVideoDecoder = M4OSA_NULL;
+
+ pC->m_pAudioDecoder = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentWriter()
+ * @brief Set current writer
+ * @param pContext (IN/OUT) VSS context.
+ * @param mediaType (IN) Media type.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentWriter( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4VIDEOEDITING_FileType mediaType )
+{
+ M4WRITER_OutputFileType writerType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+
+ /* Map the editing file type onto a writer slot; only 3GPP is writable */
+ switch( mediaType )
+ {
+ case M4VIDEOEDITING_kFileType_3GPP:
+ writerType = M4WRITER_k3GPP;
+ break;
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+ "Writer type not supported");
+ return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+ }
+
+ /* Point the "current writer" aliases at the registered interface */
+ pC->pWriterGlobalFcts = pC->WriterInterface[writerType].pGlobalFcts;
+ pC->pWriterDataFcts = pC->WriterInterface[writerType].pDataFcts;
+
+ /* Fail if no writer was registered for this slot */
+ if( pC->pWriterGlobalFcts == M4OSA_NULL
+ || pC->pWriterDataFcts == M4OSA_NULL )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+ "Writer type not supported");
+ M4OSA_TRACE1_0("Writer type not supported");
+ return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+ }
+
+ /* Writes through the shared data-interface struct, resetting any
+ writer context left over from a previous session */
+ pC->pWriterDataFcts->pWriterContext = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentVideoEncoder()
+ * @brief Set a video encoder
+ * @param pContext (IN/OUT) VSS context.
+ * @param MediaType (IN) Encoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentVideoEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4SYS_StreamType mediaType )
+{
+ M4ENCODER_Format encoderType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_TRACE3_2("M4VSS3GPP_setCurrentVideoEncoder: pC=0x%x, mediaType=0x%x",
+ pC, mediaType);
+
+ /* Map the stream type onto an encoder-table slot */
+ switch( mediaType )
+ {
+ case M4SYS_kH263:
+ encoderType = M4ENCODER_kH263;
+ break;
+
+ case M4SYS_kMPEG_4:
+ encoderType = M4ENCODER_kMPEG4;
+ break;
+
+ case M4SYS_kH264:
+ encoderType = M4ENCODER_kH264;
+ break;
+
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE,
+ M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT,
+ "Video encoder type not supported");
+ return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+ }
+
+ /* Install the selected encoder's interface, external API and user
+ data as the "current" ones used by the edit engine */
+ pC->pVideoEncoderGlobalFcts = pC->pVideoEncoderInterface[encoderType];
+ pC->pCurrentVideoEncoderExternalAPI =
+ pC->pVideoEncoderExternalAPITable[encoderType];
+ pC->pCurrentVideoEncoderUserData =
+ pC->pVideoEncoderUserDataTable[encoderType];
+
+ /* Fail if nothing was registered for this slot */
+ if( pC->pVideoEncoderGlobalFcts == M4OSA_NULL )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE,
+ M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT,
+ "Video encoder type not supported");
+ M4OSA_TRACE1_0("Video encoder type not supported");
+ return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentAudioEncoder()
+ * @brief Set an audio encoder
+ * @param context (IN/OUT) VSS context.
+ * @param MediaType (IN) Encoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentAudioEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4SYS_StreamType mediaType )
+{
+ M4ENCODER_AudioFormat encoderType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_TRACE3_2("M4VSS3GPP_setCurrentAudioEncoder: pC=0x%x, mediaType=0x%x",
+ pC, mediaType);
+
+ /* Only AMR-NB and AAC can be encoded; anything else is rejected */
+ switch( mediaType )
+ {
+ case M4SYS_kAMR:
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_setCurrentAudioEncoder: encoder type AMR");
+ encoderType = M4ENCODER_kAMRNB;
+ break;
+
+ case M4SYS_kAAC:
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_setCurrentAudioEncoder: encoder type AAC");
+ encoderType = M4ENCODER_kAAC;
+ break;
+
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE,
+ M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT,
+ "Audio encoder type not supported");
+ return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+ }
+
+ /* Install the selected encoder's interface and user data as current */
+ pC->pAudioEncoderGlobalFcts = pC->pAudioEncoderInterface[encoderType];
+ pC->pCurrentAudioEncoderUserData =
+ pC->pAudioEncoderUserDataTable[encoderType];
+
+ M4OSA_TRACE3_3(
+ "M4VSS3GPP_setCurrentAudioEncoder: pC->pAudioEncoderInterface[0x%x]=0x%x,\
+ pC->pAudioEncoderGlobalFcts = 0x%x",
+ encoderType, pC->pAudioEncoderInterface[encoderType],
+ pC->pAudioEncoderGlobalFcts);
+
+ /* Fail if nothing was registered for this slot */
+ if( pC->pAudioEncoderGlobalFcts == M4OSA_NULL )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE,
+ M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT,
+ "Audio encoder type not supported");
+ M4OSA_TRACE1_0("Audio encoder type not supported");
+ return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentReader()
+ * @brief Set current reader
+ * @param pContext (IN/OUT) VSS context.
+ * @param mediaType (IN) Media type.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentReader( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4VIDEOEDITING_FileType mediaType )
+{
+ M4READER_MediaType readerType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+
+ /* Map the editing file type onto a reader slot; 3GPP/MP4/M4V share
+ the same 3GPP reader */
+ switch( mediaType )
+ {
+ case M4VIDEOEDITING_kFileType_3GPP:
+ case M4VIDEOEDITING_kFileType_MP4:
+ case M4VIDEOEDITING_kFileType_M4V:
+ readerType = M4READER_kMediaType3GPP;
+ break;
+
+ case M4VIDEOEDITING_kFileType_AMR:
+ readerType = M4READER_kMediaTypeAMR;
+ break;
+
+ case M4VIDEOEDITING_kFileType_MP3:
+ readerType = M4READER_kMediaTypeMP3;
+ break;
+
+ case M4VIDEOEDITING_kFileType_PCM:
+ readerType = M4READER_kMediaTypePCM;
+ break;
+
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+ "Reader type not supported");
+ return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+ }
+
+ /* Install the selected reader's interfaces as the current ones */
+ pC->m_pReader = pC->m_pReaderGlobalItTable[readerType];
+ pC->m_pReaderDataIt = pC->m_pReaderDataItTable[readerType];
+
+ /* Fail if no reader was registered for this slot */
+ if( pC->m_pReader == M4OSA_NULL || pC->m_pReaderDataIt == M4OSA_NULL )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+ "Reader type not supported");
+ M4OSA_TRACE1_0("Reader type not supported");
+ return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+ }
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentVideoDecoder()
+ * @brief Set a video decoder
+ * @param pContext (IN/OUT) VSS context.
+ * @param decoderType (IN) Decoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ * @return M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED: Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentVideoDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4_StreamType mediaType )
+{
+ M4DECODER_VideoType decoderType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_TRACE3_2("M4VSS3GPP_setCurrentVideoDecoder: pC=0x%x, mediaType=0x%x",
+ pC, mediaType);
+
+ /* Map the stream type onto a decoder slot; MPEG-4 and H.263 share
+ one decoder, ARGB8888 streams go to the YUV420P path */
+ switch( mediaType )
+ {
+ case M4DA_StreamTypeVideoMpeg4:
+ case M4DA_StreamTypeVideoH263:
+ decoderType = M4DECODER_kVideoTypeMPEG4;
+ break;
+
+ case M4DA_StreamTypeVideoMpeg4Avc:
+ decoderType = M4DECODER_kVideoTypeAVC;
+ break;
+ case M4DA_StreamTypeVideoARGB8888:
+ decoderType = M4DECODER_kVideoTypeYUV420P;
+ break;
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE,
+ M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT,
+ "Video decoder type not supported");
+ return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+ }
+
+ /* Install the selected decoder (and, for external-decoder builds,
+ its user data) as the current one */
+ pC->m_pVideoDecoder = pC->m_pVideoDecoderItTable[decoderType];
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+ pC->m_pCurrentVideoDecoderUserData =
+ pC->m_pVideoDecoderUserDataTable[decoderType];
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+ /* Fail if nothing was registered for this slot */
+ if( pC->m_pVideoDecoder == M4OSA_NULL )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE,
+ M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT,
+ "Video decoder type not supported");
+ M4OSA_TRACE1_0("Video decoder type not supported");
+ return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4VSS3GPP_setCurrentAudioDecoder()
+ * @brief Set an audio decoder
+ * @param context (IN/OUT) VSS context.
+ * @param decoderType (IN) Decoder type
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentAudioDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+ M4_StreamType mediaType )
+{
+ M4AD_Type decoderType;
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_TRACE3_2("M4VSS3GPP_setCurrentAudioDecoder: pC=0x%x, mediaType=0x%x",
+ pC, mediaType);
+
+ /* Map the stream type onto a decoder slot; all AAC framings (raw,
+ ADTS, ADIF) share the single AAC decoder */
+ switch( mediaType )
+ {
+ case M4DA_StreamTypeAudioAmrNarrowBand:
+ decoderType = M4AD_kTypeAMRNB;
+ break;
+
+ case M4DA_StreamTypeAudioAac:
+ case M4DA_StreamTypeAudioAacADTS:
+ case M4DA_StreamTypeAudioAacADIF:
+ decoderType = M4AD_kTypeAAC;
+ break;
+
+ case M4DA_StreamTypeAudioMp3:
+ decoderType = M4AD_kTypeMP3;
+ break;
+
+ case M4DA_StreamTypeAudioPcm:
+ decoderType = M4AD_kTypePCM;
+ break;
+
+ default:
+ M4OSA_DEBUG_IF1(M4OSA_TRUE,
+ M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT,
+ "Audio decoder type not supported");
+ return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+ }
+
+ /* Install the selected decoder and its user data as the current ones */
+ pC->m_pAudioDecoder = pC->m_pAudioDecoderItTable[decoderType];
+ pC->pCurrentAudioDecoderUserData =
+ pC->pAudioDecoderUserDataTable[decoderType];
+
+ /* Fail if nothing was registered for this slot */
+ if( pC->m_pAudioDecoder == M4OSA_NULL )
+ {
+ M4OSA_DEBUG_IF1(M4OSA_TRUE,
+ M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT,
+ "Audio decoder type not supported");
+ M4OSA_TRACE1_0("Audio decoder type not supported");
+ return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+ }
+
+ return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Edit.c b/libvideoeditor/vss/src/M4VSS3GPP_Edit.c
new file mode 100755
index 0000000..df8b7d5
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Edit.c
@@ -0,0 +1,3475 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_Edit.c
+ * @brief Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h" /**< OSAL debug management */
+#include "M4OSA_CharStar.h" /**< OSAL string management */
+
+#ifdef WIN32
+#include "string.h" /**< for strcpy (Don't want to get dependencies
+ with M4OSA_String...) */
+
+#endif /* WIN32 */
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+#endif
+
+/************************************************************************/
+/* Static local functions */
+/************************************************************************/
+static M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck(
+ M4VSS3GPP_ClipSettings *pClip );
+static M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck(
+ M4VSS3GPP_TransitionSettings *pTransition );
+static M4OSA_Void M4VSS3GPP_intFreeSettingsList(
+ M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_ERR
+M4VSS3GPP_intCreateMP3OutputFile( M4VSS3GPP_InternalEditContext *pC,
+ M4OSA_Void *pOutputFile );
+static M4OSA_ERR M4VSS3GPP_intSwitchToNextClip(
+ M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_ERR
+M4VSS3GPP_intComputeOutputVideoAndAudioDsi( M4VSS3GPP_InternalEditContext *pC,
+ M4OSA_UInt8 uiMasterClip );
+static M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate(
+ M4VSS3GPP_InternalEditContext *pC );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetVersion()
+ * @brief   Fill *pVersionInfo with the compile-time VSS 3GPP version numbers.
+ * @note    Stateless: can be called anytime, no context is required.
+ * @param   pVersionInfo (OUT) Version info structure to fill
+ * @return  M4NO_ERROR: No error
+ * @return  M4ERR_PARAMETER: pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_GetVersion( M4_VersionInfo *pVersionInfo )
+{
+    M4OSA_TRACE3_1("M4VSS3GPP_GetVersion called with pVersionInfo=0x%x",
+        pVersionInfo);
+
+    /* Reject a NULL output pointer (debug builds only) */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pVersionInfo), M4ERR_PARAMETER,
+        "M4VSS3GPP_GetVersion: pVersionInfo is M4OSA_NULL");
+
+    /* Copy the build-time version constants into the caller's structure */
+    pVersionInfo->m_revision = M4VSS_VERSION_REVISION;
+    pVersionInfo->m_minor = M4VSS_VERSION_MINOR;
+    pVersionInfo->m_major = M4VSS_VERSION_MAJOR;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editInit()
+ * @brief    Initializes the VSS 3GPP edit operation (allocates an execution context).
+ * @note     On success, *pContext receives the new context; on allocation
+ *           failure it is set to M4OSA_NULL.
+ * @param    pContext            (OUT) Pointer on the VSS 3GPP edit context to allocate
+ * @param    pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
+ * @param    pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editInit( M4VSS3GPP_EditContext *pContext,
+                             M4OSA_FileReadPointer *pFileReadPtrFct,
+                             M4OSA_FileWriterPointer *pFileWritePtrFct )
+{
+    M4VSS3GPP_InternalEditContext *pC;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE3_3(
+        "M4VSS3GPP_editInit called with pContext=0x%x, \
+        pFileReadPtrFct=0x%x, pFileWritePtrFct=0x%x",
+        pContext, pFileReadPtrFct, pFileWritePtrFct);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pFileReadPtrFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pFileWritePtrFct is M4OSA_NULL");
+
+    /**
+    * Allocate the VSS context */
+    pC = (M4VSS3GPP_InternalEditContext
+        *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_InternalEditContext),
+        M4VSS3GPP, (M4OSA_Char *)"M4VSS3GPP_InternalContext");
+
+    /* BUGFIX: test the allocation result BEFORE touching the pointer.
+     * The previous code memset() the context and stored it in *pContext
+     * before the NULL check, which is undefined behavior when the
+     * allocation fails. */
+    if( M4OSA_NULL == pC )
+    {
+        *pContext = M4OSA_NULL;
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editInit(): unable to allocate M4VSS3GPP_InternalContext,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /* Zero-initialize every context member, then return it to the user */
+    memset((void *)pC, 0,sizeof(M4VSS3GPP_InternalEditContext));
+    *pContext = pC;
+
+    /* Init the context (explicit defaults kept for clarity, even where the
+     * memset above already produced the same value). */
+    pC->uiClipNumber = 0;
+    pC->pClipList = M4OSA_NULL;
+    pC->pTransitionList = M4OSA_NULL;
+    pC->pEffectsList = M4OSA_NULL;
+    pC->pActiveEffectsList = M4OSA_NULL;
+    pC->pActiveEffectsList1 = M4OSA_NULL;
+    pC->bClip1ActiveFramingEffect = M4OSA_FALSE;
+    pC->bClip2ActiveFramingEffect = M4OSA_FALSE;
+    pC->uiCurrentClip = 0;
+    pC->pC1 = M4OSA_NULL;
+    pC->pC2 = M4OSA_NULL;
+    pC->yuv1[0].pac_data = pC->yuv1[1].pac_data = pC->
+        yuv1[2].pac_data = M4OSA_NULL;
+    pC->yuv2[0].pac_data = pC->yuv2[1].pac_data = pC->
+        yuv2[2].pac_data = M4OSA_NULL;
+    pC->yuv3[0].pac_data = pC->yuv3[1].pac_data = pC->
+        yuv3[2].pac_data = M4OSA_NULL;
+    pC->yuv4[0].pac_data = pC->yuv4[1].pac_data = pC->
+        yuv4[2].pac_data = M4OSA_NULL;
+    pC->bClip1AtBeginCut = M4OSA_FALSE;
+    pC->iClip1ActiveEffect = 0;
+    pC->iClip2ActiveEffect = 0;
+    pC->bTransitionEffect = M4OSA_FALSE;
+    pC->bSupportSilence = M4OSA_FALSE;
+
+    /**
+    * Init pC->ewc members */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.dInputVidCts  = 0.0;
+    pC->ewc.dOutputVidCts = 0.0;
+    pC->ewc.dATo = 0.0;
+    pC->ewc.iOutputDuration = 0;
+    pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+    pC->ewc.uiVideoBitrate = 0;
+    pC->ewc.uiVideoWidth = 0;
+    pC->ewc.uiVideoHeight = 0;
+    pC->ewc.uiVideoTimeScale = 0;
+    pC->ewc.bVideoDataPartitioning = M4OSA_FALSE;
+    pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+    pC->ewc.uiVideoOutputDsiSize = 0;
+    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+    pC->ewc.uiNbChannels = 1;
+    pC->ewc.uiAudioBitrate = 0;
+    pC->ewc.uiSamplingFrequency = 0;
+    pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+    pC->ewc.uiAudioOutputDsiSize = 0;
+    pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    pC->ewc.pAudioEncDSI.infoSize = 0;
+    pC->ewc.pAudioEncDSI.pInfo = M4OSA_NULL;
+    pC->ewc.uiSilencePcmSize = 0;
+    pC->ewc.pSilenceFrameData = M4OSA_NULL;
+    pC->ewc.uiSilenceFrameSize = 0;
+    pC->ewc.iSilenceFrameDuration = 0;
+    pC->ewc.scale_audio = 0.0;
+    pC->ewc.pEncContext = M4OSA_NULL;
+    pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+    pC->ewc.iMpeg4GovOffset = 0;
+    pC->ewc.VppError = 0;
+    pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
+    pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    pC->ewc.uiVideoMaxAuSize = 0;
+    pC->ewc.uiAudioMaxAuSize = 0;
+    /**
+    * Keep the OSAL file functions pointer set in our context */
+    pC->pOsaFileReadPtr = pFileReadPtrFct;
+    pC->pOsaFileWritPtr = pFileWritePtrFct;
+
+    /*
+    * Reset pointers for media and codecs interfaces */
+    err = M4VSS3GPP_clearInterfaceTables(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /*
+    * Call the media and codecs subscription module */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Update main state automaton */
+    pC->State = M4VSS3GPP_kEditState_CREATED;
+    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+    pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
+    /* The flag is set to false at the beginning of every clip */
+    pC->m_bClipExternalHasStarted = M4OSA_FALSE;
+
+    pC->bIsMMS = M4OSA_FALSE;
+
+    pC->iInOutTimeOffset = 0;
+    pC->bEncodeTillEoF = M4OSA_FALSE;
+    pC->nbActiveEffects = 0;
+    pC->nbActiveEffects1 = 0;
+    pC->bIssecondClip = M4OSA_FALSE;
+    pC->m_air_context = M4OSA_NULL;
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editInit(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCreateClipSettings()
+ * @brief    Allows filling a clip settings structure with default values
+ *
+ * @note     WARNING: pClipSettings->pFile will be allocated in this function
+ *           (when pFile is not NULL); the caller releases it with
+ *           M4VSS3GPP_editFreeClipSettings().
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    pFile                (IN) Clip file name (may be M4OSA_NULL)
+ * @param    filePathSize         (IN) Clip path size (needed for UTF 16 conversion)
+ * @param    nbEffects            (IN) Nb of effect settings to allocate.
+ *                                NOTE(review): currently ignored by this
+ *                                implementation (per-clip effect allocation was
+ *                                removed); kept for API compatibility.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ * @return    M4ERR_ALLOC:        unable to allocate the file name copy
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_editCreateClipSettings( M4VSS3GPP_ClipSettings *pClipSettings,
+                                 M4OSA_Void *pFile, M4OSA_UInt32 filePathSize,
+                                 M4OSA_UInt8 nbEffects )
+{
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editCreateClipSettings called with pClipSettings=0x%p",
+        pClipSettings);
+
+    (void)nbEffects; /* see @note above: parameter is retained but unused */
+
+    /**
+    * Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_editCreateClipSettings: pClipSettings is NULL");
+
+    /**
+    * Set the clip settings to default */
+    pClipSettings->pFile = M4OSA_NULL; /**< no file */
+    pClipSettings->FileType =
+        M4VIDEOEDITING_kFileType_Unsupported; /**< undefined */
+
+    if( M4OSA_NULL != pFile )
+    {
+        /* One extra byte for the NUL terminator; clip path size is used
+         * instead of strlen() because of UTF-16 conversion */
+        pClipSettings->pFile =
+            (M4OSA_Void *)M4OSA_32bitAlignedMalloc(filePathSize + 1, M4VSS3GPP,
+            (M4OSA_Char *)"pClipSettings->pFile");
+
+        if( M4OSA_NULL == pClipSettings->pFile )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCreateClipSettings : ERROR allocating filename");
+            return M4ERR_ALLOC;
+        }
+        memcpy((void *)pClipSettings->pFile, (void *)pFile, filePathSize + 1);
+    }
+
+    /* File path size is stored to support UTF16 conversion */
+    pClipSettings->filePathSize = filePathSize + 1;
+
+    pClipSettings->ClipProperties.bAnalysed = M4OSA_FALSE;
+    pClipSettings->ClipProperties.FileType = 0;
+    pClipSettings->ClipProperties.Version[0] = 0;
+    pClipSettings->ClipProperties.Version[1] = 0;
+    pClipSettings->ClipProperties.Version[2] = 0;
+    pClipSettings->ClipProperties.uiClipDuration = 0;
+
+    pClipSettings->uiBeginCutTime = 0; /**< no begin cut */
+    pClipSettings->uiEndCutTime = 0;   /**< no end cut */
+    pClipSettings->ClipProperties.bSetImageData = M4OSA_FALSE;
+
+    /**
+    * Reset video characteristics */
+    pClipSettings->ClipProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
+    pClipSettings->ClipProperties.uiClipVideoDuration = 0;
+    pClipSettings->ClipProperties.uiVideoBitrate = 0;
+    pClipSettings->ClipProperties.uiVideoMaxAuSize = 0;
+    pClipSettings->ClipProperties.uiVideoWidth = 0;
+    pClipSettings->ClipProperties.uiVideoHeight = 0;
+    pClipSettings->ClipProperties.uiVideoTimeScale = 0;
+    pClipSettings->ClipProperties.fAverageFrameRate = 0.0;
+    pClipSettings->ClipProperties.uiVideoProfile =
+        M4VIDEOEDITING_VIDEO_UNKNOWN_PROFILE;
+    pClipSettings->ClipProperties.uiVideoLevel =
+        M4VIDEOEDITING_VIDEO_UNKNOWN_LEVEL;
+    pClipSettings->ClipProperties.bMPEG4dataPartition = M4OSA_FALSE;
+    pClipSettings->ClipProperties.bMPEG4rvlc = M4OSA_FALSE;
+    pClipSettings->ClipProperties.bMPEG4resynchMarker = M4OSA_FALSE;
+
+    /**
+    * Reset audio characteristics */
+    pClipSettings->ClipProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+    pClipSettings->ClipProperties.uiClipAudioDuration = 0;
+    pClipSettings->ClipProperties.uiAudioBitrate = 0;
+    pClipSettings->ClipProperties.uiAudioMaxAuSize = 0;
+    pClipSettings->ClipProperties.uiNbChannels = 0;
+    pClipSettings->ClipProperties.uiSamplingFrequency = 0;
+    pClipSettings->ClipProperties.uiExtendedSamplingFrequency = 0;
+    pClipSettings->ClipProperties.uiDecodedPcmSize = 0;
+
+    /**
+    * Return with no error (trace previously reported the wrong function name) */
+    M4OSA_TRACE3_0("M4VSS3GPP_editCreateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings()
+ * @brief    Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    bCopyEffects         (IN) Flag to know if we have to duplicate effects.
+ *                                NOTE(review): currently ignored — effect
+ *                                duplication was removed from this function;
+ *                                kept for API compatibility.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ * @return    M4ERR_ALLOC:        unable to allocate the file name copy
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_editDuplicateClipSettings( M4VSS3GPP_ClipSettings *pClipSettingsDest,
+                                    M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+                                    M4OSA_Bool bCopyEffects )
+{
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editDuplicateClipSettings called with dest=0x%p src=0x%p",
+        pClipSettingsDest, pClipSettingsOrig);
+
+    (void)bCopyEffects; /* see @param note: retained but unused */
+
+    /* Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsDest), M4ERR_PARAMETER,
+        "M4VSS3GPP_editDuplicateClipSettings: pClipSettingsDest is NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsOrig), M4ERR_PARAMETER,
+        "M4VSS3GPP_editDuplicateClipSettings: pClipSettingsOrig is NULL");
+
+    /* Copy plain structure (shallow: pFile is re-duplicated below) */
+    memcpy((void *)pClipSettingsDest,
+        (void *)pClipSettingsOrig, sizeof(M4VSS3GPP_ClipSettings));
+
+    /* Duplicate filename; filePathSize (not strlen) is the authoritative
+     * length because of UTF-16 conversion */
+    if( M4OSA_NULL != pClipSettingsOrig->pFile )
+    {
+        pClipSettingsDest->pFile =
+            (M4OSA_Char *)M4OSA_32bitAlignedMalloc(pClipSettingsOrig->filePathSize + 1,
+            M4VSS3GPP, (M4OSA_Char *)"pClipSettingsDest->pFile");
+
+        if( M4OSA_NULL == pClipSettingsDest->pFile )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editDuplicateClipSettings : ERROR allocating filename");
+            return M4ERR_ALLOC;
+        }
+        memcpy((void *)pClipSettingsDest->pFile, (void *)pClipSettingsOrig->pFile,
+            pClipSettingsOrig->filePathSize);
+        /* Force NUL termination: the copy above stops at filePathSize */
+        ( (M4OSA_Char
+            *)pClipSettingsDest->pFile)[pClipSettingsOrig->filePathSize] = '\0';
+    }
+
+    /* Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_editDuplicateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editFreeClipSettings()
+ * @brief    Free the pointers allocated in the ClipSetting structure (pFile).
+ * @note     Effect settings are owned by the edit context (not the clip),
+ *           so only the file name is released here.
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editFreeClipSettings(
+    M4VSS3GPP_ClipSettings *pClipSettings )
+{
+    /**
+    * Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_editFreeClipSettings: pClipSettings is NULL");
+
+    /* Free the filename; free(NULL) is a no-op, so no guard is needed.
+     * Reset the pointer to protect against double free by the caller. */
+    free(pClipSettings->pFile);
+    pClipSettings->pFile = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editOpen()
+ * @brief Set the VSS input and output files.
+ * @note It opens the input file, but the output file may not be created yet.
+ * @param pContext (IN) VSS edit context
+ * @param pSettings (IN) Edit settings
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: VSS is not in an appropriate state for this function to be called
+ * @return M4ERR_ALLOC: There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editOpen( M4VSS3GPP_EditContext pContext,
+ M4VSS3GPP_EditSettings *pSettings )
+{
+ M4VSS3GPP_InternalEditContext *pC =
+ (M4VSS3GPP_InternalEditContext *)pContext;
+
+ M4OSA_ERR err;
+ M4OSA_Int32 i;
+ M4VIDEOEDITING_FileType outputFileType =
+ M4VIDEOEDITING_kFileType_Unsupported; /**< 3GPP or MP3 (we don't do AMR output) */
+ M4OSA_UInt32 uiC1duration, uiC2duration;
+
+ M4OSA_TRACE3_2(
+ "M4VSS3GPP_editOpen called with pContext=0x%x, pSettings=0x%x",
+ pContext, pSettings);
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4VSS3GPP_editOpen: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings), M4ERR_PARAMETER,
+ "M4VSS3GPP_editOpen: pSettings is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings->pClipList), M4ERR_PARAMETER,
+ "M4VSS3GPP_editOpen: pSettings->pClipList is M4OSA_NULL");
+ M4OSA_DEBUG_IF2(( pSettings->uiClipNumber > 1)
+ && (M4OSA_NULL == pSettings->pTransitionList), M4ERR_PARAMETER,
+ "M4VSS3GPP_editOpen: pSettings->pTransitionList is M4OSA_NULL");
+
+ /**
+ * Check state automaton */
+ if( ( pC->State != M4VSS3GPP_kEditState_CREATED)
+ && (pC->State != M4VSS3GPP_kEditState_CLOSED) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: State error (0x%x)! Returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ }
+
+ /**
+ * Free any previously allocated internal settings list */
+ M4VSS3GPP_intFreeSettingsList(pC);
+
+ /**
+ * Copy the user settings in our context */
+ pC->uiClipNumber = pSettings->uiClipNumber;
+
+ /**
+ * Copy the clip list */
+ pC->pClipList =
+ (M4VSS3GPP_ClipSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_ClipSettings)
+ * pC->uiClipNumber, M4VSS3GPP, (M4OSA_Char *)"pC->pClipList");
+
+ if( M4OSA_NULL == pC->pClipList )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editOpen: unable to allocate pC->Settings.pClipList,\
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ for ( i = 0; i < pSettings->uiClipNumber; i++ )
+ {
+ M4VSS3GPP_editDuplicateClipSettings(&(pC->pClipList[i]),
+ pSettings->pClipList[i], M4OSA_TRUE);
+ }
+
+ /**
+ * Copy effects list RC */
+
+ /*FB bug fix 19.03.2008 if the number of effects is 0 -> crash*/
+ if( pSettings->nbEffects > 0 )
+ {
+ pC->nbEffects = pSettings->nbEffects;
+ pC->pEffectsList = (M4VSS3GPP_EffectSettings
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_EffectSettings) * pC->nbEffects,
+ M4VSS3GPP, (M4OSA_Char *)"pC->pEffectsList");
+
+ if( M4OSA_NULL == pC->pEffectsList )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editOpen: unable to allocate pC->pEffectsList, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ for ( i = 0; i < pC->nbEffects; i++ )
+ {
+ memcpy((void *) &(pC->pEffectsList[i]),
+ (void *) &(pSettings->Effects[i]),
+ sizeof(M4VSS3GPP_EffectSettings));
+ }
+
+ /**
+ * Allocate active effects list RC */
+ pC->pActiveEffectsList =
+ (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_UInt8) * pC->nbEffects,
+ M4VSS3GPP, (M4OSA_Char *)"pC->pActiveEffectsList");
+
+ if( M4OSA_NULL == pC->pActiveEffectsList )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editOpen: unable to allocate pC->pActiveEffectsList,\
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ /**
+ * Allocate active effects list */
+ pC->pActiveEffectsList1 =
+ (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(sizeof(M4OSA_UInt8) * pC->nbEffects,
+ M4VSS3GPP, (M4OSA_Char *)"pC->pActiveEffectsList");
+ if (M4OSA_NULL == pC->pActiveEffectsList1)
+ {
+ M4OSA_TRACE1_0("M4VSS3GPP_editOpen: unable to allocate pC->pActiveEffectsList, \
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ }
+ else
+ {
+ pC->nbEffects = 0;
+ pC->nbActiveEffects = 0;
+ pC->nbActiveEffects1 = 0;
+ pC->pEffectsList = M4OSA_NULL;
+ pC->pActiveEffectsList = M4OSA_NULL;
+ pC->pActiveEffectsList1 = M4OSA_NULL;
+ pC->bClip1ActiveFramingEffect = M4OSA_FALSE;
+ pC->bClip2ActiveFramingEffect = M4OSA_FALSE;
+ }
+
+ /**
+ * Test the clip analysis data, if it is not provided, analyse the clips by ourselves. */
+ for ( i = 0; i < pC->uiClipNumber; i++ )
+ {
+ if( M4OSA_FALSE == pC->pClipList[i].ClipProperties.bAnalysed )
+ {
+ /**< Analysis not provided by the integrator */
+ err = M4VSS3GPP_editAnalyseClip(pC->pClipList[i].pFile,
+ pC->pClipList[i].FileType, &pC->pClipList[i].ClipProperties,
+ pC->pOsaFileReadPtr);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: M4VSS3GPP_editAnalyseClip returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+ }
+
+ /**
+ * Check clip compatibility */
+ for ( i = 0; i < pC->uiClipNumber; i++ )
+ {
+ if (pC->pClipList[i].FileType !=M4VIDEOEDITING_kFileType_ARGB8888) {
+ /**
+ * Check all the clips are compatible with VSS 3GPP */
+ err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(
+ &pC->pClipList[i].ClipProperties);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_editOpen:\
+ M4VSS3GPP_intCheckClipCompatibleWithVssEditing(%d) returns 0x%x!",
+ i, err);
+ return err;
+ }
+ }
+
+ /**
+ * Check the master clip versus all the other ones.
+ (including master clip with itself, else variables for master clip
+ are not properly setted) */
+ if(pC->pClipList[i].FileType != M4VIDEOEDITING_kFileType_ARGB8888) {
+
+ err = M4VSS3GPP_editCheckClipCompatibility(
+ &pC->pClipList[pSettings->uiMasterClip].ClipProperties,
+ &pC->pClipList[i].ClipProperties);
+ /* in case of warning regarding audio incompatibility,
+ editing continues */
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_editOpen: M4VSS3GPP_editCheckClipCompatibility \
+ (%d) returns 0x%x!", i, err);
+ return err;
+ }
+ } else {
+ pC->pClipList[i].ClipProperties.bAudioIsCompatibleWithMasterClip =
+ M4OSA_FALSE;
+ }
+ }
+ /* Search audio tracks that cannot be edited :
+ * - delete all audio effects for the clip
+ * - if master clip is editable let the transition
+ (bad track will be replaced later with silence)
+ * - if master clip is not editable switch to a dummy transition (only copy/paste) */
+ for ( i = 0; i < pC->uiClipNumber; i++ )
+ {
+ if( M4OSA_FALSE == pC->pClipList[i].ClipProperties.bAudioIsEditable )
+ {
+ M4OSA_UInt8 uiFx;
+
+ for ( uiFx = 0; uiFx < pC->nbEffects; uiFx++ )
+ {
+ pC->pEffectsList[uiFx].AudioEffectType
+ = M4VSS3GPP_kAudioEffectType_None;
+ }
+
+ if( ( i < (pC->uiClipNumber - 1))
+ && (M4OSA_NULL != pSettings->pTransitionList[i])
+ && (M4OSA_FALSE == pC->pClipList[pSettings->
+ uiMasterClip].ClipProperties.bAudioIsEditable) )
+ {
+ pSettings->pTransitionList[i]->AudioTransitionType
+ = M4VSS3GPP_kAudioTransitionType_None;
+ }
+ }
+ }
+
+ /**
+ * We add a transition of duration 0 at the end of the last clip.
+ * It will suppress a whole bunch a test latter in the processing... */
+ pC->pTransitionList = (M4VSS3GPP_TransitionSettings
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings)
+ * (pC->uiClipNumber), M4VSS3GPP, (M4OSA_Char *)"pC->pTransitionList");
+
+ if( M4OSA_NULL == pC->pTransitionList )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editOpen: unable to allocate pC->Settings.pTransitionList,\
+ returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ /**< copy transition settings */
+ for ( i = 0; i < (pSettings->uiClipNumber - 1); i++ )
+ {
+ memcpy((void *) &(pC->pTransitionList[i]),
+ (void *)pSettings->pTransitionList[i],
+ sizeof(M4VSS3GPP_TransitionSettings));
+ }
+
+ /**< We fill the last "dummy" transition */
+ pC->pTransitionList[pC->uiClipNumber - 1].uiTransitionDuration = 0;
+ pC->pTransitionList[pC->uiClipNumber
+ - 1].VideoTransitionType = M4VSS3GPP_kVideoTransitionType_None;
+ pC->pTransitionList[pC->uiClipNumber
+ - 1].AudioTransitionType = M4VSS3GPP_kAudioTransitionType_None;
+
+ /**
+ * Avoid weird clip settings */
+ for ( i = 0; i < pSettings->uiClipNumber; i++ )
+ {
+ if (pC->pClipList[i].FileType !=M4VIDEOEDITING_kFileType_ARGB8888) {
+ err = M4VSS3GPP_intClipSettingsSanityCheck(&pC->pClipList[i]);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: M4VSS3GPP_intClipSettingsSanityCheck returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+ }
+
+ for ( i = 0; i < (pSettings->uiClipNumber - 1); i++ )
+ {
+ if (pC->pTransitionList[i].uiTransitionDuration != 0) {
+ if (pC->pClipList[i].FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
+ pC->pClipList[i].uiBeginCutTime = 0;
+ pC->pClipList[i].uiEndCutTime =
+ pC->pTransitionList[i].uiTransitionDuration;
+ }
+
+ if (pC->pClipList[i+1].FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
+ pC->pClipList[i+1].uiBeginCutTime = 0;
+ pC->pClipList[i+1].uiEndCutTime =
+ pC->pTransitionList[i].uiTransitionDuration;
+ }
+ } else {
+
+ if (pC->pClipList[i].FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
+ pC->pClipList[i].uiEndCutTime =
+ pC->pClipList[i].uiEndCutTime - pC->pClipList[i].uiBeginCutTime;
+ pC->pClipList[i].uiBeginCutTime = 0;
+ }
+
+ if (pC->pClipList[i+1].FileType == M4VIDEOEDITING_kFileType_ARGB8888) {
+ pC->pClipList[i+1].uiEndCutTime =
+ pC->pClipList[i+1].uiEndCutTime - pC->pClipList[i+1].uiBeginCutTime;
+ pC->pClipList[i+1].uiBeginCutTime = 0;
+ }
+
+ }
+
+ /**
+ * Maximum transition duration between clip n and clip n+1 is the duration
+ * of the shortest clip */
+ if( 0 == pC->pClipList[i].uiEndCutTime )
+ {
+ uiC1duration = pC->pClipList[i].ClipProperties.uiClipVideoDuration;
+ }
+ else
+ {
+ /**< duration of clip n is the end cut time */
+ uiC1duration = pC->pClipList[i].uiEndCutTime;
+ }
+
+ /**< Substract begin cut */
+ uiC1duration -= pC->pClipList[i].uiBeginCutTime;
+
+ /**< Check that the transition is shorter than clip n */
+ if( pC->pTransitionList[i].uiTransitionDuration > uiC1duration )
+ {
+ pC->pTransitionList[i].uiTransitionDuration = uiC1duration - 1;
+ }
+
+ if( 0 == pC->pClipList[i + 1].uiEndCutTime )
+ {
+ uiC2duration =
+ pC->pClipList[i + 1].ClipProperties.uiClipVideoDuration;
+ }
+ else
+ {
+ /**< duration of clip n+1 is the end cut time */
+ uiC2duration = pC->pClipList[i + 1].uiEndCutTime;
+ }
+
+ /**< Substract begin cut */
+ uiC2duration -= pC->pClipList[i + 1].uiBeginCutTime;
+
+ /**< Check that the transition is shorter than clip n+1 */
+ if( pC->pTransitionList[i].uiTransitionDuration > uiC2duration )
+ {
+ pC->pTransitionList[i].uiTransitionDuration = uiC2duration - 1;
+ }
+
+ /**
+ * Avoid weird transition settings */
+ err =
+ M4VSS3GPP_intTransitionSettingsSanityCheck(&pC->pTransitionList[i]);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: M4VSS3GPP_intClipSettingsSanityCheck returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Check that two transitions are not overlapping
+ (no overlapping possible for first clip) */
+ if( i > 0 )
+ {
+ /**
+ * There is a transition overlap if the sum of the duration of
+ two consecutive transitions
+ * is higher than the duration of the clip in-between. */
+ if( ( pC->pTransitionList[i - 1].uiTransitionDuration
+ + pC->pTransitionList[i].uiTransitionDuration) >= uiC1duration )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: Overlapping transitions on clip %d,\
+ returning M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS",
+ i);
+ return M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS;
+ }
+ }
+ }
+
+ /**
+ * Output clip duration */
+ for ( i = 0; i < pC->uiClipNumber; i++ )
+ {
+ /**
+ * Compute the sum of the clip duration */
+ if( 0 == pC->pClipList[i].uiEndCutTime )
+ {
+ pC->ewc.iOutputDuration +=
+ pC->
+ pClipList[
+ i].ClipProperties.
+ uiClipVideoDuration; /* Only video track duration is important to
+ avoid deviation if audio track is longer */
+ }
+ else
+ {
+ pC->ewc.iOutputDuration +=
+ pC->pClipList[i].uiEndCutTime; /**< Add end cut */
+ }
+
+ pC->ewc.iOutputDuration -=
+ pC->pClipList[i].uiBeginCutTime; /**< Remove begin cut */
+
+ /**
+ * Remove the duration of the transition (it is counted twice) */
+ pC->ewc.iOutputDuration -= pC->pTransitionList[i].uiTransitionDuration;
+ }
+
+ /* Get video properties from output properties */
+
+ /* Get output width and height */
+ switch(pC->xVSS.outputVideoSize) {
+ case M4VIDEOEDITING_kSQCIF:
+ pC->ewc.uiVideoWidth = 128;
+ pC->ewc.uiVideoHeight = 96;
+ break;
+ case M4VIDEOEDITING_kQQVGA:
+ pC->ewc.uiVideoWidth = 160;
+ pC->ewc.uiVideoHeight = 120;
+ break;
+ case M4VIDEOEDITING_kQCIF:
+ pC->ewc.uiVideoWidth = 176;
+ pC->ewc.uiVideoHeight = 144;
+ break;
+ case M4VIDEOEDITING_kQVGA:
+ pC->ewc.uiVideoWidth = 320;
+ pC->ewc.uiVideoHeight = 240;
+ break;
+ case M4VIDEOEDITING_kCIF:
+ pC->ewc.uiVideoWidth = 352;
+ pC->ewc.uiVideoHeight = 288;
+ break;
+ case M4VIDEOEDITING_kVGA:
+ pC->ewc.uiVideoWidth = 640;
+ pC->ewc.uiVideoHeight = 480;
+ break;
+ /* +PR LV5807 */
+ case M4VIDEOEDITING_kWVGA:
+ pC->ewc.uiVideoWidth = 800;
+ pC->ewc.uiVideoHeight = 480;
+ break;
+ case M4VIDEOEDITING_kNTSC:
+ pC->ewc.uiVideoWidth = 720;
+ pC->ewc.uiVideoHeight = 480;
+ break;
+ /* -PR LV5807 */
+ /* +CR Google */
+ case M4VIDEOEDITING_k640_360:
+ pC->ewc.uiVideoWidth = 640;
+ pC->ewc.uiVideoHeight = 360;
+ break;
+
+ case M4VIDEOEDITING_k854_480:
+ pC->ewc.uiVideoWidth = M4ENCODER_854_480_Width;
+ pC->ewc.uiVideoHeight = 480;
+ break;
+
+ case M4VIDEOEDITING_k1280_720:
+ pC->ewc.uiVideoWidth = 1280;
+ pC->ewc.uiVideoHeight = 720;
+ break;
+ case M4VIDEOEDITING_k1080_720:
+ pC->ewc.uiVideoWidth = M4ENCODER_1080_720_Width;
+
+ pC->ewc.uiVideoHeight = 720;
+ break;
+ case M4VIDEOEDITING_k960_720:
+ pC->ewc.uiVideoWidth = 960;
+ pC->ewc.uiVideoHeight = 720;
+ break;
+ case M4VIDEOEDITING_k1920_1080:
+ pC->ewc.uiVideoWidth = 1920;
+ pC->ewc.uiVideoHeight = 1088; // need to be multiples of 16
+ break;
+
+ default: /* If output video size is not given, we take QCIF size */
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editOpen: no output video size given, default to QCIF!");
+ pC->ewc.uiVideoWidth = 176;
+ pC->ewc.uiVideoHeight = 144;
+ pC->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
+ break;
+ }
+
+ pC->ewc.uiVideoTimeScale = 30;
+ pC->ewc.bVideoDataPartitioning = 0;
+ /* Set output video profile and level */
+ pC->ewc.outputVideoProfile = pC->xVSS.outputVideoProfile;
+ pC->ewc.outputVideoLevel = pC->xVSS.outputVideoLevel;
+
+ switch(pC->xVSS.outputVideoFormat) {
+ case M4VIDEOEDITING_kH263:
+ pC->ewc.VideoStreamType = M4SYS_kH263;
+ break;
+ case M4VIDEOEDITING_kMPEG4:
+ pC->ewc.VideoStreamType = M4SYS_kMPEG_4;
+ break;
+ case M4VIDEOEDITING_kH264:
+ pC->ewc.VideoStreamType = M4SYS_kH264;
+ break;
+ default:
+ pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+ break;
+ }
+
+ /**
+ * Copy the audio properties of the master clip to the output properties */
+ pC->ewc.uiNbChannels =
+ pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiNbChannels;
+ pC->ewc.uiAudioBitrate =
+ pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiAudioBitrate;
+ pC->ewc.uiSamplingFrequency = pC->pClipList[pSettings->
+ uiMasterClip].ClipProperties.uiSamplingFrequency;
+ pC->ewc.uiSilencePcmSize =
+ pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiDecodedPcmSize;
+ pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+ switch( pC->pClipList[pSettings->uiMasterClip].ClipProperties.AudioStreamType )
+ {
+ case M4VIDEOEDITING_kAMR_NB:
+ pC->ewc.AudioStreamType = M4SYS_kAMR;
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+ pC->ewc.iSilenceFrameDuration =
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+ pC->bSupportSilence = M4OSA_TRUE;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ case M4VIDEOEDITING_kAACplus:
+ case M4VIDEOEDITING_keAACplus:
+ pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+ if( pC->ewc.uiNbChannels == 1 )
+ {
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+ pC->ewc.uiSilenceFrameSize = M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+ pC->bSupportSilence = M4OSA_TRUE;
+ }
+ else
+ {
+ pC->ewc.pSilenceFrameData =
+ (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+ pC->ewc.uiSilenceFrameSize =
+ M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+ pC->bSupportSilence = M4OSA_TRUE;
+ }
+ pC->ewc.iSilenceFrameDuration =
+ 1024; /* AAC is always 1024/Freq sample duration */
+ break;
+
+ case M4VIDEOEDITING_kMP3:
+ pC->ewc.AudioStreamType = M4SYS_kMP3;
+ pC->ewc.pSilenceFrameData = M4OSA_NULL;
+ pC->ewc.uiSilenceFrameSize = 0;
+ pC->ewc.iSilenceFrameDuration = 0;
+ /* Special case, mp3 core reader return a time in ms */
+ pC->ewc.scale_audio = 1.0;
+ break;
+
+ case M4VIDEOEDITING_kEVRC:
+ pC->ewc.AudioStreamType = M4SYS_kEVRC;
+ pC->ewc.pSilenceFrameData = M4OSA_NULL;
+ pC->ewc.uiSilenceFrameSize = 0;
+ pC->ewc.iSilenceFrameDuration = 160; /* EVRC frames are 20 ms at 8000 Hz
+ (makes it easier to factorize amr and evrc code) */
+ break;
+
+ default:
+ pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+ break;
+ }
+
+ for (i=0; i<pC->uiClipNumber; i++) {
+ if (pC->pClipList[i].bTranscodingRequired == M4OSA_FALSE) {
+ /** If not transcoded in Analysis phase, check
+ * if transcoding required now
+ */
+ if ((pC->pClipList[i].ClipProperties.VideoStreamType !=
+ pC->xVSS.outputVideoFormat)||
+ (pC->pClipList[i].ClipProperties.uiVideoWidth !=
+ pC->ewc.uiVideoWidth) ||
+ (pC->pClipList[i].ClipProperties.uiVideoHeight !=
+ pC->ewc.uiVideoHeight) ||
+ (pC->pClipList[i].ClipProperties.VideoStreamType ==
+ M4VIDEOEDITING_kH264) ||
+ (pC->pClipList[i].ClipProperties.VideoStreamType ==
+ M4VIDEOEDITING_kMPEG4 &&
+ pC->pClipList[i].ClipProperties.uiVideoTimeScale !=
+ pC->ewc.uiVideoTimeScale)) {
+ pC->pClipList[i].bTranscodingRequired = M4OSA_TRUE;
+ }
+ } else {
+ /** If bTranscodingRequired is true, it means the clip has
+ * been transcoded in Analysis phase.
+ */
+ pC->pClipList[i].bTranscodingRequired = M4OSA_FALSE;
+ }
+ }
+ /**
+ * We produce a 3gpp file, unless it is mp3 */
+ if( M4VIDEOEDITING_kMP3 == pC->
+ pClipList[pSettings->uiMasterClip].ClipProperties.AudioStreamType )
+ outputFileType = M4VIDEOEDITING_kFileType_MP3;
+ else
+ outputFileType = M4VIDEOEDITING_kFileType_3GPP;
+
+ /**
+ * Beware, a null duration would lead to a divide by zero error (better safe than sorry...) */
+ if( 0 == pC->ewc.iOutputDuration )
+ {
+ pC->ewc.iOutputDuration = 1;
+ }
+
+ /**
+ * Open first clip */
+ pC->uiCurrentClip = 0;
+
+ // Decorrelate input and output encoding timestamp to handle encoder prefetch
+ pC->ewc.dInputVidCts = 0.0;
+ pC->ewc.dOutputVidCts = 0.0;
+ pC->ewc.dATo = 0.0;
+
+ err = M4VSS3GPP_intSwitchToNextClip(pC);
+ /* RC: to know when a file has been processed */
+ if( M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: M4VSS3GPP_intSwitchToNextClip() returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Do the video stuff in 3GPP Audio/Video case */
+ if( M4VIDEOEDITING_kFileType_3GPP == outputFileType )
+ {
+ /**
+ * Compute the Decoder Specific Info for the output video and audio streams */
+ err = M4VSS3GPP_intComputeOutputVideoAndAudioDsi(pC,
+ pSettings->uiMasterClip);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: M4VSS3GPP_intComputeOutputVideoAndAudioDsi() returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Compute the time increment for the transition file */
+ switch( pSettings->videoFrameRate )
+ {
+ case M4VIDEOEDITING_k5_FPS:
+ pC->dOutputFrameDuration = 1000.0 / 5.0;
+ break;
+
+ case M4VIDEOEDITING_k7_5_FPS:
+ pC->dOutputFrameDuration = 1000.0 / 7.5;
+ break;
+
+ case M4VIDEOEDITING_k10_FPS:
+ pC->dOutputFrameDuration = 1000.0 / 10.0;
+ break;
+
+ case M4VIDEOEDITING_k12_5_FPS:
+ pC->dOutputFrameDuration = 1000.0 / 12.5;
+ break;
+
+ case M4VIDEOEDITING_k15_FPS:
+ pC->dOutputFrameDuration = 1000.0 / 15.0;
+ break;
+
+ case M4VIDEOEDITING_k20_FPS:
+ pC->dOutputFrameDuration = 1000.0 / 20.0;
+ break;
+
+ case M4VIDEOEDITING_k25_FPS:
+ pC->dOutputFrameDuration = 1000.0 / 25.0;
+ break;
+
+ case M4VIDEOEDITING_k30_FPS:
+ pC->dOutputFrameDuration = 1000.0 / 30.0;
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen(): invalid videoFrameRate (0x%x),\
+ returning M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE",
+ pSettings->videoFrameRate);
+ return M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE;
+ }
+
+ if( M4SYS_kMPEG_4 == pC->ewc.VideoStreamType )
+ {
+ M4OSA_UInt32 uiAlpha;
+ /**
+ * MPEG-4 case.
+ * Time scale of the transition encoder must be the same than the
+ * timescale of the input files.
+ * So the frame duration must be compatible with this time scale,
+ * but without being too short.
+ * For that, we must compute alpha (integer) so that:
+ * (alpha x 1000)/EncoderTimeScale > MinFrameDuration
+ **/
+
+ uiAlpha = (M4OSA_UInt32)(( pC->dOutputFrameDuration
+ * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);
+
+ if( uiAlpha > 0 )
+ {
+ pC->dOutputFrameDuration =
+ ( uiAlpha * 1000.0) / pC->ewc.uiVideoTimeScale;
+ }
+ }
+ else if( M4SYS_kH263 == pC->ewc.VideoStreamType )
+ {
+ switch( pSettings->videoFrameRate )
+ {
+ case M4VIDEOEDITING_k12_5_FPS:
+ case M4VIDEOEDITING_k20_FPS:
+ case M4VIDEOEDITING_k25_FPS:
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editOpen(): invalid videoFrameRate for H263,\
+ returning M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE");
+ return M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE;
+ default:
+ break;
+ }
+ }
+ }
+
+ /**
+ * Create the MP3 output file */
+ if( M4VIDEOEDITING_kFileType_MP3 == outputFileType )
+ {
+ M4READER_Buffer mp3tagBuffer;
+ err = M4VSS3GPP_intCreateMP3OutputFile(pC, pSettings->pOutputFile);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: M4VSS3GPP_intCreateMP3OutputFile returns 0x%x",
+ err);
+ return err;
+ }
+
+ /* The ID3v2 tag could be at any place in the mp3 file */
+ /* The mp3 reader only checks few bytes in the beginning of
+ stream to look for a ID3v2 tag */
+ /* It means that if the ID3v2 tag is not at the beginning of the file, the reader
+ behaves as if this metadata were not present */
+
+ /* Retrieve the data of the ID3v2 Tag */
+ err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption(
+ pC->pC1->pReaderContext, M4READER_kOptionID_Mp3Id3v2Tag,
+ (M4OSA_DataOption) &mp3tagBuffer);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1("M4VSS3GPP_editOpen: M4MP3R_getOption returns 0x%x",
+ err);
+ return err;
+ }
+
+ /* Write the data of the ID3v2 Tag in the output file */
+ if( 0 != mp3tagBuffer.m_uiBufferSize )
+ {
+ err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+ (M4OSA_MemAddr8)mp3tagBuffer.m_pData, mp3tagBuffer.m_uiBufferSize);
+
+ /**
+ * Free before the error checking anyway */
+ free(mp3tagBuffer.m_pData);
+
+ /**
+ * Error checking */
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: WriteData(ID3v2Tag) returns 0x%x",
+ err);
+ return err;
+ }
+
+ mp3tagBuffer.m_uiBufferSize = 0;
+ mp3tagBuffer.m_pData = M4OSA_NULL;
+ }
+ }
+ /**
+ * Create the 3GPP output file */
+ else if( M4VIDEOEDITING_kFileType_3GPP == outputFileType )
+ {
+ pC->ewc.uiVideoBitrate = pSettings->xVSS.outputVideoBitrate;
+
+ /**
+ * 11/12/2008 CR3283 MMS use case in VideoArtist: Set max output file size if needed */
+ if( pC->bIsMMS == M4OSA_TRUE )
+ {
+ err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+ pC->pOsaFileWritPtr, pSettings->pOutputFile,
+ pC->pOsaFileReadPtr, pSettings->pTemporaryFile,
+ pSettings->xVSS.outputFileSize);
+ }
+ else
+ {
+ err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+ pC->pOsaFileWritPtr, pSettings->pOutputFile,
+ pC->pOsaFileReadPtr, pSettings->pTemporaryFile, 0);
+ }
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: M4VSS3GPP_intCreate3GPPOutputFile returns 0x%x",
+ err);
+ return err;
+ }
+ }
+ /**
+ * Default error case */
+ else
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editOpen: invalid outputFileType = 0x%x,\
+ returning M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR",
+ outputFileType);
+ return
+ M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR; /**< this is an internal error code
+ unknown to the user */
+ }
+
+ /**
+ * Initialize state */
+ if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
+ {
+ /**
+ * In the MP3 case we use a special audio state */
+ pC->State = M4VSS3GPP_kEditState_MP3_JUMP;
+ }
+ else
+ {
+ /**
+ * We start with the video processing */
+ pC->State = M4VSS3GPP_kEditState_VIDEO;
+ }
+
+ /**
+ * Initialize state.
+ * The first clip is independent of the "virtual previous clips",
+ * so it is as if we were in Read/Write mode before it. */
+ pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+ pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_editOpen(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editStep()
+ * @brief Perform one step of editing.
+ * @note
+ * @param pContext (IN) VSS 3GPP edit context
+ * @param pProgress (OUT) Progress percentage (0 to 100) of the editing operation
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: VSS 3GPP is not in an appropriate state for this
+ * function to be called
+ * @return M4VSS3GPP_WAR_EDITING_DONE: Edition is done, user should now call
+ * M4VSS3GPP_editClose()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editStep( M4VSS3GPP_EditContext pContext,
+ M4OSA_UInt8 *pProgress )
+{
+ M4VSS3GPP_InternalEditContext *pC =
+ (M4VSS3GPP_InternalEditContext *)pContext;
+ M4OSA_UInt32 uiProgressAudio, uiProgressVideo, uiProgress;
+ M4OSA_ERR err;
+
+ M4OSA_TRACE3_1("M4VSS3GPP_editStep called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4VSS3GPP_editStep: pContext is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pProgress), M4ERR_PARAMETER,
+ "M4VSS3GPP_editStep: pProgress is M4OSA_NULL");
+
+ /**
+ * Check state automaton and select correct processing */
+ switch( pC->State )
+ {
+ case M4VSS3GPP_kEditState_VIDEO:
+ err = M4VSS3GPP_intEditStepVideo(pC);
+ break;
+
+ case M4VSS3GPP_kEditState_AUDIO:
+ err = M4VSS3GPP_intEditStepAudio(pC);
+ break;
+
+ case M4VSS3GPP_kEditState_MP3:
+ err = M4VSS3GPP_intEditStepMP3(pC);
+ break;
+
+ case M4VSS3GPP_kEditState_MP3_JUMP:
+ err = M4VSS3GPP_intEditJumpMP3(pC);
+ break;
+
+ default:
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editStep(): invalid internal state (0x%x), returning M4ERR_STATE");
+ return M4ERR_STATE;
+ }
+
+ /**
+ * Compute progress.
+ * We do the computing with 32bits precision because in some (very) extreme case, we may get
+ * values higher than 256 (...) */
+ uiProgressAudio =
+ ( (M4OSA_UInt32)(pC->ewc.dATo * 100)) / pC->ewc.iOutputDuration;
+ // Decorrelate input and output encoding timestamp to handle encoder prefetch
+ uiProgressVideo = ((M4OSA_UInt32)(pC->ewc.dInputVidCts * 100)) / pC->ewc.iOutputDuration;
+
+ uiProgress = uiProgressAudio + uiProgressVideo;
+
+ if( ( pC->ewc.AudioStreamType != M4SYS_kAudioUnknown)
+ && (pC->ewc.VideoStreamType != M4SYS_kVideoUnknown) )
+ uiProgress /= 2;
+
+ /**
+ * Sanity check */
+ if( uiProgress > 100 )
+ {
+ *pProgress = 100;
+ }
+ else
+ {
+ *pProgress = (M4OSA_UInt8)uiProgress;
+ }
+
+ /**
+ * Return the error */
+ M4OSA_TRACE3_1("M4VSS3GPP_editStep(): returning 0x%x", err);
+ return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editClose()
+ * @brief Finish the VSS edit operation.
+ * @note The output 3GPP file is ready to be played after this call
+ * @param pContext (IN) VSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return M4ERR_STATE: VSS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editClose( M4VSS3GPP_EditContext pContext )
+{
+ M4VSS3GPP_InternalEditContext *pC =
+ (M4VSS3GPP_InternalEditContext *)pContext;
+ M4OSA_ERR err;
+ M4OSA_ERR returnedError = M4NO_ERROR;
+ M4OSA_UInt32 lastCTS;
+
+ M4OSA_TRACE3_1("M4VSS3GPP_editClose called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4VSS3GPP_editClose: pContext is M4OSA_NULL");
+
+ /**
+ * Check state automaton.
+ * In "theory", we should not authorize closing if we are in CREATED state.
+ * But in practice, in case the opening failed, it may have been partially done.
+ * In that case we have to free some opened ressources by calling Close. */
+ if( M4VSS3GPP_kEditState_CLOSED == pC->State )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editClose: Wrong state (0x%x), returning M4ERR_STATE",
+ pC->State);
+ return M4ERR_STATE;
+ }
+
+ /**
+ * There may be an encoder to destroy */
+ err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editClose: M4VSS3GPP_editDestroyVideoEncoder() returns 0x%x!",
+ err);
+ /**< We do not return the error here because we still have stuff to free */
+ returnedError = err;
+ }
+
+ /**
+ * Close the output file */
+ if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
+ {
+ /**
+ * MP3 case */
+ if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+ {
+ err = pC->pOsaFileWritPtr->closeWrite(pC->ewc.p3gpWriterContext);
+ pC->ewc.p3gpWriterContext = M4OSA_NULL;
+ }
+ }
+ else
+ {
+ /**
+ * Close the output 3GPP clip, if it has been opened */
+ if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+ {
+ /* Update last Video CTS */
+ lastCTS = pC->ewc.iOutputDuration;
+
+ err = pC->ShellAPI.pWriterGlobalFcts->pFctSetOption(
+ pC->ewc.p3gpWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editClose: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+ err);
+ }
+
+ err = pC->ShellAPI.pWriterGlobalFcts->pFctCloseWrite(
+ pC->ewc.p3gpWriterContext);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editClose: pFctCloseWrite(OUT) returns 0x%x!",
+ err);
+ /**< We do not return the error here because we still have stuff to free */
+ if( M4NO_ERROR
+ == returnedError ) /**< we return the first error that happened */
+ {
+ returnedError = err;
+ }
+ }
+ pC->ewc.p3gpWriterContext = M4OSA_NULL;
+ }
+ }
+
+ /**
+ * Free the output video DSI, if it has been created */
+ if( M4OSA_NULL != pC->ewc.pVideoOutputDsi )
+ {
+ free(pC->ewc.pVideoOutputDsi);
+ pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+ }
+
+ /**
+ * Free the output audio DSI, if it has been created */
+ if( M4OSA_NULL != pC->ewc.pAudioOutputDsi )
+ {
+ free(pC->ewc.pAudioOutputDsi);
+ pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+ }
+
+ /**
+ * Close clip1, if needed */
+ if( M4OSA_NULL != pC->pC1 )
+ {
+ err = M4VSS3GPP_intClipCleanUp(pC->pC1);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editClose: M4VSS3GPP_intClipCleanUp(C1) returns 0x%x!",
+ err);
+ /**< We do not return the error here because we still have stuff to free */
+ if( M4NO_ERROR
+ == returnedError ) /**< we return the first error that happened */
+ {
+ returnedError = err;
+ }
+ }
+ pC->pC1 = M4OSA_NULL;
+ }
+
+ /**
+ * Close clip2, if needed */
+ if( M4OSA_NULL != pC->pC2 )
+ {
+ err = M4VSS3GPP_intClipCleanUp(pC->pC2);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editClose: M4VSS3GPP_intClipCleanUp(C2) returns 0x%x!",
+ err);
+ /**< We do not return the error here because we still have stuff to free */
+ if( M4NO_ERROR
+ == returnedError ) /**< we return the first error that happened */
+ {
+ returnedError = err;
+ }
+ }
+ pC->pC2 = M4OSA_NULL;
+ }
+
+ /**
+ * Free the temporary YUV planes */
+ if( M4OSA_NULL != pC->yuv1[0].pac_data )
+ {
+ free(pC->yuv1[0].pac_data);
+ pC->yuv1[0].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv1[1].pac_data )
+ {
+ free(pC->yuv1[1].pac_data);
+ pC->yuv1[1].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv1[2].pac_data )
+ {
+ free(pC->yuv1[2].pac_data);
+ pC->yuv1[2].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv2[0].pac_data )
+ {
+ free(pC->yuv2[0].pac_data);
+ pC->yuv2[0].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv2[1].pac_data )
+ {
+ free(pC->yuv2[1].pac_data);
+ pC->yuv2[1].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv2[2].pac_data )
+ {
+ free(pC->yuv2[2].pac_data);
+ pC->yuv2[2].pac_data = M4OSA_NULL;
+ }
+
+ /* RC */
+ if( M4OSA_NULL != pC->yuv3[0].pac_data )
+ {
+ free(pC->yuv3[0].pac_data);
+ pC->yuv3[0].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv3[1].pac_data )
+ {
+ free(pC->yuv3[1].pac_data);
+ pC->yuv3[1].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv3[2].pac_data )
+ {
+ free(pC->yuv3[2].pac_data);
+ pC->yuv3[2].pac_data = M4OSA_NULL;
+ }
+
+ /* RC */
+ if( M4OSA_NULL != pC->yuv4[0].pac_data )
+ {
+ free(pC->yuv4[0].pac_data);
+ pC->yuv4[0].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv4[1].pac_data )
+ {
+ free(pC->yuv4[1].pac_data);
+ pC->yuv4[1].pac_data = M4OSA_NULL;
+ }
+
+ if( M4OSA_NULL != pC->yuv4[2].pac_data )
+ {
+ free(pC->yuv4[2].pac_data);
+ pC->yuv4[2].pac_data = M4OSA_NULL;
+ }
+
+ /**
+ * RC Free effects list */
+ if( pC->pEffectsList != M4OSA_NULL )
+ {
+ free(pC->pEffectsList);
+ pC->pEffectsList = M4OSA_NULL;
+ }
+
+ /**
+ * RC Free active effects list */
+ if( pC->pActiveEffectsList != M4OSA_NULL )
+ {
+ free(pC->pActiveEffectsList);
+ pC->pActiveEffectsList = M4OSA_NULL;
+ }
+ /**
+ * Free active effects list */
+ if(pC->pActiveEffectsList1 != M4OSA_NULL)
+ {
+ free(pC->pActiveEffectsList1);
+ pC->pActiveEffectsList1 = M4OSA_NULL;
+ }
+ if(pC->m_air_context != M4OSA_NULL) {
+ free(pC->m_air_context);
+ pC->m_air_context = M4OSA_NULL;
+ }
+ /**
+ * Update state automaton */
+ pC->State = M4VSS3GPP_kEditState_CLOSED;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_1("M4VSS3GPP_editClose(): returning 0x%x", returnedError);
+ return returnedError;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCleanUp()
+ * @brief Free all resources used by the VSS edit operation.
+ * @note The context is no more valid after this call
+ * @param pContext (IN) VSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCleanUp( M4VSS3GPP_EditContext pContext )
+{
+ M4OSA_ERR err;
+ M4VSS3GPP_InternalEditContext *pC =
+ (M4VSS3GPP_InternalEditContext *)pContext;
+
+ M4OSA_TRACE3_1("M4VSS3GPP_editCleanUp called with pContext=0x%x", pContext);
+
+ /**
+ * Check input parameter */
+ if( M4OSA_NULL == pContext )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_editCleanUp(): pContext is M4OSA_NULL, returning M4ERR_PARAMETER");
+ return M4ERR_PARAMETER;
+ }
+
+ /**
+ * Close, if needed.
+ * In "theory", we should not close if we are in CREATED state.
+ * But in practice, in case the opening failed, it may have been partially done.
+ * In that case we have to free some opened ressources by calling Close. */
+ if( M4VSS3GPP_kEditState_CLOSED != pC->State )
+ {
+ M4OSA_TRACE3_0("M4VSS3GPP_editCleanUp(): calling M4VSS3GPP_editClose");
+ err = M4VSS3GPP_editClose(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editCleanUp(): M4VSS3GPP_editClose returns 0x%x",
+ err);
+ }
+ }
+
+ /**
+ * Free the video encoder dummy AU */
+ if( M4OSA_NULL != pC->ewc.pDummyAuBuffer )
+ {
+ free(pC->ewc.pDummyAuBuffer);
+ pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+ }
+
+ /**
+ * Free the Audio encoder context */
+ if( M4OSA_NULL != pC->ewc.pAudioEncCtxt )
+ {
+ err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctClose(
+ pC->ewc.pAudioEncCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editCleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+ err);
+ /**< don't return, we still have stuff to free */
+ }
+
+ err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctCleanUp(
+ pC->ewc.pAudioEncCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_editCleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
+ err);
+ /**< don't return, we still have stuff to free */
+ }
+
+ pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+ }
+
+ /**
+ * Free the shells interfaces */
+ M4VSS3GPP_unRegisterAllWriters(&pC->ShellAPI);
+ M4VSS3GPP_unRegisterAllEncoders(&pC->ShellAPI);
+ M4VSS3GPP_unRegisterAllReaders(&pC->ShellAPI);
+ M4VSS3GPP_unRegisterAllDecoders(&pC->ShellAPI);
+
+ /**
+ * Free the settings copied in the internal context */
+ M4VSS3GPP_intFreeSettingsList(pC);
+
+ /**
+ * Finally, Free context */
+ free(pC);
+ pC = M4OSA_NULL;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_editCleanUp(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
#ifdef WIN32
/**
 ******************************************************************************
 * M4OSA_ERR M4VSS3GPP_GetErrorMessage()
 * @brief   Return a string describing the given error code
 * @note    The input string must be already allocated (and long enough!)
 * @param   err      (IN) Error code to get the description from
 * @param   sMessage (IN/OUT) Allocated string in which the description will be copied
 * @return  M4NO_ERROR:     Input error is from the VSS3GPP module
 * @return  M4ERR_PARAMETER:Input error is not from the VSS3GPP module
 ******************************************************************************
 */

M4OSA_ERR M4VSS3GPP_GetErrorMessage( M4OSA_ERR err, M4OSA_Char *sMessage )
{
    /* Table-driven variant of the original switch: each VSS3GPP code maps to
     * its own symbolic name. The stringizing macro guarantees the copied text
     * is always exactly the constant's identifier. */
#define M4VSS3GPP_NAME_ENTRY(code) { code, #code }
    static const struct
    {
        M4OSA_ERR code;
        const char *name;
    } kErrorNames[] = {
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_WAR_EDITING_DONE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_WAR_END_OF_AUDIO_MIXING),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_FILE_TYPE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_EFFECT_KIND),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_3GPP_FILE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED),
        M4VSS3GPP_NAME_ENTRY(M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE)
    };
#undef M4VSS3GPP_NAME_ENTRY
    M4OSA_UInt32 i;

    for ( i = 0; i < sizeof(kErrorNames) / sizeof(kErrorNames[0]); i++ )
    {
        if( kErrorNames[i].code == err )
        {
            strcpy(sMessage, kErrorNames[i].name);
            return M4NO_ERROR;
        }
    }

    /**< Not a VSS3GPP error */
    strcpy(sMessage, "");
    return M4ERR_PARAMETER;
}

#endif /* WIN32 */
+
+/********************************************************/
+/********************************************************/
+/********************************************************/
+/**************** STATIC FUNCTIONS ******************/
+/********************************************************/
+/********************************************************/
+/********************************************************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck()
+ * @brief Simplify the given clip settings
+ * @note This function may modify the given structure
+ * @param pClip (IN/OUT) Clip settings
+ * @return M4NO_ERROR: No error
+ * @return M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL:
+ ******************************************************************************
+ */
+
+static M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck(
+ M4VSS3GPP_ClipSettings *pClip )
+{
+ M4OSA_UInt8 uiFx;
+ M4OSA_UInt32
+ uiClipActualDuration; /**< the clip duration once the cuts are done */
+ M4OSA_UInt32 uiDuration;
+ M4VSS3GPP_EffectSettings *pFx;
+
+ /**
+ * If begin cut is too far, return an error */
+ uiDuration = pClip->ClipProperties.uiClipDuration;
+
+ if( pClip->uiBeginCutTime > uiDuration )
+ {
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_intClipSettingsSanityCheck: %d > %d,\
+ returning M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION",
+ pClip->uiBeginCutTime, uiDuration);
+ return M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION;
+ }
+
+ /**
+ * If end cut is too far, set to zero (it means no end cut) */
+ if( pClip->uiEndCutTime > uiDuration )
+ {
+ pClip->uiEndCutTime = 0;
+ }
+
+ /**
+ * Compute actual clip duration (once cuts are done) */
+ if( 0 == pClip->uiEndCutTime )
+ {
+ /**
+ * No end cut */
+ uiClipActualDuration = uiDuration - pClip->uiBeginCutTime;
+ }
+ else
+ {
+ if( pClip->uiBeginCutTime >= pClip->uiEndCutTime )
+ {
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_intClipSettingsSanityCheck: %d > %d,\
+ returning M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT",
+ pClip->uiBeginCutTime, pClip->uiEndCutTime);
+ return M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT;
+ }
+ uiClipActualDuration = pClip->uiEndCutTime - pClip->uiBeginCutTime;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck()
+ * @brief Simplify the given transition settings
+ * @note This function may modify the given structure
+ * @param pTransition (IN/OUT) Transition settings
+ * @return M4NO_ERROR: No error
+ * @return M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL:
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck(
+ M4VSS3GPP_TransitionSettings *pTransition )
+{
+ /**
+ * No transition */
+ if( 0 == pTransition->uiTransitionDuration )
+ {
+ pTransition->VideoTransitionType = M4VSS3GPP_kVideoTransitionType_None;
+ pTransition->AudioTransitionType = M4VSS3GPP_kAudioTransitionType_None;
+ }
+ else if( ( M4VSS3GPP_kVideoTransitionType_None
+ == pTransition->VideoTransitionType)
+ && (M4VSS3GPP_kAudioTransitionType_None
+ == pTransition->AudioTransitionType) )
+ {
+ pTransition->uiTransitionDuration = 0;
+ }
+
+ /**
+ * Check external transition function is set */
+ if( ( pTransition->VideoTransitionType
+ >= M4VSS3GPP_kVideoTransitionType_External)
+ && (M4OSA_NULL == pTransition->ExtVideoTransitionFct) )
+ {
+ return M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL;
+ }
+
+ /**
+ * Set minimal transition duration */
+ if( ( pTransition->uiTransitionDuration > 0)
+ && (pTransition->uiTransitionDuration
+ < M4VSS3GPP_MINIMAL_TRANSITION_DURATION) )
+ {
+ pTransition->uiTransitionDuration =
+ M4VSS3GPP_MINIMAL_TRANSITION_DURATION;
+ }
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intFreeSettingsList()
+ * @brief Free the settings copied in the internal context
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intFreeSettingsList(
+ M4VSS3GPP_InternalEditContext *pC )
+{
+ M4OSA_UInt32 i;
+
+ /**
+ * Free the settings list */
+ if( M4OSA_NULL != pC->pClipList )
+ {
+ for ( i = 0; i < pC->uiClipNumber; i++ )
+ {
+ M4VSS3GPP_editFreeClipSettings(&(pC->pClipList[i]));
+ }
+
+ free(pC->pClipList);
+ pC->pClipList = M4OSA_NULL;
+ }
+
+ /**
+ * Free the transition list */
+ if( M4OSA_NULL != pC->pTransitionList )
+ {
+ free(pC->pTransitionList);
+ pC->pTransitionList = M4OSA_NULL;
+ }
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateMP3OutputFile()
 * @brief Creates and prepares the output MP3 file
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intCreateMP3OutputFile( M4VSS3GPP_InternalEditContext *pC,
+ M4OSA_Void *pOutputFile )
+{
+ M4OSA_ERR err;
+
+ err =
+ pC->pOsaFileWritPtr->openWrite(&pC->ewc.p3gpWriterContext, pOutputFile,
+ M4OSA_kFileWrite);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreateMP3OutputFile: WriteOpen returns 0x%x!", err);
+ return err;
+ }
+
+ return M4NO_ERROR;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile()
 * @brief Creates and prepares the output 3GPP file
+ * @note Creates the writer, Creates the output file, Adds the streams,
+ Readies the writing process
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_intCreate3GPPOutputFile( M4VSS3GPP_EncodeWriteContext *pC_ewc,
+ M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+ M4OSA_FileWriterPointer *pOsaFileWritPtr,
+ M4OSA_Void *pOutputFile,
+ M4OSA_FileReadPointer *pOsaFileReadPtr,
+ M4OSA_Void *pTempFile,
+ M4OSA_UInt32 maxOutputFileSize )
+{
+ M4OSA_ERR err;
+ M4OSA_UInt32 uiVersion;
+ M4SYS_StreamIDValue temp;
+
+ M4OSA_TRACE3_2(
+ "M4VSS3GPP_intCreate3GPPOutputFile called with pC_ewc=0x%x, pOutputFile=0x%x",
+ pC_ewc, pOutputFile);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pC_ewc), M4ERR_PARAMETER,
+ "M4VSS3GPP_intCreate3GPPOutputFile: pC_ewc is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pOutputFile), M4ERR_PARAMETER,
+ "M4VSS3GPP_intCreate3GPPOutputFile: pOutputFile is M4OSA_NULL");
+
+ /* Set writer */
+ err =
+ M4VSS3GPP_setCurrentWriter(pC_ShellAPI, M4VIDEOEDITING_kFileType_3GPP);
+ M4ERR_CHECK_RETURN(err);
+
+ /**
+ * Create the output file */
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctOpen(&pC_ewc->p3gpWriterContext,
+ pOutputFile, pOsaFileWritPtr, pTempFile, pOsaFileReadPtr);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile: pWriterGlobalFcts->pFctOpen returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Set the signature option of the writer */
+ err =
+ pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(pC_ewc->p3gpWriterContext,
+ M4WRITER_kEmbeddedString, (M4OSA_DataOption)"NXP-SW : VSS ");
+
+ if( ( M4NO_ERROR != err) && (((M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+ != err) ) /* this option may not be implemented by some writers */
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /*11/12/2008 CR3283 MMS use case for VideoArtist:
+ Set the max output file size option in the writer so that the output file will be
+ smaller than the given file size limitation*/
+ if( maxOutputFileSize > 0 )
+ {
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+ pC_ewc->p3gpWriterContext,
+ M4WRITER_kMaxFileSize, &maxOutputFileSize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ writer set option M4WRITER_kMaxFileSize returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Set the version option of the writer */
+ uiVersion =
+ (M4VIDEOEDITING_VERSION_MAJOR * 100 + M4VIDEOEDITING_VERSION_MINOR * 10
+ + M4VIDEOEDITING_VERSION_REVISION);
+ err =
+ pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(pC_ewc->p3gpWriterContext,
+ M4WRITER_kEmbeddedVersion, (M4OSA_DataOption) &uiVersion);
+
+ if( ( M4NO_ERROR != err) && (((M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+ != err) ) /* this option may not be implemented by some writers */
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ if( M4SYS_kVideoUnknown != pC_ewc->VideoStreamType )
+ {
+ /**
+ * Set the video stream properties */
+ pC_ewc->WriterVideoStreamInfo.height = pC_ewc->uiVideoHeight;
+ pC_ewc->WriterVideoStreamInfo.width = pC_ewc->uiVideoWidth;
+ pC_ewc->WriterVideoStreamInfo.fps =
+ 0.0; /**< Not used by the shell/core writer */
+ pC_ewc->WriterVideoStreamInfo.Header.pBuf =
+ pC_ewc->pVideoOutputDsi; /**< Previously computed output DSI */
+ pC_ewc->WriterVideoStreamInfo.Header.Size = pC_ewc->
+ uiVideoOutputDsiSize; /**< Previously computed output DSI size */
+
+ pC_ewc->WriterVideoStream.streamType = pC_ewc->VideoStreamType;
+
+ switch( pC_ewc->VideoStreamType )
+ {
+ case M4SYS_kMPEG_4:
+ case M4SYS_kH263:
+ case M4SYS_kH264:
+ /**< We HAVE to put a value here... */
+ pC_ewc->WriterVideoStream.averageBitrate =
+ pC_ewc->uiVideoBitrate;
+ pC_ewc->WriterVideoStream.maxBitrate = pC_ewc->uiVideoBitrate;
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile: unknown input video format (0x%x),\
+ returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT!",
+ pC_ewc->VideoStreamType);
+ return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+ }
+
+ pC_ewc->WriterVideoStream.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+ pC_ewc->WriterVideoStream.timeScale =
+ 0; /**< Not used by the shell/core writer */
+ pC_ewc->WriterVideoStream.profileLevel =
+ 0; /**< Not used by the shell/core writer */
+ pC_ewc->WriterVideoStream.duration =
+ 0; /**< Not used by the shell/core writer */
+
+ pC_ewc->WriterVideoStream.decoderSpecificInfoSize =
+ sizeof(M4WRITER_StreamVideoInfos);
+ pC_ewc->WriterVideoStream.decoderSpecificInfo =
+ (M4OSA_MemAddr32) &(pC_ewc->WriterVideoStreamInfo);
+
+ /**
+ * Add the video stream */
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctAddStream(
+ pC_ewc->p3gpWriterContext, &pC_ewc->WriterVideoStream);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctAddStream(video) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Update AU properties for video stream */
+ pC_ewc->WriterVideoAU.attribute = AU_RAP;
+ pC_ewc->WriterVideoAU.CTS = 0;
+ pC_ewc->WriterVideoAU.DTS = 0; /** Reset time */
+ pC_ewc->WriterVideoAU.frag = M4OSA_NULL;
+ pC_ewc->WriterVideoAU.nbFrag = 0; /** No fragment */
+ pC_ewc->WriterVideoAU.size = 0;
+ pC_ewc->WriterVideoAU.dataAddress = M4OSA_NULL;
+ pC_ewc->WriterVideoAU.stream = &(pC_ewc->WriterVideoStream);
+
+ /**
+ * Set the writer max video AU size */
+ pC_ewc->uiVideoMaxAuSize = (M4OSA_UInt32)(1.5F
+ *(M4OSA_Float)(pC_ewc->WriterVideoStreamInfo.width
+ * pC_ewc->WriterVideoStreamInfo.height)
+ * M4VSS3GPP_VIDEO_MIN_COMPRESSION_RATIO);
+ temp.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+ temp.value = pC_ewc->uiVideoMaxAuSize;
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+ pC_ewc->p3gpWriterContext, (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+ (M4OSA_DataOption) &temp);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Set the writer max video chunk size */
+ temp.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+ temp.value = (M4OSA_UInt32)(pC_ewc->uiVideoMaxAuSize \
+ * M4VSS3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO); /**< from max AU size to
+ max Chunck size */
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+ pC_ewc->p3gpWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+ (M4OSA_DataOption) &temp);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+
+ if( M4SYS_kAudioUnknown != pC_ewc->AudioStreamType )
+ {
+ M4WRITER_StreamAudioInfos streamAudioInfo;
+
+ streamAudioInfo.nbSamplesPerSec = 0; /**< unused by our shell writer */
+ streamAudioInfo.nbBitsPerSample = 0; /**< unused by our shell writer */
+ streamAudioInfo.nbChannels = 1; /**< unused by our shell writer */
+
+ if( pC_ewc->pAudioOutputDsi != M4OSA_NULL )
+ {
+ /* If we copy the stream from the input, we copy its DSI */
+ streamAudioInfo.Header.Size = pC_ewc->uiAudioOutputDsiSize;
+ streamAudioInfo.Header.pBuf = pC_ewc->pAudioOutputDsi;
+ }
+ else
+ {
+ /* Writer will put a default DSI */
+ streamAudioInfo.Header.Size = 0;
+ streamAudioInfo.Header.pBuf = M4OSA_NULL;
+ }
+
+ pC_ewc->WriterAudioStream.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+ pC_ewc->WriterAudioStream.streamType = pC_ewc->AudioStreamType;
+ pC_ewc->WriterAudioStream.duration =
+ 0; /**< Not used by the shell/core writer */
+ pC_ewc->WriterAudioStream.profileLevel =
+ 0; /**< Not used by the shell/core writer */
+ pC_ewc->WriterAudioStreamInfo.nbSamplesPerSec =
+ pC_ewc->uiSamplingFrequency;
+ pC_ewc->WriterAudioStream.timeScale = pC_ewc->uiSamplingFrequency;
+ pC_ewc->WriterAudioStreamInfo.nbChannels =
+ (M4OSA_UInt16)pC_ewc->uiNbChannels;
+ pC_ewc->WriterAudioStreamInfo.nbBitsPerSample =
+ 0; /**< Not used by the shell/core writer */
+
+ /**
+ * Add the audio stream */
+ switch( pC_ewc->AudioStreamType )
+ {
+ case M4SYS_kAMR:
+ pC_ewc->WriterAudioStream.averageBitrate =
+ 0; /**< It is not used by the shell, the DSI is taken into account instead */
+ pC_ewc->WriterAudioStream.maxBitrate =
+ 0; /**< Not used by the shell/core writer */
+ break;
+
+ case M4SYS_kAAC:
+ pC_ewc->WriterAudioStream.averageBitrate =
+ pC_ewc->uiAudioBitrate;
+ pC_ewc->WriterAudioStream.maxBitrate = pC_ewc->uiAudioBitrate;
+ break;
+
+ case M4SYS_kEVRC:
+ pC_ewc->WriterAudioStream.averageBitrate =
+ 0; /**< It is not used by the shell, the DSI is taken into account instead */
+ pC_ewc->WriterAudioStream.maxBitrate =
+ 0; /**< Not used by the shell/core writer */
+ break;
+
+ case M4SYS_kMP3: /**< there can't be MP3 track in 3GPP file -> error */
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile: unknown output audio format (0x%x),\
+ returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT!",
+ pC_ewc->AudioStreamType);
+ return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+ }
+
+ /**
+ * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos
+ in the DSI pointer... */
+ pC_ewc->WriterAudioStream.decoderSpecificInfo =
+ (M4OSA_MemAddr32) &streamAudioInfo;
+
+ /**
+ * Link the AU and the stream */
+ pC_ewc->WriterAudioAU.stream = &(pC_ewc->WriterAudioStream);
+ pC_ewc->WriterAudioAU.dataAddress = M4OSA_NULL;
+ pC_ewc->WriterAudioAU.size = 0;
+ pC_ewc->WriterAudioAU.CTS =
+ -pC_ewc->iSilenceFrameDuration; /** Reset time */
+ pC_ewc->WriterAudioAU.DTS = 0;
+ pC_ewc->WriterAudioAU.attribute = 0;
+ pC_ewc->WriterAudioAU.nbFrag = 0; /** No fragment */
+ pC_ewc->WriterAudioAU.frag = M4OSA_NULL;
+
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctAddStream(
+ pC_ewc->p3gpWriterContext, &pC_ewc->WriterAudioStream);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctAddStream(audio) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Set the writer max audio AU size */
+ pC_ewc->uiAudioMaxAuSize = M4VSS3GPP_AUDIO_MAX_AU_SIZE;
+ temp.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+ temp.value = pC_ewc->uiAudioMaxAuSize;
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+ pC_ewc->p3gpWriterContext, (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+ (M4OSA_DataOption) &temp);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, audio) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Set the writer max audio chunck size */
+ temp.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+ temp.value = M4VSS3GPP_AUDIO_MAX_CHUNCK_SIZE;
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+ pC_ewc->p3gpWriterContext,
+ (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+ (M4OSA_DataOption) &temp);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, audio) returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * All streams added, we're now ready to write */
+ err = pC_ShellAPI->pWriterGlobalFcts->pFctStartWriting(
+ pC_ewc->p3gpWriterContext);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreate3GPPOutputFile:\
+ pWriterGlobalFcts->pFctStartWriting() returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intCreate3GPPOutputFile(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intComputeOutputVideoAndAudioDsi()
+ * @brief Generate a H263 or MPEG-4 decoder specific info compatible with all input video
+ * tracks. Copy audio dsi from master clip.
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intComputeOutputVideoAndAudioDsi( M4VSS3GPP_InternalEditContext *pC,
+ M4OSA_UInt8 uiMasterClip )
+{
+ M4OSA_Int32 iResynchMarkerDsiIndex;
+ M4_StreamHandler *pStreamForDsi;
+ M4VSS3GPP_ClipContext *pClip;
+ M4OSA_ERR err;
+ M4OSA_UInt32 i;
+ M4DECODER_MPEG4_DecoderConfigInfo DecConfigInfo;
+ M4DECODER_VideoSize dummySize;
+ M4OSA_Bool bGetDSiFromEncoder = M4OSA_FALSE;
+
+ M4ENCODER_Header *encHeader;
+ M4SYS_StreamIDmemAddr streamHeader;
+
+ pStreamForDsi = M4OSA_NULL;
+ pClip = M4OSA_NULL;
+
+ /**
+ * H263 case */
+ if( M4SYS_kH263 == pC->ewc.VideoStreamType )
+ {
+ /**
+ * H263 output DSI is always 7 bytes */
+ pC->ewc.uiVideoOutputDsiSize = 7;
+ pC->ewc.pVideoOutputDsi =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(pC->ewc.uiVideoOutputDsiSize,
+ M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H263)");
+
+ if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+ unable to allocate pVideoOutputDsi (H263), returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+
+ /**
+ * (We override the input vendor info.
+ * At least we know that nothing special will be tried with PHLP-stamped
+ edited streams...) */
+ pC->ewc.pVideoOutputDsi[0] = 'P';
+ pC->ewc.pVideoOutputDsi[1] = 'H';
+ pC->ewc.pVideoOutputDsi[2] = 'L';
+ pC->ewc.pVideoOutputDsi[3] = 'P';
+
+ /**
+ * Decoder version is 0 */
+ pC->ewc.pVideoOutputDsi[4] = 0;
+
+ /**
+ * Level is the sixth byte in the DSI */
+ pC->ewc.pVideoOutputDsi[5] = pC->xVSS.outputVideoLevel;
+
+ /**
+ * Profile is the seventh byte in the DSI*/
+ pC->ewc.pVideoOutputDsi[6] = pC->xVSS.outputVideoProfile;
+ }
+
+ /**
+ * MPEG-4 case */
+ else if( M4SYS_kMPEG_4 == pC->ewc.VideoStreamType ||
+ M4SYS_kH264 == pC->ewc.VideoStreamType) {
+
+ /* For MPEG4 and H.264 encoder case
+ * Fetch the DSI from the shell video encoder, and feed it to the writer before
+ closing it. */
+
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: get DSI for H264 stream");
+
+ if( M4OSA_NULL == pC->ewc.pEncContext )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: pC->ewc.pEncContext is NULL");
+ err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+ M4VSS3GPP_intCreateVideoEncoder returned error 0x%x",
+ err);
+ }
+ }
+
+ if( M4OSA_NULL != pC->ewc.pEncContext )
+ {
+ err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctGetOption(
+ pC->ewc.pEncContext, M4ENCODER_kOptionID_EncoderHeader,
+ (M4OSA_DataOption) &encHeader);
+
+ if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+ failed to get the encoder header (err 0x%x)",
+ err);
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: encHeader->pBuf=0x%x, size=0x%x",
+ encHeader->pBuf, encHeader->Size);
+ }
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+ send DSI for video stream to 3GP writer");
+
+ /**
+ * Allocate and copy the new DSI */
+ pC->ewc.pVideoOutputDsi =
+ (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(encHeader->Size, M4VSS3GPP,
+ (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H264)");
+
+ if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+ unable to allocate pVideoOutputDsi, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->ewc.uiVideoOutputDsiSize = (M4OSA_UInt16)encHeader->Size;
+ memcpy((void *)pC->ewc.pVideoOutputDsi, (void *)encHeader->pBuf,
+ encHeader->Size);
+ }
+
+ err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+ M4VSS3GPP_intDestroyVideoEncoder returned error 0x%x",
+ err);
+ }
+ }
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+ pC->ewc.pEncContext is NULL, cannot get the DSI");
+ }
+ }
+
+ pStreamForDsi = M4OSA_NULL;
+ pClip = M4OSA_NULL;
+
+ /* Compute Audio DSI */
+ if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+ {
+ if( uiMasterClip == 0 )
+ {
+ /* Clip is already opened */
+ pStreamForDsi = &(pC->pC1->pAudioStream->m_basicProperties);
+ }
+ else
+ {
+ /**
+ * We can use the fast open mode to get the DSI */
+ err = M4VSS3GPP_intClipInit(&pClip, pC->pOsaFileReadPtr);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+ M4VSS3GPP_intClipInit() returns 0x%x!",
+ err);
+
+ if( pClip != M4OSA_NULL )
+ {
+ M4VSS3GPP_intClipCleanUp(pClip);
+ }
+ return err;
+ }
+
+ err = M4VSS3GPP_intClipOpen(pClip, &pC->pClipList[uiMasterClip],
+ M4OSA_FALSE, M4OSA_TRUE, M4OSA_TRUE);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+ M4VSS3GPP_intClipOpen() returns 0x%x!",
+ err);
+ M4VSS3GPP_intClipCleanUp(pClip);
+ return err;
+ }
+
+ pStreamForDsi = &(pClip->pAudioStream->m_basicProperties);
+ }
+
+ /**
+ * Allocate and copy the new DSI */
+ pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)M4OSA_32bitAlignedMalloc(
+ pStreamForDsi->m_decoderSpecificInfoSize,
+ M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pAudioOutputDsi");
+
+ if( M4OSA_NULL == pC->ewc.pAudioOutputDsi )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+ unable to allocate pAudioOutputDsi, returning M4ERR_ALLOC");
+ return M4ERR_ALLOC;
+ }
+ pC->ewc.uiAudioOutputDsiSize =
+ (M4OSA_UInt16)pStreamForDsi->m_decoderSpecificInfoSize;
+ memcpy((void *)pC->ewc.pAudioOutputDsi,
+ (void *)pStreamForDsi->m_pDecoderSpecificInfo,
+ pC->ewc.uiAudioOutputDsiSize);
+
+ /**
+ * If a clip has been temporarily opened to get its DSI, close it */
+ if( M4OSA_NULL != pClip )
+ {
+ err = M4VSS3GPP_intClipCleanUp(pClip);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+ M4VSS3GPP_intClipCleanUp() returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0(
+ "M4VSS3GPP_intComputeOutputVideoAndAudioDsi(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intSwitchToNextClip()
+ * @brief Switch from the current clip to the next one
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
static M4OSA_ERR M4VSS3GPP_intSwitchToNextClip(
    M4VSS3GPP_InternalEditContext *pC )
{
    M4OSA_ERR err;

    /* Tear down the current first clip, if any: free its three pre-resize
     * image planes (Y/U/V), then close the clip context. */
    if( M4OSA_NULL != pC->pC1 )
    {
        if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame) {
            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame[0].pac_data) {
                free(pC->pC1->m_pPreResizeFrame[0].pac_data);
                pC->pC1->m_pPreResizeFrame[0].pac_data = M4OSA_NULL;
            }
            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame[1].pac_data) {
                free(pC->pC1->m_pPreResizeFrame[1].pac_data);
                pC->pC1->m_pPreResizeFrame[1].pac_data = M4OSA_NULL;
            }
            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame[2].pac_data) {
                free(pC->pC1->m_pPreResizeFrame[2].pac_data);
                pC->pC1->m_pPreResizeFrame[2].pac_data = M4OSA_NULL;
            }
            free(pC->pC1->m_pPreResizeFrame);
            pC->pC1->m_pPreResizeFrame = M4OSA_NULL;
        }
        /**
         * Close the current first clip */
        err = M4VSS3GPP_intClipCleanUp(pC->pC1);

        if( M4NO_ERROR != err )
        {
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intSwitchToNextClip: M4VSS3GPP_intClipCleanUp(C1) returns 0x%x!",
                err);
            return err;
        }

        /**
         * increment clip counter */
        pC->uiCurrentClip++;
    }

    /**
     * Check if we reached the last clip */
    if( pC->uiCurrentClip >= pC->uiClipNumber )
    {
        pC->pC1 = M4OSA_NULL;
        pC->State = M4VSS3GPP_kEditState_FINISHED;

        M4OSA_TRACE1_0(
            "M4VSS3GPP_intSwitchToNextClip:\
            M4VSS3GPP_intClipClose(C1) returns M4VSS3GPP_WAR_EDITING_DONE");
        return M4VSS3GPP_WAR_EDITING_DONE;
    }

    /**
     * If the next clip has already be opened, set it as first clip */
    if( M4OSA_NULL != pC->pC2 )
    {
        pC->pC1 = pC->pC2;
        /* NOTE(review): pC1 == pC2 here, so this copies the field onto
         * itself; looks like a no-op kept for clarity — confirm before
         * removing. */
        if(M4OSA_NULL != pC->pC2->m_pPreResizeFrame) {
            pC->pC1->m_pPreResizeFrame = pC->pC2->m_pPreResizeFrame;
        }
        pC->pC2 = M4OSA_NULL;
        /* Carry the framing-effect flag over to the new first clip */
        pC->bClip1ActiveFramingEffect = pC->bClip2ActiveFramingEffect;
        pC->bClip2ActiveFramingEffect = M4OSA_FALSE;
    }
    /**
     * else open it */
    else
    {
        err = M4VSS3GPP_intOpenClip(pC, &pC->pC1,
            &pC->pClipList[pC->uiCurrentClip]);

        if( M4NO_ERROR != err )
        {
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intSwitchToNextClip: M4VSS3GPP_intOpenClip() returns 0x%x!",
                err);
            return err;
        }

        /**
         * If the second clip has not been opened yet,
         that means that there has been no transition.
         * So both output video and audio times are OK.
         * So we can set both video2 and audio offsets */

        /**
         * Add current video output CTS to the clip video offset */

        // Decorrelate input and output encoding timestamp to handle encoder prefetch
        pC->pC1->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
        /**
         * Add current audio output CTS to the clip audio offset */
        pC->pC1->iAoffset +=
            (M4OSA_UInt32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);

        /**
         * 2005-03-24: BugFix for audio-video synchro:
         * There may be a portion of the duration of an audio AU of desynchro at each assembly.
         * It leads to an audible desynchro when there are a lot of clips assembled.
         * This bug fix allows to resynch the audio track when the delta is higher
         * than one audio AU duration.
         * We Step one AU in the second clip and we change the audio offset accordingly. */
        if( ( pC->pC1->iAoffset
            - (M4OSA_Int32)(pC->pC1->iVoffset *pC->pC1->scale_audio + 0.5))
            > pC->ewc.iSilenceFrameDuration )
        {
            /**
             * Advance one AMR frame */
            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);

            if( M4OSA_ERR_IS_ERROR(err) )
            {
                M4OSA_TRACE1_1(
                    "M4VSS3GPP_intSwitchToNextClip:\
                    M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
                    err);
                return err;
            }
            /**
             * Update audio offset accordingly*/
            pC->pC1->iAoffset -= pC->ewc.iSilenceFrameDuration;
        }
    }

    /**
     * Init starting state for this clip processing */
    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
    {
        /**
         * In the MP3 case we use a special audio state */
        pC->State = M4VSS3GPP_kEditState_MP3_JUMP;
    }
    else
    {
        /**
         * We start with the video processing */
        pC->State = M4VSS3GPP_kEditState_VIDEO;

        if( pC->Vstate != M4VSS3GPP_kEditVideoState_TRANSITION )
        {
            /* if not a transition then reset previous video state */
            pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
        }
    }
    /* The flags are set to false at the beginning of every clip */
    pC->m_bClipExternalHasStarted = M4OSA_FALSE;
    pC->bEncodeTillEoF = M4OSA_FALSE;

    /**
     * Return with no error */
    M4OSA_TRACE3_0("M4VSS3GPP_intSwitchToNextClip(): returning M4NO_ERROR");
    /* RC: to know when a file has been processed
     * (M4VSS3GPP_WAR_SWITCH_CLIP is a warning, not an error: callers use it
     * as a "clip changed" notification) */
    return M4VSS3GPP_WAR_SWITCH_CLIP;
}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo()
+ * @brief Do what to do when the end of a clip video track is reached
+ * @note If there is audio on the current clip, process it, else switch to the next clip
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo( M4VSS3GPP_InternalEditContext *pC )
+{
+ M4OSA_ERR err;
+
+ /**
+ * Video is done for this clip, now we do the audio */
+ if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+ {
+ pC->State = M4VSS3GPP_kEditState_AUDIO;
+ }
+ else
+ {
+ /**
+ * Clip done, do the next one */
+ err = M4VSS3GPP_intSwitchToNextClip(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intReachedEndOfVideo: M4VSS3GPP_intSwitchToNextClip() returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intReachedEndOfVideo(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio()
+ * @brief Do what to do when the end of a clip audio track is reached
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio( M4VSS3GPP_InternalEditContext *pC )
+{
+ M4OSA_ERR err;
+
+ /**
+ * Clip done, do the next one */
+ err = M4VSS3GPP_intSwitchToNextClip(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intReachedEndOfAudio: M4VSS3GPP_intSwitchToNextClip() returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Start with the video */
+ if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+ {
+ pC->State = M4VSS3GPP_kEditState_VIDEO;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intReachedEndOfAudio(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intOpenClip()
+ * @brief Open next clip
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intOpenClip( M4VSS3GPP_InternalEditContext *pC,
+ M4VSS3GPP_ClipContext ** hClip,
+ M4VSS3GPP_ClipSettings *pClipSettings )
+{
+ M4OSA_ERR err;
+ M4VSS3GPP_ClipContext *pClip; /**< shortcut */
+ M4VIDEOEDITING_ClipProperties *pClipProperties = M4OSA_NULL;
+ M4OSA_Int32 iCts;
+ M4OSA_UInt32 i;
+
+ M4OSA_TRACE2_1("M4VSS3GPP_intOpenClip: \"%s\"",
+ (M4OSA_Char *)pClipSettings->pFile);
+
+ err = M4VSS3GPP_intClipInit(hClip, pC->pOsaFileReadPtr);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipInit() returns 0x%x!",
+ err);
+
+ if( *hClip != M4OSA_NULL )
+ {
+ M4VSS3GPP_intClipCleanUp(*hClip);
+ }
+ return err;
+ }
+
+ /**
+ * Set shortcut */
+ pClip = *hClip;
+
+ if (pClipSettings->FileType == M4VIDEOEDITING_kFileType_ARGB8888 ) {
+ pClipProperties = &pClipSettings->ClipProperties;
+ pClip->pSettings = pClipSettings;
+ pClip->iEndTime = pClipSettings->uiEndCutTime;
+ }
+
+ err = M4VSS3GPP_intClipOpen(pClip, pClipSettings,
+ M4OSA_FALSE, M4OSA_FALSE, M4OSA_FALSE);
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1("M4VSS3GPP_intOpenClip: \
+ M4VSS3GPP_intClipOpen() returns 0x%x!", err);
+ M4VSS3GPP_intClipCleanUp(pClip);
+ *hClip = M4OSA_NULL;
+ return err;
+ }
+
+ if (pClipSettings->FileType != M4VIDEOEDITING_kFileType_ARGB8888 ) {
+ pClipProperties = &pClip->pSettings->ClipProperties;
+ }
+
+ /**
+ * Copy common 'silence frame stuff' to ClipContext */
+ pClip->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+ pClip->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+ pClip->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+ pClip->iSilenceFrameDuration = pC->ewc.iSilenceFrameDuration;
+ pClip->scale_audio = pC->ewc.scale_audio;
+
+ pClip->iAudioFrameCts = -pClip->iSilenceFrameDuration; /* Reset time */
+
+ /**
+ * If the audio track is not compatible with the output audio format,
+ * we remove it. So it will be replaced by silence */
+ if( M4OSA_FALSE == pClipProperties->bAudioIsCompatibleWithMasterClip )
+ {
+ M4VSS3GPP_intClipDeleteAudioTrack(pClip);
+ }
+
+ /**
+ * Actual begin cut */
+ if( 0 == pClipSettings->uiBeginCutTime )
+ {
+ pClip->iVoffset = 0;
+ pClip->iAoffset = 0;
+ pClip->iActualVideoBeginCut = 0;
+ pClip->iActualAudioBeginCut = 0;
+ }
+ else if(pClipSettings->FileType != M4VIDEOEDITING_kFileType_ARGB8888) {
+ if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+ {
+ /**
+ * Jump the video to the target begin cut to get the actual begin cut value */
+ pClip->iActualVideoBeginCut =
+ (M4OSA_Int32)pClipSettings->uiBeginCutTime;
+ iCts = pClip->iActualVideoBeginCut;
+
+ err = pClip->ShellAPI.m_pReader->m_pFctJump(pClip->pReaderContext,
+ (M4_StreamHandler *)pClip->pVideoStream, &iCts);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intOpenClip: m_pFctJump(V) returns 0x%x!", err);
+ return err;
+ }
+
+ /**
+ * Update clip offset with the video begin cut */
+ pClip->iVoffset = -pClip->iActualVideoBeginCut;
+ }
+
+ if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+ {
+ /**
+ * Jump the audio to the video actual begin cut */
+ if( M4VIDEOEDITING_kMP3 != pClipProperties->AudioStreamType )
+ {
+ pClip->iActualAudioBeginCut = pClip->iActualVideoBeginCut;
+ iCts = (M4OSA_Int32)(pClip->iActualAudioBeginCut
+ * pClip->scale_audio + 0.5);
+
+ err = M4VSS3GPP_intClipJumpAudioAt(pClip, &iCts);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipJumpAudioAt(A) returns 0x%x!",
+ err);
+ return err;
+ }
+ /**
+ * Update clip offset with the audio begin cut */
+ pClip->iAoffset = -iCts;
+ }
+ else
+ {
+ /**
+ * For the MP3, the jump is not done because of the VBR,
+ it could be not enough accurate */
+ pClip->iActualAudioBeginCut =
+ (M4OSA_Int32)pClipSettings->uiBeginCutTime;
+ }
+ }
+ }
+
+ if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+ {
+ if ((pClipSettings->FileType != M4VIDEOEDITING_kFileType_ARGB8888 )) {
+
+ /**
+ * Read the first Video AU of the clip */
+ err = pClip->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+ pClip->pReaderContext,
+ (M4_StreamHandler *)pClip->pVideoStream, &pClip->VideoAU);
+
+ if( M4WAR_NO_MORE_AU == err )
+ {
+ /**
+ * If we (already!) reach the end of the clip, we filter the error.
+ * It will be correctly managed at the first step. */
+ err = M4NO_ERROR;
+ }
+ else if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1("M4VSS3GPP_intOpenClip: \
+ m_pReaderDataIt->m_pFctGetNextAu() returns 0x%x!", err);
+ return err;
+ }
+ } else {
+ pClipProperties->uiVideoWidth = pClipProperties->uiStillPicWidth;
+ pClipProperties->uiVideoHeight = pClipProperties->uiStillPicHeight;
+ }
+ /* state check not to allocate buffer during save start */
+
+
+ /******************************/
+ /* Video resize management */
+ /******************************/
+ /**
+ * If the input clip is a rotate video or the output resolution is different
+ * from the input resolution, then the video frame needs to be rotated
+ * or resized, force to resize mode */
+ if (((M4OSA_UInt32)pC->ewc.uiVideoWidth !=
+ pClipProperties->uiVideoWidth) ||
+ ((M4OSA_UInt32)pC->ewc.uiVideoHeight !=
+ pClipProperties->uiVideoHeight) ||
+ pClipProperties->videoRotationDegrees != 0) {
+
+ if (pClip->m_pPreResizeFrame == M4OSA_NULL) {
+ /**
+ * Allocate the intermediate video plane that will
+ receive the decoded image before resizing */
+ pClip->m_pPreResizeFrame =
+ (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(
+ 3*sizeof(M4VIFI_ImagePlane), M4VSS3GPP,
+ (M4OSA_Char *)"pPreResizeFrame");
+ if (M4OSA_NULL == pClip->m_pPreResizeFrame) {
+ M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder(): \
+ unable to allocate m_pPreResizeFrame");
+ return M4ERR_ALLOC;
+ }
+
+ pClip->m_pPreResizeFrame[0].pac_data = M4OSA_NULL;
+ pClip->m_pPreResizeFrame[1].pac_data = M4OSA_NULL;
+ pClip->m_pPreResizeFrame[2].pac_data = M4OSA_NULL;
+
+ /**
+ * Allocate the Y plane */
+ pClip->m_pPreResizeFrame[0].u_topleft = 0;
+ pClip->m_pPreResizeFrame[0].u_width =
+ pClipProperties->uiVideoWidth;
+ pClip->m_pPreResizeFrame[0].u_height =
+ pClipProperties->uiVideoHeight;
+ pClip->m_pPreResizeFrame[0].u_stride =
+ pClip->m_pPreResizeFrame[0].u_width;
+
+ pClip->m_pPreResizeFrame[0].pac_data =
+ (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc (
+ pClip->m_pPreResizeFrame[0].u_stride * pClip->m_pPreResizeFrame[0].u_height,
+ M4MCS, (M4OSA_Char *)"m_pPreResizeFrame[0].pac_data");
+ if (M4OSA_NULL == pClip->m_pPreResizeFrame[0].pac_data) {
+ M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder(): \
+ unable to allocate m_pPreResizeFrame[0].pac_data");
+ free(pClip->m_pPreResizeFrame);
+ return M4ERR_ALLOC;
+ }
+
+ /**
+ * Allocate the U plane */
+ pClip->m_pPreResizeFrame[1].u_topleft = 0;
+ pClip->m_pPreResizeFrame[1].u_width =
+ pClip->m_pPreResizeFrame[0].u_width >> 1;
+ pClip->m_pPreResizeFrame[1].u_height =
+ pClip->m_pPreResizeFrame[0].u_height >> 1;
+ pClip->m_pPreResizeFrame[1].u_stride =
+ pClip->m_pPreResizeFrame[1].u_width;
+
+ pClip->m_pPreResizeFrame[1].pac_data =
+ (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc (
+ pClip->m_pPreResizeFrame[1].u_stride * pClip->m_pPreResizeFrame[1].u_height,
+ M4MCS, (M4OSA_Char *)"m_pPreResizeFrame[1].pac_data");
+ if (M4OSA_NULL == pClip->m_pPreResizeFrame[1].pac_data) {
+ M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder(): \
+ unable to allocate m_pPreResizeFrame[1].pac_data");
+ free(pClip->m_pPreResizeFrame[0].pac_data);
+ free(pClip->m_pPreResizeFrame);
+ return M4ERR_ALLOC;
+ }
+
+ /**
+ * Allocate the V plane */
+ pClip->m_pPreResizeFrame[2].u_topleft = 0;
+ pClip->m_pPreResizeFrame[2].u_width =
+ pClip->m_pPreResizeFrame[1].u_width;
+ pClip->m_pPreResizeFrame[2].u_height =
+ pClip->m_pPreResizeFrame[1].u_height;
+ pClip->m_pPreResizeFrame[2].u_stride =
+ pClip->m_pPreResizeFrame[2].u_width;
+
+ pClip->m_pPreResizeFrame[2].pac_data =
+ (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc (
+ pClip->m_pPreResizeFrame[2].u_stride * pClip->m_pPreResizeFrame[2].u_height,
+ M4MCS, (M4OSA_Char *)"m_pPreResizeFrame[2].pac_data");
+ if (M4OSA_NULL == pClip->m_pPreResizeFrame[2].pac_data) {
+ M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder(): \
+ unable to allocate m_pPreResizeFrame[2].pac_data");
+ free(pClip->m_pPreResizeFrame[0].pac_data);
+ free(pClip->m_pPreResizeFrame[1].pac_data);
+ free(pClip->m_pPreResizeFrame);
+ return M4ERR_ALLOC;
+ }
+ }
+ }
+
+ /**
+ * The video is currently in reading mode */
+ pClip->Vstatus = M4VSS3GPP_kClipStatus_READ;
+ }
+
+ if( ( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType)
+ && (M4VIDEOEDITING_kMP3 != pClipProperties->AudioStreamType) )
+ {
+ /**
+ * Read the first Audio AU of the clip */
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pClip);
+
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * The audio is currently in reading mode */
+ pClip->Astatus = M4VSS3GPP_kClipStatus_READ;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intOpenClip(): returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intComputeOutputAverageVideoBitrate()
+ * @brief Average bitrate of the output file, computed from input bitrates,
+ * durations, transitions and cuts.
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate(
+ M4VSS3GPP_InternalEditContext *pC )
+{
+ M4VSS3GPP_ClipSettings *pCS_0, *pCS_1, *pCS_2;
+ M4VSS3GPP_TransitionSettings *pT0, *pT2;
+ M4OSA_Int32 i;
+
+ M4OSA_UInt32 t0_duration, t2_duration;
+ M4OSA_UInt32 t0_bitrate, t2_bitrate;
+ M4OSA_UInt32 c1_duration;
+
+ M4OSA_UInt32 total_duration;
+ M4OSA_UInt32 total_bitsum;
+
+ total_duration = 0;
+ total_bitsum = 0;
+
+ /* Loop on the number of clips */
+ for ( i = 0; i < pC->uiClipNumber; i++ )
+ {
+ pCS_1 = &pC->pClipList[i];
+
+ t0_duration = 0;
+ t0_bitrate = pCS_1->ClipProperties.uiVideoBitrate;
+ t2_duration = 0;
+ t2_bitrate = pCS_1->ClipProperties.uiVideoBitrate;
+
+ /* Transition with the previous clip */
+ if( i > 0 )
+ {
+ pCS_0 = &pC->pClipList[i - 1];
+ pT0 = &pC->pTransitionList[i - 1];
+
+ if( pT0->VideoTransitionType
+ != M4VSS3GPP_kVideoTransitionType_None )
+ {
+ t0_duration = pT0->uiTransitionDuration;
+
+ if( pCS_0->ClipProperties.uiVideoBitrate > t0_bitrate )
+ {
+ t0_bitrate = pCS_0->ClipProperties.uiVideoBitrate;
+ }
+ }
+ }
+
+ /* Transition with the next clip */
+ if( i < pC->uiClipNumber - 1 )
+ {
+ pCS_2 = &pC->pClipList[i + 1];
+ pT2 = &pC->pTransitionList[i];
+
+ if( pT2->VideoTransitionType
+ != M4VSS3GPP_kVideoTransitionType_None )
+ {
+ t2_duration = pT2->uiTransitionDuration;
+
+ if( pCS_2->ClipProperties.uiVideoBitrate > t2_bitrate )
+ {
+ t2_bitrate = pCS_2->ClipProperties.uiVideoBitrate;
+ }
+ }
+ }
+
+ /* Check for cut times */
+ if( pCS_1->uiEndCutTime > 0 )
+ c1_duration = pCS_1->uiEndCutTime;
+ else
+ c1_duration = pCS_1->ClipProperties.uiClipVideoDuration;
+
+ if( pCS_1->uiBeginCutTime > 0 )
+ c1_duration -= pCS_1->uiBeginCutTime;
+
+ c1_duration -= t0_duration + t2_duration;
+
+ /* Compute bitsum and duration */
+ total_duration += c1_duration + t0_duration / 2 + t2_duration / 2;
+
+ total_bitsum +=
+ c1_duration * (pCS_1->ClipProperties.uiVideoBitrate / 1000)
+ + (t0_bitrate / 1000) * t0_duration / 2
+ + (t2_bitrate / 1000) * t2_duration / 2;
+ }
+
+ pC->ewc.uiVideoBitrate = ( total_bitsum / total_duration) * 1000;
+}
+
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c b/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c
new file mode 100755
index 0000000..746883d
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c
@@ -0,0 +1,2013 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_EditAudio.c
+ * @brief Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our header */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h" /**< OSAL debug management */
+
+#define PWR_FXP_FRACT_MAX (32768)
+
+/************************************************************************/
+/* Static local functions */
+/************************************************************************/
+static M4OSA_ERR M4VSS3GPP_intCheckAudioMode( M4VSS3GPP_InternalEditContext
+ *pC );
+static M4OSA_Void M4VSS3GPP_intCheckAudioEffects( M4VSS3GPP_InternalEditContext
+ *pC, M4OSA_UInt8 uiClipNumber );
+static M4OSA_ERR M4VSS3GPP_intApplyAudioEffect( M4VSS3GPP_InternalEditContext
+ *pC, M4OSA_UInt8 uiClip1orClip2,
+ M4OSA_Int16 *pPCMdata,
+ M4OSA_UInt32 uiPCMsize );
+static M4OSA_ERR M4VSS3GPP_intAudioTransition( M4VSS3GPP_InternalEditContext
+ *pC, M4OSA_Int16 *pPCMdata1,
+ M4OSA_Int16 *pPCMdata2,
+ M4OSA_UInt32 uiPCMsize );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditJumpMP3()
+ * @brief One step of jumping processing for the MP3 clip.
+ * @note On one step, the jump of several AU is done
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditJumpMP3( M4VSS3GPP_InternalEditContext *pC )
+{
+ M4OSA_ERR err;
+ M4VSS3GPP_ClipContext *pClip = pC->pC1; /**< shortcut */
+ M4OSA_Int32 JumpCts;
+
+ JumpCts = pClip->iActualAudioBeginCut;
+
+ err = M4VSS3GPP_intClipJumpAudioAt(pClip, &JumpCts);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipJumpAudioAt(A) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ if( JumpCts >= pClip->iActualAudioBeginCut )
+ {
+ pC->State = M4VSS3GPP_kEditState_MP3;
+
+ /**
+ * Update clip offset with the audio begin cut */
+ pClip->iAoffset = -JumpCts;
+
+ /**
+ * The audio is currently in reading mode */
+ pClip->Astatus = M4VSS3GPP_kClipStatus_READ;
+ }
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepMP3()
+ * @brief One step of audio processing for the MP3 clip
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepMP3( M4VSS3GPP_InternalEditContext *pC )
+{
+ M4OSA_ERR err;
+ M4VSS3GPP_ClipContext *pClip = pC->pC1; /**< shortcut */
+
+ /**
+ * Copy the input AU to the output AU */
+ err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+ pClip->pAudioFramePtr, (M4OSA_UInt32)pClip->uiAudioFrameSize);
+
+ /**
+ * Read the next audio frame */
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pClip);
+
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepMP3: READ_WRITE:\
+ M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!", err);
+ return err;
+ }
+ else
+ {
+ /**
+ * Update current time (to=tc+T) */
+ pC->ewc.dATo =
+ ( pClip->iAudioFrameCts + pClip->iAoffset) / pClip->scale_audio;
+
+ if( (M4OSA_Int32)(pClip->iAudioFrameCts / pClip->scale_audio + 0.5)
+ >= pClip->iEndTime )
+ {
+ M4READER_Buffer mp3tagBuffer;
+
+ /**
+ * The duration is better respected if the first AU and last AU are both above
+ the cut time */
+ err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+ pClip->pAudioFramePtr,
+ (M4OSA_UInt32)pClip->uiAudioFrameSize);
+
+ /* The ID3v1 tag is always at the end of the mp3 file so the end of the cutting
+ process is waited */
+ /* before writing the metadata in the output file*/
+
+ /* Retrieve the data of the ID3v1 Tag */
+ err = pClip->ShellAPI.m_pReader->m_pFctGetOption(
+ pClip->pReaderContext, M4READER_kOptionID_Mp3Id3v1Tag,
+ (M4OSA_DataOption) &mp3tagBuffer);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepMP3: M4MP3R_getOption returns 0x%x",
+ err);
+ return err;
+ }
+
+ /* Write the data of the ID3v1 Tag in the output file */
+ if( 0 != mp3tagBuffer.m_uiBufferSize )
+ {
+ err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+ (M4OSA_MemAddr8)mp3tagBuffer.m_pData, mp3tagBuffer.m_uiBufferSize);
+ /**
+ * Free before the error checking anyway */
+ free(mp3tagBuffer.m_pData);
+
+ /**
+ * Error checking */
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepMP3:\
+ pOsaFileWritPtr->writeData(ID3v1Tag) returns 0x%x", err);
+ return err;
+ }
+
+ mp3tagBuffer.m_uiBufferSize = 0;
+ mp3tagBuffer.m_pData = M4OSA_NULL;
+ }
+
+ /* The End Cut has been reached */
+ err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepMP3 : M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ if( ( M4WAR_NO_MORE_AU == err) && (M4OSA_FALSE
+ == pC->bSupportSilence) ) /**< Reached end of clip */
+ {
+ err = M4VSS3GPP_intReachedEndOfAudio(
+ pC); /**< Clip done, do the next one */
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepMP3: READ_WRITE:\
+ M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+ err);
+ return err;
+ }
+ }
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intEditStepMP3: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepAudio()
+ * @brief One step of audio processing
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepAudio( M4VSS3GPP_InternalEditContext *pC )
+{
+ M4OSA_ERR err;
+ int32_t auTimeStamp = -1;
+
+ M4ENCODER_AudioBuffer pEncInBuffer; /**< Encoder input buffer for api */
+ M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+ M4OSA_Time
+ frameTimeDelta; /**< Duration of the encoded (then written) data */
+ M4OSA_Bool bStopAudio;
+
+ /**
+ * Check if we reached end cut */
+ if( ( pC->ewc.dATo - pC->pC1->iAoffset / pC->pC1->scale_audio + 0.5)
+ >= pC->pC1->iEndTime )
+ {
+ /**
+ * Audio is done for this clip */
+ err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+ /* RC: to know when a file has been processed */
+ if( M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+ err);
+ }
+
+ return err;
+ }
+
+ /**
+ * Check Audio Mode, depending on the current output CTS */
+ err = M4VSS3GPP_intCheckAudioMode(
+ pC); /**< This function change the pC->Astate variable! */
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intCheckAudioMode returns 0x%x!",
+ err);
+ return err;
+ }
+
+ M4OSA_TRACE2_3(" AUDIO step : dATo = %f state = %d offset = %ld",
+ pC->ewc.dATo, pC->Astate, pC->pC1->iAoffset);
+
+ bStopAudio = M4OSA_FALSE;
+
+ switch( pC->Astate )
+ {
+ /* _________________ */
+ /*| |*/
+ /*| READ_WRITE MODE |*/
+ /*|_________________|*/
+
+ case M4VSS3GPP_kEditAudioState_READ_WRITE:
+ {
+ M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio READ_WRITE");
+
+ /**
+ * Get the output AU to write into */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+ pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+ &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio:\
+ READ_WRITE: pWriterDataFcts->pStartAU returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Compute output audio CTS */
+ pC->ewc.WriterAudioAU.CTS =
+ pC->pC1->iAudioFrameCts + pC->pC1->iAoffset;
+
+ /**
+ * BZZZ bug fix (read-write case):
+ * Replace the first AMR AU of the stream with a silence AU.
+ * It removes annoying "BZZZ" audio glitch.
+ * It is not needed if there is a begin cut.
+ * It is not needed for the first clip.
+ * Because of another bugfix (2005-03-24), the first AU written may be
+ * the second one which CTS is 20. Hence the cts<21 test.
+ * (the BZZZ effect occurs even with the second AU!) */
+ if( ( M4OSA_FALSE == pC->pC1->bFirstAuWritten)
+ && (0 != pC->uiCurrentClip) && (pC->pC1->iAudioFrameCts
+ < (pC->ewc.iSilenceFrameDuration + 1)) )
+ {
+ /**
+ * Copy a silence AU to the output */
+ pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+ memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
+ (void *)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+ M4OSA_TRACE2_0("A #### silence AU");
+ }
+ else if( (M4OSA_UInt32)pC->pC1->uiAudioFrameSize
+ < pC->ewc.uiAudioMaxAuSize )
+ {
+ /**
+ * Copy the input AU to the output AU */
+ pC->ewc.WriterAudioAU.size =
+ (M4OSA_UInt32)pC->pC1->uiAudioFrameSize;
+ memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
+ (void *)pC->pC1->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+ }
+ else
+ {
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_intEditStepAudio: READ_WRITE: AU size greater than MaxAuSize \
+ (%d>%d)! returning M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE",
+ pC->pC1->uiAudioFrameSize, pC->ewc.uiAudioMaxAuSize);
+ return M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE;
+ }
+
+ /**
+ * This boolean is only used to fix the BZZ bug... */
+ pC->pC1->bFirstAuWritten = M4OSA_TRUE;
+
+ M4OSA_TRACE2_2("B ---- write : cts = %ld [ 0x%x ]",
+ (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+ pC->ewc.WriterAudioAU.size);
+
+ /**
+ * Write the AU */
+ err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+ pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+ &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ /*11/12/2008 CR 3283 MMS use case for VideoArtist
+ the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output file
+ size is reached
+ The editing is then finished,
+ the warning M4VSS3GPP_WAR_EDITING_DONE is returned*/
+ if( M4WAR_WRITER_STOP_REQ == err )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+ return M4VSS3GPP_WAR_EDITING_DONE;
+ }
+ else
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio:\
+ READ_WRITE: pWriterDataFcts->pProcessAU returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Audio is now in read mode (there may be a "if(status!=READ)" here,
+ but it is removed for optimization) */
+ pC->pC1->Astatus = M4VSS3GPP_kClipStatus_READ;
+
+ /**
+ * Read the next audio frame */
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+ M4OSA_TRACE2_3("C .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+ pC->pC1->iAoffset / pC->pC1->scale_audio,
+ pC->pC1->uiAudioFrameSize);
+
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: READ_WRITE:\
+ M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+ err);
+ return err;
+ }
+ else
+ {
+ /**
+ * Update current time (to=tc+T) */
+ pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+ / pC->pC1->scale_audio;
+
+ if( ( M4WAR_NO_MORE_AU == err)
+ && (M4OSA_FALSE == pC->bSupportSilence) )
+ {
+ /**
+ * If output is other than AMR or AAC
+ (i.e. EVRC,we can't write silence into it)
+ * So we simply end here.*/
+ bStopAudio = M4OSA_TRUE;
+ }
+ }
+ }
+ break;
+
+ /* ____________________ */
+ /*| |*/
+ /*| DECODE_ENCODE MODE |*/
+ /*|____________________|*/
+
+ case M4VSS3GPP_kEditAudioState_DECODE_ENCODE:
+ {
+ M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio DECODE_ENCODE");
+
+ /**
+ * Get the output AU to write into */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+ pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+ &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+ pWriterDataFcts->pStartAU returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * If we were reading the clip, we must jump a few AU backward to decode/encode
+ (without writing result) from that point. */
+ if( M4VSS3GPP_kClipStatus_READ == pC->pC1->Astatus )
+ {
+ M4OSA_Int32 iTargetCts, iCurrentCts;
+
+ if( 0
+ != pC->pC1->
+ iAudioFrameCts ) /**<don't try to pre-decode if clip is at its beginning. */
+ {
+ /**
+ * Jump a few AUs backward */
+ iCurrentCts = pC->pC1->iAudioFrameCts;
+ iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+ * pC->ewc.iSilenceFrameDuration;
+
+ if( iTargetCts < 0 )
+ {
+ iTargetCts = 0; /**< Sanity check */
+ }
+
+ err = M4VSS3GPP_intClipJumpAudioAt(pC->pC1, &iTargetCts);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
+ M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+ err);
+ return err;
+ }
+
+ err = M4VSS3GPP_intClipReadNextAudioFrame(
+ pC->pC1); /**< read AU where we jumped */
+
+ M4OSA_TRACE2_3("D .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+ pC->pC1->iAoffset / pC->pC1->scale_audio,
+ pC->pC1->uiAudioFrameSize);
+
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
+ M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Decode/encode up to the wanted position */
+ while( pC->pC1->iAudioFrameCts < iCurrentCts )
+ {
+ err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch: \
+ M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /* [Mono] or [Stereo interleaved] : all is in one buffer */
+ pEncInBuffer.pTableBuffer[0] =
+ pC->pC1->AudioDecBufferOut.m_dataAddress;
+ pEncInBuffer.pTableBufferSize[0] =
+ pC->pC1->AudioDecBufferOut.m_bufferSize;
+ pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+ pEncInBuffer.pTableBufferSize[1] = 0;
+
+ /* Time in ms from data size, because it is PCM16 samples */
+ frameTimeDelta =
+ pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+ / pC->ewc.uiNbChannels;
+
+ /**
+ * Prepare output buffer */
+ pEncOutBuffer.pTableBuffer[0] =
+ (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+ pEncOutBuffer.pTableBufferSize[0] = 0;
+
+ M4OSA_TRACE2_0("E **** pre-encode");
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+ /*OMX Audio decoder used.
+ * OMX Audio dec shell does internal buffering and hence does not return
+ a PCM buffer for every decodeStep call.*
+ * So PCM buffer sizes might be 0. In this case donot call encode Step*/
+
+ if( 0 != pEncInBuffer.pTableBufferSize[0] )
+ {
+#endif
+ /**
+ * Encode the PCM audio */
+
+ err =
+ pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+ pC->ewc.pAudioEncCtxt,
+ &pEncInBuffer, &pEncOutBuffer);
+
+ if( ( M4NO_ERROR != err)
+ && (M4WAR_NO_MORE_AU != err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio():\
+ pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+ err);
+ return err;
+ }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ } //if(0 != pEncInBuffer.pTableBufferSize[0])
+
+#endif
+ pC->pC1->pAudioFramePtr = M4OSA_NULL;
+
+ // Get timestamp of last read AU
+ pC->pC1->ShellAPI.m_pAudioDecoder->m_pFctGetOptionAudioDec(
+ pC->pC1->pAudioDecCtxt, M4AD_kOptionID_AuCTS,
+ (M4OSA_DataOption) &auTimeStamp);
+
+ if (auTimeStamp == -1) {
+ M4OSA_TRACE1_0("M4VSS3GPP_intEditStepAudio: \
+ invalid audio timestamp returned");
+ return M4WAR_INVALID_TIME;
+ }
+
+ pC->pC1->iAudioFrameCts = auTimeStamp;
+
+ }
+ }
+
+ /**
+ * Audio is now OK for decoding */
+ pC->pC1->Astatus = M4VSS3GPP_kClipStatus_DECODE;
+ }
+
+ /**
+ * Decode the input audio */
+ err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+ M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x",
+ err);
+ return err;
+ }
+
+ pC->pC1->pAudioFramePtr = M4OSA_NULL;
+
+ // Get timestamp of last read AU
+ pC->pC1->ShellAPI.m_pAudioDecoder->m_pFctGetOptionAudioDec(
+ pC->pC1->pAudioDecCtxt, M4AD_kOptionID_AuCTS,
+ (M4OSA_DataOption) &auTimeStamp);
+
+ if (auTimeStamp == -1) {
+ M4OSA_TRACE1_0("M4VSS3GPP_intEditStepAudio: invalid audio \
+ timestamp returned");
+ return M4WAR_INVALID_TIME;
+ }
+
+ pC->pC1->iAudioFrameCts = auTimeStamp;
+
+ /**
+ * Apply the effect */
+ if( pC->iClip1ActiveEffect >= 0 )
+ {
+ err = M4VSS3GPP_intApplyAudioEffect(pC, 1, (M4OSA_Int16
+ *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+ pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+ M4VSS3GPP_intEndAudioEffect returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Compute output audio CTS */
+ pC->ewc.WriterAudioAU.CTS =
+ pC->pC1->iAudioFrameCts + pC->pC1->iAoffset;
+
+ /* May happen with corrupted input files (which have stts entries not
+ multiple of SilenceFrameDuration) */
+ if( pC->ewc.WriterAudioAU.CTS < 0 )
+ {
+ pC->ewc.WriterAudioAU.CTS = 0;
+ }
+
+ /**
+ * BZZZ bug fix (decode-encode case):
+ * (Yes, the Bzz bug may also occur when we re-encode. It doesn't
+ * occur at the decode before the encode, but at the playback!)
+ * Replace the first AMR AU of the encoded stream with a silence AU.
+ * It removes annoying "BZZZ" audio glitch.
+ * It is not needed if there is a begin cut.
+ * It is not needed for the first clip.
+ * Because of another bugfix (2005-03-24), the first AU written may be
+ * the second one which CTS is 20. Hence the cts<21 test.
+ * (the BZZZ effect occurs even with the second AU!) */
+ if( ( M4OSA_FALSE == pC->pC1->bFirstAuWritten)
+ && (0 != pC->uiCurrentClip) && (pC->pC1->iAudioFrameCts
+ < (pC->ewc.iSilenceFrameDuration + 1)) )
+ {
+ /**
+ * Copy a silence AMR AU to the output */
+ pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+ memcpy((void *)pC->ewc.WriterAudioAU.dataAddress,
+ (void *)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+ M4OSA_TRACE2_0("G #### silence AU");
+ }
+ else
+ {
+ /**
+ * Encode the filtered PCM audio directly into the output AU */
+
+ /* [Mono] or [Stereo interleaved] : all is in one buffer */
+ pEncInBuffer.pTableBuffer[0] =
+ pC->pC1->AudioDecBufferOut.m_dataAddress;
+ pEncInBuffer.pTableBufferSize[0] =
+ pC->pC1->AudioDecBufferOut.m_bufferSize;
+ pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+ pEncInBuffer.pTableBufferSize[1] = 0;
+
+ /* Time in ms from data size, because it is PCM16 samples */
+ frameTimeDelta =
+ pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+ / pC->ewc.uiNbChannels;
+
+ /**
+ * Prepare output buffer */
+ pEncOutBuffer.pTableBuffer[0] =
+ (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+ pEncOutBuffer.pTableBufferSize[0] = 0;
+
+ M4OSA_TRACE2_0("H ++++ encode AU");
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+ /*OMX Audio decoder used.
+ * OMX Audio dec shell does internal buffering and hence does not return
+ a PCM buffer for every decodeStep call.*
+ * So PCM buffer sizes might be 0. In this case donot call encode Step*/
+
+ if( 0 != pEncInBuffer.pTableBufferSize[0] )
+ {
+
+#endif
+
+ /**
+ * Encode the PCM audio */
+
+ err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+ pC->ewc.pAudioEncCtxt,
+ &pEncInBuffer, &pEncOutBuffer);
+
+ if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio():\
+ pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+ err);
+ return err;
+ }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ /**
+ * Set AU size */
+
+ pC->ewc.WriterAudioAU.size = pEncOutBuffer.pTableBufferSize[
+ 0]; /**< Get the size of encoded data */
+ }
+
+ /**
+ * This boolean is only used to fix the BZZ bug... */
+ pC->pC1->bFirstAuWritten = M4OSA_TRUE;
+
+ M4OSA_TRACE2_2("I ---- write : cts = %ld [ 0x%x ]",
+ (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+ pC->ewc.WriterAudioAU.size);
+
+ /**
+ * Write the AU */
+ err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+ pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+ &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ /*11/12/2008 CR 3283 MMS use case for VideoArtist
+ the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output file
+ size is reached
+ The editing is then finished,
+ the warning M4VSS3GPP_WAR_EDITING_DONE is returned*/
+ if( M4WAR_WRITER_STOP_REQ == err )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+ return M4VSS3GPP_WAR_EDITING_DONE;
+ }
+ else
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+ pWriterDataFcts->pProcessAU returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Read the next audio frame */
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+ M4OSA_TRACE2_3("J .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+ pC->pC1->iAoffset / pC->pC1->scale_audio,
+ pC->pC1->uiAudioFrameSize);
+
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+ M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+ err);
+ return err;
+ }
+ else
+ {
+ /**
+ * Update current time (to=tc+T) */
+ pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+ / pC->pC1->scale_audio;
+
+ if( ( M4WAR_NO_MORE_AU == err)
+ && (M4OSA_FALSE == pC->bSupportSilence) )
+ {
+ /**
+ * If output is other than AMR or AAC
+ (i.e. EVRC,we can't write silence into it)
+ * So we simply end here.*/
+ bStopAudio = M4OSA_TRUE;
+ }
+ }
+ }
+ break;
+
+ /* _________________ */
+ /*| |*/
+ /*| TRANSITION MODE |*/
+ /*|_________________|*/
+
+ case M4VSS3GPP_kEditAudioState_TRANSITION:
+ {
+ M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio TRANSITION");
+
+ /**
+ * Get the output AU to write into */
+ err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+ pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+ &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ pWriterDataFcts->pStartAU returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * If we were reading the clip, we must jump a few AU backward to decode/encode
+ (without writing result) from that point. */
+ if( M4VSS3GPP_kClipStatus_READ == pC->pC1->Astatus )
+ {
+ M4OSA_Int32 iTargetCts, iCurrentCts;
+
+ if( 0
+ != pC->pC1->
+ iAudioFrameCts ) /**<don't try to pre-decode if clip is at its beginning.*/
+ {
+ /**
+ * Jump a few AUs backward */
+ iCurrentCts = pC->pC1->iAudioFrameCts;
+ iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+ * pC->ewc.iSilenceFrameDuration;
+
+ if( iTargetCts < 0 )
+ {
+ iTargetCts = 0; /**< Sanity check */
+ }
+
+ err = M4VSS3GPP_intClipJumpAudioAt(pC->pC1, &iTargetCts);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+ M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+ err);
+ return err;
+ }
+
+ err = M4VSS3GPP_intClipReadNextAudioFrame(
+ pC->pC1); /**< read AU where we jumped */
+
+ M4OSA_TRACE2_3("K .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+ pC->pC1->iAoffset / pC->pC1->scale_audio,
+ pC->pC1->uiAudioFrameSize);
+
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+ M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /**
+ * Decode/encode up to the wanted position */
+ while( pC->pC1->iAudioFrameCts < iCurrentCts )
+ {
+ err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+ M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+ err);
+ return err;
+ }
+
+ /* [Mono] or [Stereo interleaved] : all is in one buffer */
+ pEncInBuffer.pTableBuffer[0] =
+ pC->pC1->AudioDecBufferOut.m_dataAddress;
+ pEncInBuffer.pTableBufferSize[0] =
+ pC->pC1->AudioDecBufferOut.m_bufferSize;
+ pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+ pEncInBuffer.pTableBufferSize[1] = 0;
+
+ /* Time in ms from data size, because it is PCM16 samples */
+ frameTimeDelta =
+ pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+ / pC->ewc.uiNbChannels;
+
+ /**
+ * Prepare output buffer */
+ pEncOutBuffer.pTableBuffer[0] =
+ (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+ pEncOutBuffer.pTableBufferSize[0] = 0;
+
+ M4OSA_TRACE2_0("L **** pre-encode");
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+ /*OMX Audio decoder used.
+ * OMX Audio dec shell does internal buffering and hence does not return
+ a PCM buffer for every decodeStep call.*
+ * So PCM buffer sizes might be 0. In this case donot call encode Step*/
+
+ if( 0 != pEncInBuffer.pTableBufferSize[0] )
+ {
+
+#endif
+ /**
+ * Encode the PCM audio */
+
+ err =
+ pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+ pC->ewc.pAudioEncCtxt,
+ &pEncInBuffer, &pEncOutBuffer);
+
+ if( ( M4NO_ERROR != err)
+ && (M4WAR_NO_MORE_AU != err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio():\
+ pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+ err);
+ return err;
+ }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+ M4OSA_TRACE2_3(
+ "M .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+ pC->pC1->iAoffset / pC->pC1->scale_audio,
+ pC->pC1->uiAudioFrameSize);
+
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+ M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+ }
+
+ /**
+ * Audio is now OK for decoding */
+ pC->pC1->Astatus = M4VSS3GPP_kClipStatus_DECODE;
+ }
+
+ /**
+ * Decode the first input audio */
+ err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ M4VSS3GPP_intClipDecodeCurrentAudioFrame(C1) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Decode the second input audio */
+ err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC2);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ M4VSS3GPP_intClipDecodeCurrentAudioFrame(C2) returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Check both clips decoded the same amount of PCM samples */
+ if( pC->pC1->AudioDecBufferOut.m_bufferSize
+ != pC->pC2->AudioDecBufferOut.m_bufferSize )
+ {
+ M4OSA_TRACE1_2(
+ "ERR : AudioTransition: both clips AU must have the same decoded\
+ PCM size! pc1 size=0x%x, pC2 size = 0x%x",
+ pC->pC1->AudioDecBufferOut.m_bufferSize,
+ pC->pC2->AudioDecBufferOut.m_bufferSize);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+ /*OMX Audio decoder used.
+ * OMX Audio dec shell does internal buffering and hence does not return
+ a PCM buffer for every decodeStep call.*
+ * So PCM buffer sizes might be 0 or different for clip1 and clip2.
+ * So no need to return error in this case */
+
+ M4OSA_TRACE1_2(
+ "M4VSS3GPP_intEditStepAudio: , pc1 AudBuff size=0x%x,\
+ pC2 AudBuff size = 0x%x",
+ pC->pC1->AudioDecBufferOut.m_bufferSize,
+ pC->pC2->AudioDecBufferOut.m_bufferSize);
+
+#else
+
+ return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+
+#endif // M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+ /**
+ * Apply the audio effect on clip1 */
+ if( pC->iClip1ActiveEffect >= 0 )
+ {
+ err = M4VSS3GPP_intApplyAudioEffect(pC, 1, (M4OSA_Int16
+ *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+ pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ M4VSS3GPP_intApplyAudioEffect(C1) returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Apply the audio effect on clip2 */
+ if( pC->iClip2ActiveEffect >= 0 )
+ {
+ err = M4VSS3GPP_intApplyAudioEffect(pC, 2, (M4OSA_Int16
+ *)pC->pC2->AudioDecBufferOut.m_dataAddress,
+ pC->pC2->AudioDecBufferOut.m_bufferSize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ M4VSS3GPP_intApplyAudioEffect(C2) returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Apply the transition effect */
+ err = M4VSS3GPP_intAudioTransition(pC,
+ (M4OSA_Int16 *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+ (M4OSA_Int16 *)pC->pC2->AudioDecBufferOut.m_dataAddress,
+ pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ M4VSS3GPP_intAudioTransition returns 0x%x",
+ err);
+ return err;
+ }
+
+ /* [Mono] or [Stereo interleaved] : all is in one buffer */
+ pEncInBuffer.pTableBuffer[0] =
+ pC->pC1->AudioDecBufferOut.m_dataAddress;
+ pEncInBuffer.pTableBufferSize[0] =
+ pC->pC1->AudioDecBufferOut.m_bufferSize;
+ pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+ pEncInBuffer.pTableBufferSize[1] = 0;
+
+ /* Time in ms from data size, because it is PCM16 samples */
+ frameTimeDelta = pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+ / pC->ewc.uiNbChannels;
+
+ /**
+ * Prepare output buffer */
+ pEncOutBuffer.pTableBuffer[0] =
+ (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+ pEncOutBuffer.pTableBufferSize[0] = 0;
+
+ M4OSA_TRACE2_0("N **** blend AUs");
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+ /*OMX Audio decoder used.
+ * OMX Audio dec shell does internal buffering and hence does not return
+ a PCM buffer for every decodeStep call.*
+ * So PCM buffer sizes might be 0. In this case donot call encode Step*/
+
+ if( 0 != pEncInBuffer.pTableBufferSize[0] )
+ {
+
+#endif
+
+ /**
+ * Encode the PCM audio */
+
+ err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+ pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+ if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio():\
+ pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+ err);
+ return err;
+ }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ }
+
+#endif
+
+ /**
+ * Set AU cts and size */
+
+ pC->ewc.WriterAudioAU.size = pEncOutBuffer.pTableBufferSize[
+ 0]; /**< Get the size of encoded data */
+ pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+ M4OSA_TRACE2_2("O ---- write : cts = %ld [ 0x%x ]",
+ (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+ pC->ewc.WriterAudioAU.size);
+
+ /**
+ * Write the AU */
+ err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+ pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+ &pC->ewc.WriterAudioAU);
+
+ if( M4NO_ERROR != err )
+ {
+ /*11/12/2008 CR 3283 MMS use case for VideoArtist
+ the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+ file size is reached
+ The editing is then finished,the warning M4VSS3GPP_WAR_EDITING_DONE
+ is returned*/
+ if( M4WAR_WRITER_STOP_REQ == err )
+ {
+ M4OSA_TRACE1_0(
+ "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+ return M4VSS3GPP_WAR_EDITING_DONE;
+ }
+ else
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ pWriterDataFcts->pProcessAU returns 0x%x!",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Read the next audio frame */
+ err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+ M4OSA_TRACE2_3("P .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+ pC->pC1->iAoffset / pC->pC1->scale_audio,
+ pC->pC1->uiAudioFrameSize);
+
+ if( M4OSA_ERR_IS_ERROR(err) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ M4VSS3GPP_intClipReadNextAudioFrame(C1) returns 0x%x!",
+ err);
+ return err;
+ }
+ else
+ {
+ M4OSA_ERR secondaryError;
+
+ /**
+ * Update current time (to=tc+T) */
+ pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+ / pC->pC1->scale_audio;
+
+ /**
+ * Read the next audio frame in the second clip */
+ secondaryError = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC2);
+
+ M4OSA_TRACE2_3("Q .... read : cts = %.0f + %.0f [ 0x%x ]",
+ pC->pC2->iAudioFrameCts / pC->pC2->scale_audio,
+ pC->pC2->iAoffset / pC->pC2->scale_audio,
+ pC->pC2->uiAudioFrameSize);
+
+ if( M4OSA_ERR_IS_ERROR(secondaryError) )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+ M4VSS3GPP_intClipReadNextAudioFrame(C2) returns 0x%x!",
+ secondaryError);
+ return err;
+ }
+
+ if( ( ( M4WAR_NO_MORE_AU == err)
+ || (M4WAR_NO_MORE_AU == secondaryError))
+ && (M4OSA_FALSE == pC->bSupportSilence) )
+ {
+ /**
+ * If output is other than AMR or AAC
+ (i.e. EVRC,we can't write silence into it)
+ * So we simply end here.*/
+ bStopAudio = M4OSA_TRUE;
+ }
+ }
+ }
+ break;
+
+ /* ____________ */
+ /*| |*/
+ /*| ERROR CASE |*/
+ /*|____________|*/
+
+ default:
+
+ M4OSA_TRACE3_1(
+ "M4VSS3GPP_intEditStepAudio: invalid internal state (0x%x), \
+ returning M4VSS3GPP_ERR_INTERNAL_STATE",
+ pC->Astate);
+ return M4VSS3GPP_ERR_INTERNAL_STATE;
+ }
+
+ /**
+ * Check if we are forced to stop audio */
+ if( M4OSA_TRUE == bStopAudio )
+ {
+ /**
+ * Audio is done for this clip */
+ err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+ err);
+ return err;
+ }
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
/**
 ******************************************************************************
 * M4OSA_ERR M4VSS3GPP_intCheckAudioMode()
 * @brief   Check which audio process mode we must use, depending on the output CTS.
 * @note    Updates pC->Astate to READ_WRITE, DECODE_ENCODE or TRANSITION.
 *          When entering a transition it opens the second clip (if needed),
 *          aligns its audio offset to the current output time, and applies an
 *          A/V resync correction. When leaving pure read/write mode it creates
 *          the audio encoder.
 * @param pC (IN/OUT) Internal edit context
 * @return M4NO_ERROR on success, or an error propagated from clip open/read
 *         or encoder creation
 ******************************************************************************
 */
static M4OSA_ERR M4VSS3GPP_intCheckAudioMode( M4VSS3GPP_InternalEditContext
                                             *pC )
{
    M4OSA_ERR err;
    const M4OSA_Int32 TD = pC->pTransitionList[pC->
        uiCurrentClip].uiTransitionDuration; /**< Transition duration */

    /* Remembered so we can detect the READ_WRITE -> encode-mode edge below */
    const M4VSS3GPP_EditAudioState previousAstate = pC->Astate;

    /**
     * Check if Clip1 is on its begin cut, or in its begin effect or end effect zone */
    M4VSS3GPP_intCheckAudioEffects(pC, 1);

    /**
     * Check if we are in the transition with next clip
     * (current output audio time, converted to clip-1 time and rounded, has
     * reached the clip end time minus the transition duration) */
    if( ( TD > 0) && ((M4OSA_Int32)(pC->ewc.dATo - pC->pC1->iAoffset
        / pC->pC1->scale_audio + 0.5) >= (pC->pC1->iEndTime - TD)) )
    {
        /**
         * We are in a transition */
        pC->Astate = M4VSS3GPP_kEditAudioState_TRANSITION;
        pC->bTransitionEffect = M4OSA_TRUE;

        /**
         * Do we enter the transition section ? */
        if( M4VSS3GPP_kEditAudioState_TRANSITION != previousAstate )
        {
            /**
             * Open second clip for transition, if not yet opened */
            if( M4OSA_NULL == pC->pC2 )
            {
                err = M4VSS3GPP_intOpenClip(pC, &pC->pC2,
                    &pC->pClipList[pC->uiCurrentClip + 1]);

                if( M4NO_ERROR != err )
                {
                    M4OSA_TRACE1_1(
                        "M4VSS3GPP_intCheckAudioMode: M4VSS3GPP_intOpenClip() returns 0x%x!",
                        err);
                    return err;
                }

                /**
                 * In case of short transition and bad luck (...), there may be no video AU
                 * in the transition. In that case, the second clip has not been opened.
                 * So we must update the video offset here. */
                // Decorrelate input and output encoding timestamp to handle encoder prefetch
                /**< Add current video output CTS to the clip offset */
                pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
            }

            /**
             * Add current audio output CTS to the clip offset
             * (video offset has already been set when doing the video transition) */
            pC->pC2->iAoffset +=
                (M4OSA_UInt32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);

            /**
             * 2005-03-24: BugFix for audio-video synchro:
             * There may be a portion of the duration of an audio AU of desynchro at each assembly.
             * It leads to an audible desynchro when there are a lot of clips assembled.
             * This bug fix allows to resynch the audio track when the delta is higher
             * than one audio AU duration.
             * We Step one AU in the second clip and we change the audio offset accordingly. */
            if( ( pC->pC2->iAoffset
                - (M4OSA_Int32)(pC->pC2->iVoffset *pC->pC2->scale_audio + 0.5))
                > pC->ewc.iSilenceFrameDuration )
            {
                /**
                 * Advance one AMR frame */
                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC2);

                M4OSA_TRACE2_3("Z .... read : cts = %.0f + %.0f [ 0x%x ]",
                    pC->pC2->iAudioFrameCts / pC->pC2->scale_audio,
                    pC->pC2->iAoffset / pC->pC2->scale_audio,
                    pC->pC2->uiAudioFrameSize);

                if( M4OSA_ERR_IS_ERROR(err) )
                {
                    M4OSA_TRACE1_1(
                        "M4VSS3GPP_intCheckAudioMode:\
                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
                        err);
                    return err;
                }
                /**
                 * Update audio offset accordingly*/
                pC->pC2->iAoffset -= pC->ewc.iSilenceFrameDuration;
            }
        }

        /**
         * Check begin and end effects for clip2 */
        M4VSS3GPP_intCheckAudioEffects(pC, 2);
    }
    else
    {
        /**
         * We are not in a transition */
        pC->bTransitionEffect = M4OSA_FALSE;

        /**
         * Check if current mode is Read/Write or Decode/Encode
         * (an active effect on clip 1 forces a decode/encode pass) */
        if( pC->iClip1ActiveEffect >= 0 )
        {
            pC->Astate = M4VSS3GPP_kEditAudioState_DECODE_ENCODE;
        }
        else
        {
            pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
        }
    }

    /**
     * Check if we create/destroy an encoder */
    if( ( M4VSS3GPP_kEditAudioState_READ_WRITE == previousAstate)
        && /**< read mode */
        (M4VSS3GPP_kEditAudioState_READ_WRITE != pC->Astate) ) /**< encode mode */
    {
        M4OSA_UInt32 uiAudioBitrate;

        /* Compute max bitrate depending on input files bitrates and transitions */
        if( pC->Astate == M4VSS3GPP_kEditAudioState_TRANSITION )
        {
            /* Max of the two blended files */
            if( pC->pC1->pSettings->ClipProperties.uiAudioBitrate
                > pC->pC2->pSettings->ClipProperties.uiAudioBitrate )
                uiAudioBitrate =
                    pC->pC1->pSettings->ClipProperties.uiAudioBitrate;
            else
                uiAudioBitrate =
                    pC->pC2->pSettings->ClipProperties.uiAudioBitrate;
        }
        else
        {
            /* Same as input file */
            uiAudioBitrate = pC->pC1->pSettings->ClipProperties.uiAudioBitrate;
        }

        /**
         * Create the encoder */
        err = M4VSS3GPP_intCreateAudioEncoder(&pC->ewc, &pC->ShellAPI,
            uiAudioBitrate);

        if( M4NO_ERROR != err )
        {
            /* NOTE(review): trace below references the encoder creator's
             * former name (intResetAudioEncoder) */
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intCheckAudioMode: M4VSS3GPP_intResetAudioEncoder() returns 0x%x!",
                err);
            return err;
        }
    }

    /**
     * Return with no error */
    M4OSA_TRACE3_0("M4VSS3GPP_intCheckAudioMode(): returning M4NO_ERROR");
    return M4NO_ERROR;
}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intCheckAudioEffects()
+ * @brief Check which audio effect must be applied at the current time
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intCheckAudioEffects( M4VSS3GPP_InternalEditContext
+ *pC, M4OSA_UInt8 uiClipNumber )
+{
+ M4OSA_UInt8 uiClipIndex;
+ M4OSA_UInt8 uiFxIndex;
+ M4VSS3GPP_ClipContext *pClip;
+ M4VSS3GPP_EffectSettings *pFx;
+ M4OSA_Int32 BC, EC;
+ M4OSA_Int8 *piClipActiveEffect;
+ M4OSA_Int32 t;
+
+ if( 1 == uiClipNumber )
+ {
+ uiClipIndex = pC->uiCurrentClip;
+ pClip = pC->pC1;
+ piClipActiveEffect = &(pC->iClip1ActiveEffect);
+ }
+ else /**< (2 == uiClipNumber) */
+ {
+ uiClipIndex = pC->uiCurrentClip + 1;
+ pClip = pC->pC2;
+ piClipActiveEffect = &(pC->iClip2ActiveEffect);
+ }
+
+ /**
+ * Shortcuts for code readability */
+ BC = pClip->iActualAudioBeginCut;
+ EC = pClip->iEndTime;
+
+ /**
+ Change the absolut time to clip related time
+ RC t = (M4OSA_Int32)(pC->ewc.dATo - pClip->iAoffset/pClip->scale_audio + 0.5);
+ < rounding */;
+ t = (M4OSA_Int32)(pC->ewc.dATo/*- pClip->iAoffset/pClip->scale_audio*/
+ + 0.5); /**< rounding */
+ ;
+
+ /**
+ * Default: no effect active */
+ *piClipActiveEffect = -1;
+
+ /**
+ * Check the three effects */
+ // RC for (uiFxIndex=0; uiFxIndex<pC->pClipList[uiClipIndex].nbEffects; uiFxIndex++)
+ for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ )
+ {
+ /** Shortcut, reverse order because of priority between effects
+ ( EndEffect always clean ) */
+ pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]);
+
+ if( M4VSS3GPP_kAudioEffectType_None != pFx->AudioEffectType )
+ {
+ /**
+ * Check if there is actually a video effect */
+ if( ( t >= (M4OSA_Int32)(/*BC +*/pFx->uiStartTime))
+ && /**< Are we after the start time of the effect? */
+ (t < (M4OSA_Int32)(/*BC +*/pFx->uiStartTime + pFx->
+ uiDuration)) ) /**< Are we into the effect duration? */
+ {
+ /**
+ * Set the active effect */
+ *piClipActiveEffect = pC->nbEffects - 1 - uiFxIndex;
+
+ /**
+ * The first effect has the highest priority, then the second one,
+ then the thirs one.
+ * Hence, as soon as we found an active effect, we can get out of this loop */
+ uiFxIndex = pC->nbEffects; /** get out of the for loop */
+ }
+ /**
+ * Bugfix: The duration of the end effect has been set according to the
+ announced clip duration.
+ * If the announced duration is smaller than the real one, the end effect
+ won't be applied at
+ * the very end of the clip. To solve this issue we force the end effect. */
+
+ }
+ }
+
+ return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intApplyAudioEffect()
+ * @brief Apply audio effect to pPCMdata
+ * @param pC (IN/OUT) Internal edit context
+ * @param uiClip1orClip2 (IN/OUT) 1 for first clip, 2 for second clip
+ * @param pPCMdata (IN/OUT) Input and Output PCM audio data
+ * @param uiPCMsize (IN) Size of pPCMdata
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intApplyAudioEffect( M4VSS3GPP_InternalEditContext
+ *pC, M4OSA_UInt8 uiClip1orClip2,
+ M4OSA_Int16 *pPCMdata,
+ M4OSA_UInt32 uiPCMsize )
+{
+ M4VSS3GPP_ClipContext *pClip;
+ M4VSS3GPP_ClipSettings *pClipSettings;
+ M4VSS3GPP_EffectSettings *pFx;
+ M4OSA_Int32
+ i32sample; /**< we will cast each Int16 sample into this Int32 variable */
+ M4OSA_Int32 iPos;
+ M4OSA_Int32 iDur;
+
+ M4OSA_DEBUG_IF2(( 1 != uiClip1orClip2) && (2 != uiClip1orClip2),
+ M4ERR_PARAMETER,
+ "M4VSS3GPP_intBeginAudioEffect: uiClip1orClip2 invalid");
+
+ if( 1 == uiClip1orClip2 )
+ {
+ pClip = pC->pC1;
+ pClipSettings = &(pC->pClipList[pC->
+ uiCurrentClip]); /**< Get a shortcut to the clip settings */
+ // RC pFx = &(pClipSettings->Effects[pC->iClip1ActiveEffect]);/**< Get a shortcut
+ // to the active effect */
+ pFx = &(pC->
+ pEffectsList[pC->
+ iClip1ActiveEffect]); /**< Get a shortcut to the active effect */
+ M4OSA_DEBUG_IF2(( pC->iClip1ActiveEffect < 0)
+ || (pC->iClip1ActiveEffect > 2), M4ERR_PARAMETER,
+ "M4VSS3GPP_intApplyAudioEffect: iClip1ActiveEffect invalid");
+ }
+ else /**< if (2==uiClip1orClip2) */
+ {
+ pClip = pC->pC2;
+ pClipSettings = &(pC->pClipList[pC->uiCurrentClip
+ + 1]); /**< Get a shortcut to the clip settings */
+ // RC pFx = &(pClipSettings->Effects[pC->iClip2ActiveEffect]);/**< Get a shortcut
+ // to the active effect */
+ pFx = &(pC->
+ pEffectsList[pC->
+ iClip2ActiveEffect]); /**< Get a shortcut to the active effect */
+ M4OSA_DEBUG_IF2(( pC->iClip2ActiveEffect < 0)
+ || (pC->iClip2ActiveEffect > 2), M4ERR_PARAMETER,
+ "M4VSS3GPP_intApplyAudioEffect: iClip2ActiveEffect invalid");
+ }
+
+ iDur = (M4OSA_Int32)pFx->uiDuration;
+
+ /**
+ * Compute how far from the beginning of the effect we are, in clip-base time.
+ * It is done with integers because the offset and begin cut have been rounded already. */
+ iPos =
+ (M4OSA_Int32)(pC->ewc.dATo + 0.5 - pClip->iAoffset / pClip->scale_audio)
+ - pClip->iActualAudioBeginCut - pFx->uiStartTime;
+
+ /**
+ * Sanity check */
+ if( iPos > iDur )
+ {
+ iPos = iDur;
+ }
+ else if( iPos < 0 )
+ {
+ iPos = 0;
+ }
+
+ /**
+ * At this point, iPos is the effect progress, in a 0 to iDur base */
+ switch( pFx->AudioEffectType )
+ {
+ case M4VSS3GPP_kAudioEffectType_FadeIn:
+
+ /**
+ * Original samples are signed 16bits.
+ * We convert it to signed 32bits and multiply it by iPos.
+ * So we must assure that iPos is not higher that 16bits max.
+ * iPos max value is iDur, so we test iDur. */
+ while( iDur > PWR_FXP_FRACT_MAX )
+ {
+ iDur >>=
+ 2; /**< divide by 2 would be more logical (instead of 4),
+ but we have enough dynamic..) */
+ iPos >>= 2; /**< idem */
+ }
+
+ /**
+ * From buffer size (bytes) to number of sample (int16): divide by two */
+ uiPCMsize >>= 1;
+
+ /**
+ * Loop on samples */
+ while( uiPCMsize-- > 0 ) /**< decrementing to optimize */
+ {
+ i32sample = *pPCMdata;
+ i32sample *= iPos;
+ i32sample /= iDur;
+ *pPCMdata++ = (M4OSA_Int16)i32sample;
+ }
+
+ break;
+
+ case M4VSS3GPP_kAudioEffectType_FadeOut:
+
+ /**
+ * switch from 0->Dur to Dur->0 in order to do fadeOUT instead of fadeIN */
+ iPos = iDur - iPos;
+
+ /**
+ * Original samples are signed 16bits.
+ * We convert it to signed 32bits and multiply it by iPos.
+ * So we must assure that iPos is not higher that 16bits max.
+ * iPos max value is iDur, so we test iDur. */
+ while( iDur > PWR_FXP_FRACT_MAX )
+ {
+ iDur >>=
+ 2; /**< divide by 2 would be more logical (instead of 4),
+ but we have enough dynamic..) */
+ iPos >>= 2; /**< idem */
+ }
+
+ /**
+ * From buffer size (bytes) to number of sample (int16): divide by two */
+ uiPCMsize >>= 1;
+
+ /**
+ * Loop on samples, apply the fade factor on each */
+ while( uiPCMsize-- > 0 ) /**< decrementing counter to optimize */
+ {
+ i32sample = *pPCMdata;
+ i32sample *= iPos;
+ i32sample /= iDur;
+ *pPCMdata++ = (M4OSA_Int16)i32sample;
+ }
+
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intApplyAudioEffect: unknown audio effect type (0x%x),\
+ returning M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE",
+ pFx->AudioEffectType);
+ return M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE;
+ }
+
+ /**
+ * Return */
+ M4OSA_TRACE3_0("M4VSS3GPP_intApplyAudioEffect: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioTransition()
+ * @brief Apply transition effect to two PCM buffer
+ * @note The result of the transition is put in the first buffer.
+ * I know it's not beautiful, but it fits my current needs, and it's efficient!
+ * So why bother with a third output buffer?
+ * @param pC (IN/OUT) Internal edit context
+ * @param pPCMdata1 (IN/OUT) First input and Output PCM audio data
+ * @param pPCMdata2 (IN) Second input PCM audio data
+ * @param uiPCMsize (IN) Size of both PCM buffers
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioTransition( M4VSS3GPP_InternalEditContext
+ *pC, M4OSA_Int16 *pPCMdata1,
+ M4OSA_Int16 *pPCMdata2,
+ M4OSA_UInt32 uiPCMsize )
+{
+ M4OSA_Int32 i32sample1,
+ i32sample2; /**< we will cast each Int16 sample into this Int32 variable */
+ M4OSA_Int32 iPos1, iPos2;
+ M4OSA_Int32 iDur = (M4OSA_Int32)pC->
+ pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+ /**
+ * Compute how far from the end cut we are, in clip-base time.
+ * It is done with integers because the offset and begin cut have been rounded already. */
+ iPos1 = pC->pC1->iEndTime - (M4OSA_Int32)(pC->ewc.dATo
+ + 0.5 - pC->pC1->iAoffset / pC->pC1->scale_audio);
+
+ /**
+ * Sanity check */
+ if( iPos1 > iDur )
+ {
+ iPos1 = iDur;
+ }
+ else if( iPos1 < 0 )
+ {
+ iPos1 = 0;
+ }
+
+ /**
+ * Position of second clip in the transition */
+ iPos2 = iDur - iPos1;
+
+ /**
+ * At this point, iPos2 is the transition progress, in a 0 to iDur base.
+ * iPos1 is the transition progress, in a iDUr to 0 base. */
+ switch( pC->pTransitionList[pC->uiCurrentClip].AudioTransitionType )
+ {
+ case M4VSS3GPP_kAudioTransitionType_CrossFade:
+
+ /**
+ * Original samples are signed 16bits.
+ * We convert it to signed 32bits and multiply it by iPos.
+ * So we must assure that iPos is not higher that 16bits max.
+ * iPos max value is iDur, so we test iDur. */
+ while( iDur > PWR_FXP_FRACT_MAX )
+ {
+ iDur >>=
+ 2; /**< divide by 2 would be more logical (instead of 4),
+ but we have enough dynamic..) */
+ iPos1 >>= 2; /**< idem */
+ iPos2 >>= 2; /**< idem */
+ }
+
+ /**
+ * From buffer size (bytes) to number of sample (int16): divide by two */
+ uiPCMsize >>= 1;
+
+ /**
+ * Loop on samples, apply the fade factor on each */
+ while( uiPCMsize-- > 0 ) /**< decrementing counter to optimize */
+ {
+ i32sample1 = *pPCMdata1; /**< Get clip1 sample */
+ i32sample1 *= iPos1; /**< multiply by fade numerator */
+ i32sample1 /= iDur; /**< divide by fade denominator */
+
+ i32sample2 = *pPCMdata2; /**< Get clip2 sample */
+ i32sample2 *= iPos2; /**< multiply by fade numerator */
+ i32sample2 /= iDur; /**< divide by fade denominator */
+
+ *pPCMdata1++ = (M4OSA_Int16)(i32sample1
+ + i32sample2); /**< mix the two samples */
+ pPCMdata2++; /**< don't forget to increment the second buffer */
+ }
+ break;
+
+ case M4VSS3GPP_kAudioTransitionType_None:
+ /**
+ * This is a stupid-non optimized version of the None transition...
+ * We copy the PCM frames */
+ if( iPos1 < (iDur >> 1) ) /**< second half of transition */
+ {
+ /**
+ * Copy the input PCM to the output buffer */
+ memcpy((void *)pPCMdata1,
+ (void *)pPCMdata2, uiPCMsize);
+ }
+ /**
+ * the output must be put in the first buffer.
+ * For the first half of the non-transition it's already the case!
+ * So we have nothing to do here...
+ */
+
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intAudioTransition: unknown transition type (0x%x),\
+ returning M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE",
+ pC->pTransitionList[pC->uiCurrentClip].AudioTransitionType);
+ return M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE;
+ }
+
+ /**
+ * Return */
+ M4OSA_TRACE3_0("M4VSS3GPP_intAudioTransition: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder()
+ * @brief Reset the audio encoder (Create it if needed)
+ * @note
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder( M4VSS3GPP_EncodeWriteContext *pC_ewc,
+ M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+ M4OSA_UInt32 uiAudioBitrate )
+{
+ M4OSA_ERR err;
+
+ /**
+ * If an encoder already exist, we destroy it */
+ if( M4OSA_NULL != pC_ewc->pAudioEncCtxt )
+ {
+ err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctClose(
+ pC_ewc->pAudioEncCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+ err);
+ /**< don't return, we still have stuff to free */
+ }
+
+ err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctCleanUp(
+ pC_ewc->pAudioEncCtxt);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intResetAudioEncoder:\
+ pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x", err);
+ /**< don't return, we still have stuff to free */
+ }
+
+ pC_ewc->pAudioEncCtxt = M4OSA_NULL;
+ }
+
+ /**
+ * Creates a new encoder */
+ switch( pC_ewc->AudioStreamType )
+ {
+ //EVRC
+ // case M4SYS_kEVRC:
+ //
+ // err = M4VSS3GPP_setCurrentAudioEncoder(&pC->ShellAPI,
+ // pC_ewc->AudioStreamType);
+ // M4ERR_CHECK_RETURN(err);
+ //
+ // pC_ewc->AudioEncParams.Format = M4ENCODER_kEVRC;
+ // pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+ // pC_ewc->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+ // pC_ewc->AudioEncParams.Bitrate = M4VSS3GPP_EVRC_DEFAULT_BITRATE;
+ // break;
+
+ case M4SYS_kAMR:
+
+ err = M4VSS3GPP_setCurrentAudioEncoder(pC_ShellAPI,
+ pC_ewc->AudioStreamType);
+ M4ERR_CHECK_RETURN(err);
+
+ pC_ewc->AudioEncParams.Format = M4ENCODER_kAMRNB;
+ pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+ pC_ewc->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+ pC_ewc->AudioEncParams.Bitrate = M4VSS3GPP_AMR_DEFAULT_BITRATE;
+ pC_ewc->AudioEncParams.SpecifParam.AmrSID = M4ENCODER_kAmrNoSID;
+ break;
+
+ case M4SYS_kAAC:
+
+ err = M4VSS3GPP_setCurrentAudioEncoder(pC_ShellAPI,
+ pC_ewc->AudioStreamType);
+ M4ERR_CHECK_RETURN(err);
+
+ pC_ewc->AudioEncParams.Format = M4ENCODER_kAAC;
+
+ switch( pC_ewc->uiSamplingFrequency )
+ {
+ case 8000:
+ pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+ break;
+
+ case 16000:
+ pC_ewc->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+ break;
+
+ case 22050:
+ pC_ewc->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
+ break;
+
+ case 24000:
+ pC_ewc->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
+ break;
+
+ case 32000:
+ pC_ewc->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
+ break;
+
+ case 44100:
+ pC_ewc->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
+ break;
+
+ case 48000:
+ pC_ewc->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intCreateAudioEncoder: invalid input AAC sampling frequency\
+ (%d Hz), returning M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED",
+ pC_ewc->uiSamplingFrequency);
+ return M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED;
+ }
+ pC_ewc->AudioEncParams.ChannelNum = (pC_ewc->uiNbChannels == 1)
+ ? M4ENCODER_kMono : M4ENCODER_kStereo;
+ pC_ewc->AudioEncParams.SpecifParam.AacParam.Regulation =
+ M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
+ /* unused */
+ pC_ewc->AudioEncParams.SpecifParam.AacParam.bIS = M4OSA_FALSE;
+ pC_ewc->AudioEncParams.SpecifParam.AacParam.bMS = M4OSA_FALSE;
+ pC_ewc->AudioEncParams.SpecifParam.AacParam.bPNS = M4OSA_FALSE;
+ pC_ewc->AudioEncParams.SpecifParam.AacParam.bTNS = M4OSA_FALSE;
+ /* TODO change into highspeed asap */
+ pC_ewc->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
+ M4OSA_FALSE;
+
+ /* Quantify value (ceil one) */
+ if( uiAudioBitrate <= 16000 )
+ pC_ewc->AudioEncParams.Bitrate = 16000;
+
+ else if( uiAudioBitrate <= 24000 )
+ pC_ewc->AudioEncParams.Bitrate = 24000;
+
+ else if( uiAudioBitrate <= 32000 )
+ pC_ewc->AudioEncParams.Bitrate = 32000;
+
+ else if( uiAudioBitrate <= 48000 )
+ pC_ewc->AudioEncParams.Bitrate = 48000;
+
+ else if( uiAudioBitrate <= 64000 )
+ pC_ewc->AudioEncParams.Bitrate = 64000;
+
+ else
+ pC_ewc->AudioEncParams.Bitrate = 96000;
+
+ /* Special requirement of our encoder */
+ if( ( pC_ewc->uiNbChannels == 2)
+ && (pC_ewc->AudioEncParams.Bitrate < 32000) )
+ pC_ewc->AudioEncParams.Bitrate = 32000;
+
+ break;
+
+ default:
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intResetAudioEncoder: Undefined output audio format (%d),\
+ returning M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT",
+ pC_ewc->AudioStreamType);
+ return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+ }
+
+ /* Initialise the audio encoder */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+ M4OSA_TRACE3_1(
+ "M4VSS3GPP_intResetAudioEncoder:\
+ pAudioEncoderGlobalFcts->pFctInit called with userdata 0x%x",
+ pC_ShellAPI->pCurrentAudioEncoderUserData);
+ err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctInit(&pC_ewc->pAudioEncCtxt,
+ pC_ShellAPI->pCurrentAudioEncoderUserData);
+
+#else
+
+ err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctInit(&pC_ewc->pAudioEncCtxt,
+ M4OSA_NULL /* no HW encoder */);
+
+#endif
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctInit returns 0x%x",
+ err);
+ return err;
+ }
+
+ /* Open the audio encoder */
+ err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctOpen(pC_ewc->pAudioEncCtxt,
+ &pC_ewc->AudioEncParams, &pC_ewc->pAudioEncDSI,
+ M4OSA_NULL /* no grabbing */);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1(
+ "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctOpen returns 0x%x",
+ err);
+ return err;
+ }
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4VSS3GPP_intResetAudioEncoder: returning M4NO_ERROR");
+ return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
new file mode 100755
index 0000000..59d57e5
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
@@ -0,0 +1,3922 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VSS3GPP_EditVideo.c
+ * @brief Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our header */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+// StageFright encoders require %16 resolution
+#include "M4ENCODER_common.h"
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h" /**< OSAL debug management */
+
+/**
+ * component includes */
+#include "M4VFL_transition.h" /**< video effects */
+
+/*for transition behaviour*/
+#include <math.h>
+#include "M4AIR_API.h"
+#include "M4VSS3GPP_Extended_API.h"
+/** Determine absolute value of a. */
+#define M4xVSS_ABS(a) ( ( (a) < (0) ) ? (-(a)) : (a) )
+#define Y_PLANE_BORDER_VALUE 0x00
+#define U_PLANE_BORDER_VALUE 0x80
+#define V_PLANE_BORDER_VALUE 0x80
+
+/************************************************************************/
+/* Static local functions */
+/************************************************************************/
+
+static M4OSA_ERR M4VSS3GPP_intCheckVideoMode(
+ M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_Void
+M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC,
+ M4OSA_UInt8 uiClipNumber );
+static M4OSA_ERR M4VSS3GPP_intApplyVideoEffect(
+ M4VSS3GPP_InternalEditContext *pC, M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut, M4OSA_Bool bSkipFramingEffect);
+
+static M4OSA_ERR
+M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
+ M4VIFI_ImagePlane *pPlaneOut );
+
+static M4OSA_Void
+M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
+ M4SYS_AccessUnit *pAU );
+static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
+ M4OSA_UInt8 uiCts );
+static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+ M4OSA_UInt32 uiCtsSec );
+static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+ M4OSA_UInt32 *pCtsSec );
+static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
+ M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight );
+static M4OSA_ERR M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420(
+ M4OSA_Void* pFileIn, M4OSA_FileReadPointer* pFileReadPtr,
+ M4VIFI_ImagePlane* pImagePlanes,
+ M4OSA_UInt32 width,M4OSA_UInt32 height);
+static M4OSA_ERR M4VSS3GPP_intApplyRenderingMode(
+ M4VSS3GPP_InternalEditContext *pC,
+ M4xVSS_MediaRendering renderingMode,
+ M4VIFI_ImagePlane* pInplane,
+ M4VIFI_ImagePlane* pOutplane);
+
+static M4OSA_ERR M4VSS3GPP_intSetYuv420PlaneFromARGB888 (
+ M4VSS3GPP_InternalEditContext *pC,
+ M4VSS3GPP_ClipContext* pClipCtxt);
+static M4OSA_ERR M4VSS3GPP_intRenderFrameWithEffect(
+ M4VSS3GPP_InternalEditContext *pC,
+ M4VSS3GPP_ClipContext* pClipCtxt,
+ M4_MediaTime ts,
+ M4OSA_Bool bIsClip1,
+ M4VIFI_ImagePlane *pResizePlane,
+ M4VIFI_ImagePlane *pPlaneNoResize,
+ M4VIFI_ImagePlane *pPlaneOut);
+
+static M4OSA_ERR M4VSS3GPP_intRotateVideo(M4VIFI_ImagePlane* pPlaneIn,
+ M4OSA_UInt32 rotationDegree);
+
+static M4OSA_ERR M4VSS3GPP_intSetYUV420Plane(M4VIFI_ImagePlane* planeIn,
+ M4OSA_UInt32 width, M4OSA_UInt32 height);
+
+static M4OSA_ERR M4VSS3GPP_intApplyVideoOverlay (
+ M4VSS3GPP_InternalEditContext *pC,
+ M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepVideo()
+ * @brief One step of video processing
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iCts, iNextCts;
+    M4ENCODER_FrameMode FrameMode;
+    M4OSA_Bool bSkipFrame;
+    M4OSA_UInt16 offset;
+
+    /* Processing is driven by pC->Vstate: READ_WRITE copies compressed AUs
+       straight from reader to writer, DECODE_ENCODE / BEGIN_CUT decodes and
+       re-encodes a single clip, TRANSITION blends two decoded clips before
+       encoding. M4VSS3GPP_intCheckVideoMode() updates Vstate each step. */
+
+    /**
+     * Check if we reached end cut. Decorrelate input and output encoding
+     * timestamp to handle encoder prefetch
+     */
+    if ( ((M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset
+        + pC->iInOutTimeOffset) >= pC->pC1->iEndTime )
+    {
+        /* Re-adjust video to precise cut time */
+        pC->iInOutTimeOffset = ((M4OSA_Int32)(pC->ewc.dInputVidCts))
+            - pC->pC1->iVoffset + pC->iInOutTimeOffset - pC->pC1->iEndTime;
+        if ( pC->iInOutTimeOffset < 0 ) {
+            pC->iInOutTimeOffset = 0;
+        }
+
+        /**
+        * Video is done for this clip */
+        err = M4VSS3GPP_intReachedEndOfVideo(pC);
+
+        /* RC: to know when a file has been processed */
+        if (M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP)
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intReachedEndOfVideo returns 0x%x",
+                err);
+        }
+
+        return err;
+    }
+
+    /* Don't change the states if we are in decodeUpTo() */
+    if ( (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
+        && (( pC->pC2 == M4OSA_NULL)
+        || (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)) )
+    {
+        /**
+        * Check Video Mode, depending on the current output CTS */
+        err = M4VSS3GPP_intCheckVideoMode(
+            pC); /**< This function change the pC->Vstate variable! */
+
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intCheckVideoMode returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+
+    switch( pC->Vstate )
+    {
+        /* _________________ */
+        /*|                 |*/
+        /*| READ_WRITE MODE |*/
+        /*|_________________|*/
+
+        case M4VSS3GPP_kEditVideoState_READ_WRITE:
+        case M4VSS3GPP_kEditVideoState_AFTER_CUT:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo READ_WRITE");
+
+                bSkipFrame = M4OSA_FALSE;
+
+                /**
+                * If we were decoding the clip, we must jump to be sure
+                * to get to the good position. */
+                if( M4VSS3GPP_kClipStatus_READ != pC->pC1->Vstatus )
+                {
+                    /**
+                    * Jump to target video time (tc = to-T) */
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    iCts = (M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset;
+                    err = pC->pC1->ShellAPI.m_pReader->m_pFctJump(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream, &iCts);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo:\
+                            READ_WRITE: m_pReader->m_pFctJump(V1) returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream,
+                        &pC->pC1->VideoAU);
+
+                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo:\
+                            READ_WRITE: m_pReader->m_pFctGetNextAu returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    M4OSA_TRACE2_3("A .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                        pC->pC1->VideoAU.m_size);
+
+                    /* This frame has been already written in BEGIN CUT step -> skip it */
+                    if( pC->pC1->VideoAU.m_CTS == iCts
+                        && pC->pC1->iVideoRenderCts >= iCts )
+                    {
+                        bSkipFrame = M4OSA_TRUE;
+                    }
+                }
+
+                /* This frame has been already written in BEGIN CUT step -> skip it */
+                if( ( pC->Vstate == M4VSS3GPP_kEditVideoState_AFTER_CUT)
+                    && (pC->pC1->VideoAU.m_CTS
+                    + pC->pC1->iVoffset <= pC->ewc.WriterVideoAU.CTS) )
+                {
+                    bSkipFrame = M4OSA_TRUE;
+                }
+
+                /**
+                * Remember the clip reading state */
+                pC->pC1->Vstatus = M4VSS3GPP_kClipStatus_READ;
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                // Rounding is to compensate reader imprecision (m_CTS is actually an integer)
+                iCts = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pC->pC1->iVoffset - 1;
+                iNextCts = iCts + ((M4OSA_Int32)pC->dOutputFrameDuration) + 1;
+                /* Avoid to write a last frame of duration 0 */
+                if( iNextCts > pC->pC1->iEndTime )
+                    iNextCts = pC->pC1->iEndTime;
+
+                /**
+                * If the AU is good to be written, write it, else just skip it */
+                if( ( M4OSA_FALSE == bSkipFrame)
+                    && (( pC->pC1->VideoAU.m_CTS >= iCts)
+                    && (pC->pC1->VideoAU.m_CTS < iNextCts)
+                    && (pC->pC1->VideoAU.m_size > 0)) )
+                {
+                    /**
+                    * Get the output AU to write into */
+                    err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                        pC->ewc.p3gpWriterContext,
+                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
+                        &pC->ewc.WriterVideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                            pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Copy the input AU to the output AU */
+                    pC->ewc.WriterVideoAU.attribute = pC->pC1->VideoAU.m_attribute;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    pC->ewc.WriterVideoAU.CTS = (M4OSA_Time)pC->pC1->VideoAU.m_CTS +
+                        (M4OSA_Time)pC->pC1->iVoffset;
+                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+                    offset = 0;
+                    /* for h.264 stream do not read the 1st 4 bytes as they are header
+                     indicators */
+                    if( pC->pC1->pVideoStream->m_basicProperties.m_streamType
+                        == M4DA_StreamTypeVideoMpeg4Avc )
+                        offset = 4;
+
+                    pC->ewc.WriterVideoAU.size = pC->pC1->VideoAU.m_size - offset;
+                    if( pC->ewc.WriterVideoAU.size > pC->ewc.uiVideoMaxAuSize )
+                    {
+                        M4OSA_TRACE1_2(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE: AU size greater than\
+                            MaxAuSize (%d>%d)! returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
+                            pC->ewc.WriterVideoAU.size, pC->ewc.uiVideoMaxAuSize);
+                        return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
+                    }
+
+                    memcpy((void *)pC->ewc.WriterVideoAU.dataAddress,
+                        (void *)(pC->pC1->VideoAU.m_dataAddress + offset),
+                        (pC->ewc.WriterVideoAU.size));
+
+                    /**
+                    * Update time info for the Counter Time System to be equal to the bit
+                    -stream time*/
+                    M4VSS3GPP_intUpdateTimeInfo(pC, &pC->ewc.WriterVideoAU);
+                    M4OSA_TRACE2_2("B ---- write : cts  = %lu [ 0x%x ]",
+                        pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);
+
+                    /**
+                    * Write the AU */
+                    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                        pC->ewc.p3gpWriterContext,
+                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
+                        &pC->ewc.WriterVideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+                        file size is reached
+                        The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
+                        is returned*/
+                        if( M4WAR_WRITER_STOP_REQ == err )
+                        {
+                            M4OSA_TRACE1_0(
+                                "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                            return M4VSS3GPP_WAR_EDITING_DONE;
+                        }
+                        else
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                                pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Read next AU for next step */
+                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream,
+                        &pC->pC1->VideoAU);
+
+                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                            m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                        pC->pC1->VideoAU.m_size);
+                }
+                else
+                {
+                    /**
+                    * Decide whether to read or to increment time increment */
+                    if( ( pC->pC1->VideoAU.m_size == 0)
+                        || (pC->pC1->VideoAU.m_CTS >= iNextCts) )
+                    {
+                        /*Increment time by the encoding period (NO_MORE_AU or reader in advance */
+                        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                        pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+
+                        /* Switch (from AFTER_CUT) to normal mode because time is
+                        no more frozen */
+                        pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+                    }
+                    else
+                    {
+                        /* In other cases (reader late), just let the reader catch up
+                         pC->ewc.dVTo */
+                        err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                            pC->pC1->pReaderContext,
+                            (M4_StreamHandler *)pC->pC1->pVideoStream,
+                            &pC->pC1->VideoAU);
+
+                        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                                m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        M4OSA_TRACE2_3("D .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                            pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                            pC->pC1->VideoAU.m_size);
+                    }
+                }
+            }
+            break;
+
+            /* ____________________ */
+            /*|                    |*/
+            /*| DECODE_ENCODE MODE |*/
+            /*|   BEGIN_CUT MODE   |*/
+            /*|____________________|*/
+
+        case M4VSS3GPP_kEditVideoState_DECODE_ENCODE:
+        case M4VSS3GPP_kEditVideoState_BEGIN_CUT:
+            {
+                M4OSA_TRACE3_0(
+                    "M4VSS3GPP_intEditStepVideo DECODE_ENCODE / BEGIN_CUT");
+
+                if ((pC->pC1->pSettings->FileType ==
+                     M4VIDEOEDITING_kFileType_ARGB8888) &&
+                    (M4OSA_FALSE ==
+                        pC->pC1->pSettings->ClipProperties.bSetImageData)) {
+
+                    err = M4VSS3GPP_intSetYuv420PlaneFromARGB888(pC, pC->pC1);
+                    if( M4NO_ERROR != err ) {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                            M4VSS3GPP_intSetYuv420PlaneFromARGB888 err=%x", err);
+                        return err;
+                    }
+                }
+                /**
+                * Decode the video up to the target time
+                (will jump to the previous RAP if needed ) */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1, (M4OSA_Int32)pC->ewc.dInputVidCts);
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                        M4VSS3GPP_intDecodeVideoUpToCts returns err=0x%x",
+                        err);
+                    return err;
+                }
+
+                /* If the decoding is not completed, do one more step with time frozen */
+                if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
+                {
+                    return M4NO_ERROR;
+                }
+
+                /**
+                * Reset the video pre-processing error before calling the encoder */
+                pC->ewc.VppError = M4NO_ERROR;
+
+                M4OSA_TRACE2_0("E ++++ encode AU");
+
+                /**
+                * Encode the frame(rendering,filtering and writing will be done
+                 in encoder callbacks)*/
+                if( pC->Vstate == M4VSS3GPP_kEditVideoState_BEGIN_CUT )
+                    FrameMode = M4ENCODER_kIFrame;
+                else
+                    FrameMode = M4ENCODER_kNormalFrame;
+
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
+                pC->ewc.dInputVidCts, FrameMode);
+                /**
+                * Check if we had a VPP error... */
+                if( M4NO_ERROR != pC->ewc.VppError )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
+                        pC->ewc.VppError);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
+                    {
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                        return pC->ewc.VppError;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    }
+
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                }
+                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
+                {
+                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
+                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
+                    }
+                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+                    file size is reached
+                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
+                    is returned*/
+                    else if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Increment time by the encoding period (for begin cut, do not increment to not
+                lose P-frames) */
+                if( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate )
+                {
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+                }
+            }
+            break;
+
+            /* _________________ */
+            /*|                 |*/
+            /*| TRANSITION MODE |*/
+            /*|_________________|*/
+
+        case M4VSS3GPP_kEditVideoState_TRANSITION:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo TRANSITION");
+
+                /* Don't decode more than needed */
+                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
+                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus)) )
+                {
+                    /**
+                    * Decode the clip1 video up to the target time
+                    (will jump to the previous RAP if needed */
+                    if ((pC->pC1->pSettings->FileType ==
+                         M4VIDEOEDITING_kFileType_ARGB8888) &&
+                        (M4OSA_FALSE ==
+                            pC->pC1->pSettings->ClipProperties.bSetImageData)) {
+
+                        err = M4VSS3GPP_intSetYuv420PlaneFromARGB888(pC, pC->pC1);
+                        if( M4NO_ERROR != err ) {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                                M4VSS3GPP_intSetYuv420PlaneFromARGB888 err=%x", err);
+                            return err;
+                        }
+                    }
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1,
+                         (M4OSA_Int32)pC->ewc.dInputVidCts);
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            M4VSS3GPP_intDecodeVideoUpToCts(C1) returns err=0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /* If the decoding is not completed, do one more step with time frozen */
+                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
+                    {
+                        return M4NO_ERROR;
+                    }
+                }
+
+                /* Don't decode more than needed */
+                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)
+                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus)) )
+                {
+                    /**
+                    * Decode the clip2 video up to the target time
+                        (will jump to the previous RAP if needed) */
+                    if ((pC->pC2->pSettings->FileType ==
+                         M4VIDEOEDITING_kFileType_ARGB8888) &&
+                        (M4OSA_FALSE ==
+                            pC->pC2->pSettings->ClipProperties.bSetImageData)) {
+
+                        err = M4VSS3GPP_intSetYuv420PlaneFromARGB888(pC, pC->pC2);
+                        if( M4NO_ERROR != err ) {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                                M4VSS3GPP_intSetYuv420PlaneFromARGB888 err=%x", err);
+                            return err;
+                        }
+                    }
+
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC2,
+                         (M4OSA_Int32)pC->ewc.dInputVidCts);
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            M4VSS3GPP_intDecodeVideoUpToCts(C2) returns err=0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /* If the decoding is not completed, do one more step with time frozen */
+                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus )
+                    {
+                        return M4NO_ERROR;
+                    }
+                }
+
+                /**
+                * Reset the video pre-processing error before calling the encoder */
+                pC->ewc.VppError = M4NO_ERROR;
+
+                M4OSA_TRACE2_0("F **** blend AUs");
+
+                /**
+                * Encode the frame (rendering, filtering and writing will be done
+                in encoder callbacks */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
+                    pC->ewc.dInputVidCts, M4ENCODER_kNormalFrame);
+
+                /**
+                * If encode returns a process frame error, it is likely to be a VPP error */
+                if( M4NO_ERROR != pC->ewc.VppError )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
+                        pC->ewc.VppError);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
+                    {
+
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                        return pC->ewc.VppError;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    }
+
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                }
+                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
+                {
+                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
+                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
+                    }
+
+                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+                    file size is reached
+                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE is
+                    returned*/
+                    else if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Increment time by the encoding period */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+            }
+            break;
+
+            /* ____________ */
+            /*|            |*/
+            /*| ERROR CASE |*/
+            /*|____________|*/
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: invalid internal state (0x%x),\
+                returning M4VSS3GPP_ERR_INTERNAL_STATE",
+                pC->Vstate);
+            return M4VSS3GPP_ERR_INTERNAL_STATE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckVideoMode()
+ * @brief Check which video process mode we must use, depending on the output CTS.
+ * @param pC (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intCheckVideoMode(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    const M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts;
+    /**< Transition duration */
+    const M4OSA_Int32 TD = pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+    M4OSA_Int32 iTmp;
+
+    const M4VSS3GPP_EditVideoState previousVstate = pC->Vstate;
+
+    /* Decides, for the current output CTS, which processing mode applies
+       (copy/paste, decode/encode, begin-cut or transition), and creates or
+       destroys the video encoder when the mode change requires it. */
+
+    /**
+    * Check if Clip1 is on its begin cut, or in an effect zone */
+    M4VSS3GPP_intCheckVideoEffects(pC, 1);
+
+    /**
+    * Check if we are in the transition with next clip */
+    if( ( TD > 0) && (( t - pC->pC1->iVoffset) >= (pC->pC1->iEndTime - TD)) )
+    {
+        /**
+        * We are in a transition */
+        pC->Vstate = M4VSS3GPP_kEditVideoState_TRANSITION;
+        pC->bTransitionEffect = M4OSA_TRUE;
+
+        /**
+        * Open second clip for transition, if not yet opened */
+        if( M4OSA_NULL == pC->pC2 )
+        {
+            pC->pC1->bGetYuvDataFromDecoder = M4OSA_TRUE;
+
+            err = M4VSS3GPP_intOpenClip(pC, &pC->pC2,
+                &pC->pClipList[pC->uiCurrentClip + 1]);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_editOpenClip returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Add current video output CTS to the clip offset
+            * (audio output CTS is not yet at the transition, so audio
+            * offset can't be updated yet). */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
+
+            /**
+            * 2005-03-24: BugFix for audio-video synchro:
+            * Update transition duration due to the actual video transition beginning time.
+            * It will avoid desynchronization when doing the audio transition. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            iTmp = ((M4OSA_Int32)pC->ewc.dInputVidCts)\
+                - (pC->pC1->iEndTime - TD + pC->pC1->iVoffset);
+            if (iTmp < (M4OSA_Int32)pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration)
+            /**< Test in case of a very short transition */
+            {
+                pC->pTransitionList[pC->
+                    uiCurrentClip].uiTransitionDuration -= iTmp;
+
+                /**
+                * Don't forget to also correct the total duration used for the progress bar
+                * (it was computed with the original transition duration). */
+                pC->ewc.iOutputDuration += iTmp;
+            }
+            /**< No "else" here because it's hard to predict the effect of 0 duration transition...*/
+        }
+
+        /**
+        * Check effects for clip2 */
+        M4VSS3GPP_intCheckVideoEffects(pC, 2);
+    }
+    else
+    {
+        /**
+        * We are not in a transition */
+        pC->bTransitionEffect = M4OSA_FALSE;
+
+        /* If there is an effect we go to decode/encode mode */
+        if((pC->nbActiveEffects > 0) || (pC->nbActiveEffects1 > 0) ||
+            (pC->pC1->pSettings->FileType ==
+             M4VIDEOEDITING_kFileType_ARGB8888) ||
+            (pC->pC1->pSettings->bTranscodingRequired == M4OSA_TRUE)) {
+            pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
+        }
+        /* We do a begin cut, except if already done (time is not progressing because we want
+        to catch all P-frames after the cut) */
+        else if( M4OSA_TRUE == pC->bClip1AtBeginCut )
+        {
+            if(pC->pC1->pSettings->ClipProperties.VideoStreamType == M4VIDEOEDITING_kH264) {
+                pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
+                pC->bEncodeTillEoF = M4OSA_TRUE;
+            } else if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+                || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) ) {
+                pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT;
+            } else {
+                pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT;
+            }
+        }
+        /* Else we are in default copy/paste mode */
+        else
+        {
+            if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+                || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) )
+            {
+                pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT;
+            }
+            else if( pC->bIsMMS == M4OSA_TRUE )
+            {
+                M4OSA_UInt32 currentBitrate;
+                M4OSA_ERR err = M4NO_ERROR;
+
+                /* Do we need to reencode the video to downgrade the bitrate or not ? */
+                /* Let's compute the current bitrate of the current edited clip */
+                err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption(
+                    pC->pC1->pReaderContext,
+                    M4READER_kOptionID_Bitrate, &currentBitrate);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCheckVideoMode:\
+                        Error when getting next bitrate of edited clip: 0x%x",
+                        err);
+                    return err;
+                }
+
+                /* Remove audio bitrate */
+                currentBitrate -= 12200;
+
+                /* Test if we go into copy/paste mode or into decode/encode mode */
+                if( currentBitrate > pC->uiMMSVideoBitrate )
+                {
+                    pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
+                }
+                else
+                {
+                    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+                }
+            }
+            else if(!((pC->m_bClipExternalHasStarted == M4OSA_TRUE) &&
+                    (pC->Vstate == M4VSS3GPP_kEditVideoState_DECODE_ENCODE)) &&
+                    pC->bEncodeTillEoF == M4OSA_FALSE)
+            {
+                /**
+                 * Test if we go into copy/paste mode or into decode/encode mode
+                 * If an external effect has been applied on the current clip
+                 * then continue to be in decode/encode mode till end of
+                 * clip to avoid H.264 distortion.
+                 */
+                pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+            }
+        }
+    }
+
+    /**
+    * Check if we create an encoder */
+    if( ( ( M4VSS3GPP_kEditVideoState_READ_WRITE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_AFTER_CUT
+        == previousVstate)) /**< read mode */
+        && (( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == pC->Vstate)) /**< encode mode */
+        && pC->bIsMMS == M4OSA_FALSE )
+    {
+        /**
+        * Create the encoder, if not created already*/
+        if (pC->ewc.encoderState == M4VSS3GPP_kNoEncoder) {
+            err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder \
+                    returns 0x%x!", err);
+                return err;
+            }
+        }
+    }
+    else if( pC->bIsMMS == M4OSA_TRUE && pC->ewc.pEncContext == M4OSA_NULL )
+    {
+        /**
+        * Create the encoder */
+        err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * When we go from filtering to read/write, we must act like a begin cut,
+    * because the last filtered image may be different than the original image. */
+    else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == previousVstate)) /**< encode mode */
+        && (M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate) /**< read mode */
+        && (pC->bEncodeTillEoF == M4OSA_FALSE) )
+    {
+        pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT;
+    }
+
+    /**
+    * Check if we destroy an encoder */
+    else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == previousVstate)) /**< encode mode */
+        && (( M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_AFTER_CUT
+        == pC->Vstate)) /**< read mode */
+        && pC->bIsMMS == M4OSA_FALSE )
+    {
+        /**
+        * Destroy the previously created encoder */
+        err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intDestroyVideoEncoder returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCheckVideoMode: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intStartAU()
+ * @brief StartAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param pContext: (IN) It is the VSS 3GPP context in our case
+ * @param streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param pAU: (IN/OUT) Access Unit to be prepared.
+ * @return M4NO_ERROR: there is no error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intStartAU( M4WRITER_Context pContext,
+    M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU )
+{
+    M4OSA_ERR err;
+    /* NOTE: the unused local uiMaxAuSize was removed; this wrapper only
+       forwards the StartAU request to the real 3GPP writer. The streamID
+       parameter is kept for writer-interface compatibility but is ignored:
+       this shell always targets the video stream. */
+
+    /**
+    * Given context is actually the VSS3GPP context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    /**
+    * Get the output AU to write into */
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intStartAU: pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intStartAU: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intProcessAU()
+ * @brief ProcessAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param pContext: (IN) It is the VSS 3GPP context in our case
+ * @param streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param pAU: (IN/OUT) Access Unit to be written
+ * @return M4NO_ERROR: there is no error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intProcessAU( M4WRITER_Context pContext,
+    M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU )
+{
+    /**
+    * The writer-like context handed to the encoder shell is in fact the
+    * VSS3GPP edit context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /**
+    * Record the encoded AU time as the current output video CTS */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.dOutputVidCts = pAU->CTS;
+
+    /**
+    * Update time info for the Counter Time System to be equal to the bit-stream time */
+    M4VSS3GPP_intUpdateTimeInfo(pC, pAU);
+
+    /**
+    * Forward the AU to the real 3GPP writer (always on the video stream) */
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intProcessAU: pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Success */
+    M4OSA_TRACE3_0("M4VSS3GPP_intProcessAU: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intVPP()
+ * @brief We implement our own VideoPreProcessing function
+ * @note It is called by the video encoder
+ * @param pContext (IN) VPP context, which actually is the VSS 3GPP context in our case
+ * @param pPlaneIn (IN)
+ * @param pPlaneOut (IN/OUT) Pointer to an array of 3 planes that will contain the output
+ *                    YUV420 image
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
+                           M4VIFI_ImagePlane *pPlaneOut )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4_MediaTime ts;
+    M4VIFI_ImagePlane *pTmp = M4OSA_NULL;
+    /* NOTE(review): pLastDecodedFrame is never assigned before its use in the
+     * render-dup/no-pre-resize branch below -- see the note at that memcpy. */
+    M4VIFI_ImagePlane *pLastDecodedFrame = M4OSA_NULL ;
+    M4VIFI_ImagePlane *pDecoderRenderFrame = M4OSA_NULL;
+    M4VIFI_ImagePlane pTemp1[3],pTemp2[3];
+    M4VIFI_ImagePlane pTempPlaneClip1[3],pTempPlaneClip2[3];
+    M4OSA_UInt32 i = 0, yuvFrameWidth = 0, yuvFrameHeight = 0;
+    M4OSA_Bool bSkipFrameEffect = M4OSA_FALSE;
+    /**
+    * VPP context is actually the VSS3GPP context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    memset((void *)pTemp1, 0, 3*sizeof(M4VIFI_ImagePlane));
+    memset((void *)pTemp2, 0, 3*sizeof(M4VIFI_ImagePlane));
+    memset((void *)pTempPlaneClip1, 0, 3*sizeof(M4VIFI_ImagePlane));
+    memset((void *)pTempPlaneClip2, 0, 3*sizeof(M4VIFI_ImagePlane));
+
+    /**
+    * Reset VPP error remembered in context */
+    pC->ewc.VppError = M4NO_ERROR;
+
+    /**
+    * At the end of the editing, we may be called when no more clip is loaded.
+    * (because to close the encoder properly it must be stepped one or twice...) */
+    if( M4OSA_NULL == pC->pC1 )
+    {
+        /**
+        * We must fill the input of the encoder with a dummy image, because
+        * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */
+        memset((void *)pPlaneOut[0].pac_data,0,
+            pPlaneOut[0].u_stride * pPlaneOut[0].u_height);
+        memset((void *)pPlaneOut[1].pac_data,0,
+            pPlaneOut[1].u_stride * pPlaneOut[1].u_height);
+        memset((void *)pPlaneOut[2].pac_data,0,
+            pPlaneOut[2].u_stride * pPlaneOut[2].u_height);
+
+        M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR (abort)");
+        return M4NO_ERROR;
+    }
+
+    /**
+    **************** Transition case ****************/
+    if( M4OSA_TRUE == pC->bTransitionEffect )
+    {
+
+        /* NOTE(review): pTemp1/pTemp2/pTempPlaneClip1/pTempPlaneClip2 allocated
+         * in this branch are freed only on the success path (loop after the
+         * transition); every early "return M4NO_ERROR" error path below leaks
+         * them. */
+        err = M4VSS3GPP_intAllocateYUV420(pTemp1, pC->ewc.uiVideoWidth,
+                                          pC->ewc.uiVideoHeight);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(1) returns 0x%x, \
+                           returning M4NO_ERROR", err);
+            pC->ewc.VppError = err;
+            return M4NO_ERROR; /**< Return no error to the encoder core
+                               (else it may leak in some situations...) */
+        }
+
+        err = M4VSS3GPP_intAllocateYUV420(pTemp2, pC->ewc.uiVideoWidth,
+                                          pC->ewc.uiVideoHeight);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(2) returns 0x%x, \
+                           returning M4NO_ERROR", err);
+            pC->ewc.VppError = err;
+            return M4NO_ERROR; /**< Return no error to the encoder core
+                               (else it may leak in some situations...) */
+        }
+
+        err = M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth,
+            pC->ewc.uiVideoHeight);
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return
+                M4NO_ERROR; /**< Return no error to the encoder core
+                            (else it may leak in some situations...) */
+        }
+
+        err = M4VSS3GPP_intAllocateYUV420(pC->yuv2, pC->ewc.uiVideoWidth,
+            pC->ewc.uiVideoHeight);
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return
+                M4NO_ERROR; /**< Return no error to the encoder core
+                            (else it may leak in some situations...) */
+        }
+
+        err = M4VSS3GPP_intAllocateYUV420(pC->yuv3, pC->ewc.uiVideoWidth,
+            pC->ewc.uiVideoHeight);
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return
+                M4NO_ERROR; /**< Return no error to the encoder core
+                            (else it may leak in some situations...) */
+        }
+
+        /**
+        * Compute the time in the clip1 base: ts = to - Offset */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        ts = pC->ewc.dInputVidCts - pC->pC1->iVoffset;
+
+        /**
+        * Render Clip1 */
+        if( pC->pC1->isRenderDup == M4OSA_FALSE )
+        {
+            err = M4VSS3GPP_intRenderFrameWithEffect(pC, pC->pC1, ts, M4OSA_TRUE,
+                                                    pTempPlaneClip1, pTemp1,
+                                                    pPlaneOut);
+            if ((M4NO_ERROR != err) &&
+                 (M4WAR_VIDEORENDERER_NO_NEW_FRAME != err)) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                    M4VSS3GPP_intRenderFrameWithEffect returns 0x%x", err);
+                pC->ewc.VppError = err;
+                /** Return no error to the encoder core
+                  * else it may leak in some situations.*/
+                return M4NO_ERROR;
+            }
+        }
+        if ((pC->pC1->isRenderDup == M4OSA_TRUE) ||
+             (M4WAR_VIDEORENDERER_NO_NEW_FRAME == err)) {
+            pTmp = pC->yuv1;
+            /* NOTE(review): pointer compared against M4NO_ERROR (0);
+             * M4OSA_NULL is the intended constant -- same value, wrong type. */
+            if (pC->pC1->lastDecodedPlane != M4NO_ERROR) {
+                /* Copy last decoded plane to output plane */
+                memcpy((void *)pTmp[0].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[0].pac_data,
+                    (pTmp[0].u_height * pTmp[0].u_width));
+                memcpy((void *)pTmp[1].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[1].pac_data,
+                    (pTmp[1].u_height * pTmp[1].u_width));
+                memcpy((void *)pTmp[2].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[2].pac_data,
+                    (pTmp[2].u_height * pTmp[2].u_width));
+            }
+            pC->pC1->lastDecodedPlane = pTmp;
+        }
+
+        /**
+        * Compute the time in the clip2 base: ts = to - Offset */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        ts = pC->ewc.dInputVidCts - pC->pC2->iVoffset;
+        /**
+        * Render Clip2 */
+        if( pC->pC2->isRenderDup == M4OSA_FALSE )
+        {
+
+            err = M4VSS3GPP_intRenderFrameWithEffect(pC, pC->pC2, ts, M4OSA_FALSE,
+                                                    pTempPlaneClip2, pTemp2,
+                                                    pPlaneOut);
+            if ((M4NO_ERROR != err) &&
+                 (M4WAR_VIDEORENDERER_NO_NEW_FRAME != err)) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                    M4VSS3GPP_intRenderFrameWithEffect returns 0x%x", err);
+                pC->ewc.VppError = err;
+                /** Return no error to the encoder core
+                  * else it may leak in some situations.*/
+                return M4NO_ERROR;
+            }
+        }
+        if ((pC->pC2->isRenderDup == M4OSA_TRUE) ||
+             (M4WAR_VIDEORENDERER_NO_NEW_FRAME == err)) {
+            pTmp = pC->yuv2;
+            /* NOTE(review): same pointer-vs-M4NO_ERROR comparison as for clip1
+             * above; M4OSA_NULL is the intended constant. */
+            if (pC->pC2->lastDecodedPlane != M4NO_ERROR) {
+                /* Copy last decoded plane to output plane */
+                memcpy((void *)pTmp[0].pac_data,
+                    (void *)pC->pC2->lastDecodedPlane[0].pac_data,
+                    (pTmp[0].u_height * pTmp[0].u_width));
+                memcpy((void *)pTmp[1].pac_data,
+                    (void *)pC->pC2->lastDecodedPlane[1].pac_data,
+                    (pTmp[1].u_height * pTmp[1].u_width));
+                memcpy((void *)pTmp[2].pac_data,
+                    (void *)pC->pC2->lastDecodedPlane[2].pac_data,
+                    (pTmp[2].u_height * pTmp[2].u_width));
+            }
+            pC->pC2->lastDecodedPlane = pTmp;
+        }
+
+
+        pTmp = pPlaneOut;
+        err = M4VSS3GPP_intVideoTransition(pC, pTmp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intVPP: M4VSS3GPP_intVideoTransition returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return M4NO_ERROR; /**< Return no error to the encoder core
+                               (else it may leak in some situations...) */
+        }
+        /* Success path: release all temporary planes of the transition case */
+        for (i=0; i < 3; i++)
+        {
+            if(pTempPlaneClip2[i].pac_data != M4OSA_NULL) {
+                free(pTempPlaneClip2[i].pac_data);
+                pTempPlaneClip2[i].pac_data = M4OSA_NULL;
+            }
+
+            if(pTempPlaneClip1[i].pac_data != M4OSA_NULL) {
+                free(pTempPlaneClip1[i].pac_data);
+                pTempPlaneClip1[i].pac_data = M4OSA_NULL;
+            }
+
+            if (pTemp2[i].pac_data != M4OSA_NULL) {
+                free(pTemp2[i].pac_data);
+                pTemp2[i].pac_data = M4OSA_NULL;
+            }
+
+            if (pTemp1[i].pac_data != M4OSA_NULL) {
+                free(pTemp1[i].pac_data);
+                pTemp1[i].pac_data = M4OSA_NULL;
+            }
+        }
+    }
+    /**
+    **************** No Transition case ****************/
+    else
+    {
+        M4OSA_TRACE3_0("M4VSS3GPP_intVPP: NO transition case");
+        /**
+        * Compute the time in the clip base: ts = to - Offset */
+        ts = pC->ewc.dInputVidCts - pC->pC1->iVoffset;
+        pC->bIssecondClip = M4OSA_FALSE;
+        /**
+        * Render */
+        if (pC->pC1->isRenderDup == M4OSA_FALSE) {
+            M4OSA_TRACE3_0("M4VSS3GPP_intVPP: renderdup false");
+            /**
+            * Check if resizing is needed */
+            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame) {
+                if ((pC->pC1->pSettings->FileType ==
+                    M4VIDEOEDITING_kFileType_ARGB8888) &&
+                    (pC->nbActiveEffects == 0) &&
+                    (pC->pC1->bGetYuvDataFromDecoder == M4OSA_FALSE)) {
+                    /* Still image with no effect: let the decoder render
+                     * straight into the output planes */
+                    err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                              pC->pC1->pViDecCtxt,
+                              M4DECODER_kOptionID_EnableYuvWithEffect,
+                              (M4OSA_DataOption)M4OSA_TRUE);
+                    if (M4NO_ERROR == err ) {
+                        err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                                  pC->pC1->pViDecCtxt, &ts,
+                                  pPlaneOut, M4OSA_TRUE);
+                    }
+                } else {
+                    if (pC->pC1->pSettings->FileType ==
+                        M4VIDEOEDITING_kFileType_ARGB8888) {
+                        err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                                  pC->pC1->pViDecCtxt,
+                                  M4DECODER_kOptionID_EnableYuvWithEffect,
+                                  (M4OSA_DataOption)M4OSA_FALSE);
+                    }
+                    if (M4NO_ERROR == err) {
+                        err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                                  pC->pC1->pViDecCtxt, &ts,
+                                  pC->pC1->m_pPreResizeFrame, M4OSA_TRUE);
+                    }
+                }
+                if (M4NO_ERROR != err) {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                        m_pFctRender() returns error 0x%x", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR;
+                }
+                if (pC->pC1->pSettings->FileType !=
+                    M4VIDEOEDITING_kFileType_ARGB8888) {
+                    if (0 != pC->pC1->pSettings->ClipProperties.videoRotationDegrees) {
+                        // Save width and height of un-rotated frame
+                        yuvFrameWidth = pC->pC1->m_pPreResizeFrame[0].u_width;
+                        yuvFrameHeight = pC->pC1->m_pPreResizeFrame[0].u_height;
+                        err = M4VSS3GPP_intRotateVideo(pC->pC1->m_pPreResizeFrame,
+                                  pC->pC1->pSettings->ClipProperties.videoRotationDegrees);
+                        if (M4NO_ERROR != err) {
+                            M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                                rotateVideo() returns error 0x%x", err);
+                            pC->ewc.VppError = err;
+                            return M4NO_ERROR;
+                        }
+                    }
+                }
+
+                if (pC->nbActiveEffects > 0) {
+                    pC->pC1->bGetYuvDataFromDecoder = M4OSA_TRUE;
+                    /**
+                    * If we do modify the image, we need an intermediate
+                    * image plane */
+                    err = M4VSS3GPP_intAllocateYUV420(pTemp1,
+                              pC->pC1->m_pPreResizeFrame[0].u_width,
+                              pC->pC1->m_pPreResizeFrame[0].u_height);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                            M4VSS3GPP_intAllocateYUV420 error 0x%x", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    /* If video frame need to be resized, then apply the overlay after
+                     * the frame was rendered with rendering mode.
+                     * Here skip the framing(overlay) effect when applying video Effect. */
+                    bSkipFrameEffect = M4OSA_TRUE;
+                    err = M4VSS3GPP_intApplyVideoEffect(pC,
+                              pC->pC1->m_pPreResizeFrame, pTemp1, bSkipFrameEffect);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                            M4VSS3GPP_intApplyVideoEffect() error 0x%x", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    pDecoderRenderFrame= pTemp1;
+
+                } else {
+                    pDecoderRenderFrame = pC->pC1->m_pPreResizeFrame;
+                }
+                /* Prepare overlay temporary buffer if overlay exist */
+                if (pC->bClip1ActiveFramingEffect) {
+                    err = M4VSS3GPP_intAllocateYUV420(pTemp2,
+                        pPlaneOut[0].u_width, pPlaneOut[0].u_height);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420 \
+                            returns 0x%x, returning M4NO_ERROR", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    pTmp = pTemp2;
+                } else {
+                    pTmp = pPlaneOut;
+                }
+
+                /* Do rendering mode. */
+                if ((pC->pC1->bGetYuvDataFromDecoder == M4OSA_TRUE) ||
+                    (pC->pC1->pSettings->FileType !=
+                     M4VIDEOEDITING_kFileType_ARGB8888)) {
+
+                    err = M4VSS3GPP_intApplyRenderingMode(pC,
+                              pC->pC1->pSettings->xVSS.MediaRendering,
+                              pDecoderRenderFrame, pTmp);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                            M4VSS3GPP_intApplyRenderingMode) error 0x%x ", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                }
+
+                /* Apply overlay if overlay is exist */
+                if (pC->bClip1ActiveFramingEffect) {
+                    pDecoderRenderFrame = pTmp;
+                    pTmp = pPlaneOut;
+                    err = M4VSS3GPP_intApplyVideoOverlay(pC,
+                        pDecoderRenderFrame, pTmp);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                            M4VSS3GPP_intApplyVideoOverlay) error 0x%x ", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                }
+
+                if ((pC->pC1->pSettings->FileType ==
+                    M4VIDEOEDITING_kFileType_ARGB8888) &&
+                    (pC->nbActiveEffects == 0) &&
+                    (pC->pC1->bGetYuvDataFromDecoder == M4OSA_TRUE)) {
+
+                    err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                              pC->pC1->pViDecCtxt,
+                              M4DECODER_kOptionID_YuvWithEffectNonContiguous,
+                              (M4OSA_DataOption)pTmp);
+                    if (M4NO_ERROR != err) {
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    pC->pC1->bGetYuvDataFromDecoder = M4OSA_FALSE;
+                }
+
+                // Reset original width and height for resize frame plane
+                if (0 != pC->pC1->pSettings->ClipProperties.videoRotationDegrees &&
+                    180 != pC->pC1->pSettings->ClipProperties.videoRotationDegrees) {
+
+                    M4VSS3GPP_intSetYUV420Plane(pC->pC1->m_pPreResizeFrame,
+                                                yuvFrameWidth, yuvFrameHeight);
+                }
+            }
+            else
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intVPP: NO resize required");
+                if (pC->nbActiveEffects > 0) {
+                    /** If we do modify the image, we need an
+                    * intermediate image plane */
+                    err = M4VSS3GPP_intAllocateYUV420(pTemp1,
+                              pC->ewc.uiVideoWidth,
+                              pC->ewc.uiVideoHeight);
+                    if (M4NO_ERROR != err) {
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    pDecoderRenderFrame = pTemp1;
+                }
+                else {
+                    pDecoderRenderFrame = pPlaneOut;
+                }
+
+                pTmp = pPlaneOut;
+                err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                          pC->pC1->pViDecCtxt, &ts,
+                          pDecoderRenderFrame, M4OSA_TRUE);
+                if (M4NO_ERROR != err) {
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR;
+                }
+
+                if (pC->nbActiveEffects > 0) {
+                    /* Here we do not skip the overlay effect since
+                     * overlay and video frame are both of same resolution */
+                    bSkipFrameEffect = M4OSA_FALSE;
+                    err = M4VSS3GPP_intApplyVideoEffect(pC,
+                              pDecoderRenderFrame,pPlaneOut,bSkipFrameEffect);
+                }
+                if (M4NO_ERROR != err) {
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR;
+                }
+            }
+            pC->pC1->lastDecodedPlane = pTmp;
+            pC->pC1->iVideoRenderCts = (M4OSA_Int32)ts;
+
+        } else {
+            M4OSA_TRACE3_0("M4VSS3GPP_intVPP: renderdup true");
+
+            if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame) {
+                /**
+                * Copy last decoded plane to output plane */
+                memcpy((void *)pC->pC1->m_pPreResizeFrame[0].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[0].pac_data,
+                    (pC->pC1->m_pPreResizeFrame[0].u_height * pC->pC1->m_pPreResizeFrame[0].u_width));
+
+                memcpy((void *)pC->pC1->m_pPreResizeFrame[1].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[1].pac_data,
+                    (pC->pC1->m_pPreResizeFrame[1].u_height * pC->pC1->m_pPreResizeFrame[1].u_width));
+
+                memcpy((void *)pC->pC1->m_pPreResizeFrame[2].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[2].pac_data,
+                    (pC->pC1->m_pPreResizeFrame[2].u_height * pC->pC1->m_pPreResizeFrame[2].u_width));
+
+                if(pC->nbActiveEffects > 0) {
+                    /**
+                    * If we do modify the image, we need an
+                    * intermediate image plane */
+                    err = M4VSS3GPP_intAllocateYUV420(pTemp1,
+                              pC->pC1->m_pPreResizeFrame[0].u_width,
+                              pC->pC1->m_pPreResizeFrame[0].u_height);
+                    if (M4NO_ERROR != err) {
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    /* If video frame need to be resized, then apply the overlay after
+                     * the frame was rendered with rendering mode.
+                     * Here skip the framing(overlay) effect when applying video Effect. */
+                    bSkipFrameEffect = M4OSA_TRUE;
+                    err = M4VSS3GPP_intApplyVideoEffect(pC,
+                              pC->pC1->m_pPreResizeFrame,pTemp1, bSkipFrameEffect);
+                    if (M4NO_ERROR != err) {
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    pDecoderRenderFrame= pTemp1;
+                } else {
+                    pDecoderRenderFrame = pC->pC1->m_pPreResizeFrame;
+                }
+                /* Prepare overlay temporary buffer if overlay exist */
+                if (pC->bClip1ActiveFramingEffect) {
+                    err = M4VSS3GPP_intAllocateYUV420(
+                        pTemp2, pC->ewc.uiVideoWidth, pC->ewc.uiVideoHeight);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420 \
+                            returns 0x%x, returning M4NO_ERROR", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    pTmp = pTemp2;
+                } else {
+                    pTmp = pPlaneOut;
+                }
+                /* Do rendering mode */
+                err = M4VSS3GPP_intApplyRenderingMode(pC,
+                          pC->pC1->pSettings->xVSS.MediaRendering,
+                          pDecoderRenderFrame, pTmp);
+                if (M4NO_ERROR != err) {
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR;
+                }
+                /* Apply overlay if overlay is exist */
+                pTmp = pPlaneOut;
+                if (pC->bClip1ActiveFramingEffect) {
+                    err = M4VSS3GPP_intApplyVideoOverlay(pC,
+                        pTemp2, pTmp);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                            M4VSS3GPP_intApplyRenderingMode) error 0x%x ", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                }
+            } else {
+
+                err = M4VSS3GPP_intAllocateYUV420(pTemp1,
+                          pC->ewc.uiVideoWidth,
+                          pC->ewc.uiVideoHeight);
+                if (M4NO_ERROR != err) {
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR;
+                }
+                /**
+                * Copy last decoded plane to output plane */
+                /* NOTE(review): pLastDecodedFrame is still M4OSA_NULL here --
+                 * the three memcpy calls below dereference a null pointer.
+                 * It most likely should have been pointed at pTemp1 allocated
+                 * just above (pLastDecodedFrame = pTemp1). TODO confirm. */
+                memcpy((void *)pLastDecodedFrame[0].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[0].pac_data,
+                    (pLastDecodedFrame[0].u_height * pLastDecodedFrame[0].u_width));
+
+                memcpy((void *)pLastDecodedFrame[1].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[1].pac_data,
+                    (pLastDecodedFrame[1].u_height * pLastDecodedFrame[1].u_width));
+
+                memcpy((void *)pLastDecodedFrame[2].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[2].pac_data,
+                    (pLastDecodedFrame[2].u_height * pLastDecodedFrame[2].u_width));
+
+                pTmp = pPlaneOut;
+                /**
+                * Check if there is a effect */
+                if(pC->nbActiveEffects > 0) {
+                    /* Here we do not skip the overlay effect since
+                     * overlay and video are both of same resolution */
+                    bSkipFrameEffect = M4OSA_FALSE;
+                    err = M4VSS3GPP_intApplyVideoEffect(pC,
+                              pLastDecodedFrame, pTmp,bSkipFrameEffect);
+                    if (M4NO_ERROR != err) {
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                }
+            }
+            pC->pC1->lastDecodedPlane = pTmp;
+        }
+
+        M4OSA_TRACE3_1("M4VSS3GPP_intVPP: Rendered at CTS %.3f", ts);
+
+        for (i=0; i<3; i++) {
+            if (pTemp1[i].pac_data != M4OSA_NULL) {
+                free(pTemp1[i].pac_data);
+                pTemp1[i].pac_data = M4OSA_NULL;
+            }
+        }
+        for (i=0; i<3; i++) {
+            if (pTemp2[i].pac_data != M4OSA_NULL) {
+                free(pTemp2[i].pac_data);
+                pTemp2[i].pac_data = M4OSA_NULL;
+            }
+        }
+    }
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intApplyVideoOverlay()
+ * @brief Apply video overlay from pPlaneIn to pPlaneOut
+ * @param pC (IN/OUT) Internal edit context
+ * @param pInputPlanes (IN) Input raw YUV420 image
+ * @param pOutputPlanes (IN/OUT) Output raw YUV420 image
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intApplyVideoOverlay (M4VSS3GPP_InternalEditContext *pC,
+    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut) {
+
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4VSS3GPP_ExternalProgress extProgress;
+    M4OSA_Double VideoEffectTime;
+    M4OSA_Double PercentageDone;
+    M4OSA_UInt8 NumActiveEffects =0;
+    M4OSA_UInt32 Cts = 0;  /* NOTE(review): unused local */
+    M4OSA_Int32 nextEffectTime;
+    M4OSA_Int32 tmp;
+    M4OSA_UInt8 i;
+    M4OSA_ERR err;
+
+    /* Pick the active-effect list of the clip being composited */
+    pClip = pC->pC1;
+    if (pC->bIssecondClip == M4OSA_TRUE) {
+        NumActiveEffects = pC->nbActiveEffects1;
+    } else {
+        NumActiveEffects = pC->nbActiveEffects;
+    }
+    for (i=0; i<NumActiveEffects; i++) {
+        if (pC->bIssecondClip == M4OSA_TRUE) {
+            pFx = &(pC->pEffectsList[pC->pActiveEffectsList1[i]]);
+            /* Compute how far from the beginning of the effect we are, in clip-base time. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) +
+                pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration - pFx->uiStartTime;
+        } else {
+            pFx = &(pC->pEffectsList[pC->pActiveEffectsList[i]]);
+            /* Compute how far from the beginning of the effect we are, in clip-base time. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pFx->uiStartTime;
+        }
+        /* Do the framing(overlay) effect only,
+         * skip other color effect which had been applied */
+        if (pFx->xVSS.pFramingBuffer == M4OSA_NULL) {
+            continue;
+        }
+
+        /* To calculate %, substract timeIncrement because effect should finish
+         * on the last frame which is presented from CTS = eof-timeIncrement till CTS = eof */
+        PercentageDone = VideoEffectTime / ((M4OSA_Float)pFx->uiDuration);
+
+        /* Clamp progress to [0.0, 1.0] */
+        if (PercentageDone < 0.0) {
+            PercentageDone = 0.0;
+        }
+        if (PercentageDone > 1.0) {
+            PercentageDone = 1.0;
+        }
+        /**
+        * Compute where we are in the effect (scale is 0->1000) */
+        tmp = (M4OSA_Int32)(PercentageDone * 1000);
+
+        /**
+        * Set the progress info provided to the external function */
+        extProgress.uiProgress = (M4OSA_UInt32)tmp;
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
+        extProgress.uiClipTime = extProgress.uiOutputTime - pClip->iVoffset;
+        extProgress.bIsLast = M4OSA_FALSE;
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        nextEffectTime = (M4OSA_Int32)(pC->ewc.dInputVidCts \
+            + pC->dOutputFrameDuration);
+        if (nextEffectTime >= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) {
+            extProgress.bIsLast = M4OSA_TRUE;
+        }
+        /* Framing effects are dispatched through the external-effect callback;
+         * presumably ExtVideoEffectFct is always set when pFramingBuffer is
+         * non-null -- TODO confirm against the effect registration code. */
+        err = pFx->ExtVideoEffectFct(pFx->pExtVideoEffectFctCtxt,
+                  pPlaneIn, pPlaneOut, &extProgress,
+                  pFx->VideoEffectType - M4VSS3GPP_kVideoEffectType_External);
+
+        if (M4NO_ERROR != err) {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intApplyVideoOverlay: \
+                External video effect function returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intApplyVideoOverlay: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intApplyVideoEffect()
+ * @brief Apply video effect from pPlaneIn to pPlaneOut
+ * @param pC (IN/OUT) Internal edit context
+ * @param uiClip1orClip2 (IN/OUT) 1 for first clip, 2 for second clip
+ * @param pInputPlanes (IN) Input raw YUV420 image
+ * @param pOutputPlanes (IN/OUT) Output raw YUV420 image
+ * @param bSkipFramingEffect (IN) skip framing effect flag
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intApplyVideoEffect (M4VSS3GPP_InternalEditContext *pC,
+    M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut,
+    M4OSA_Bool bSkipFramingEffect) {
+
+    M4OSA_ERR err;
+
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4VSS3GPP_ExternalProgress extProgress;
+
+    M4OSA_Double VideoEffectTime;
+    M4OSA_Double PercentageDone;
+    M4OSA_Int32 tmp;
+
+    M4VIFI_ImagePlane *pPlaneTempIn;
+    M4VIFI_ImagePlane *pPlaneTempOut;
+    M4VIFI_ImagePlane pTempYuvPlane[3];
+    M4OSA_UInt8 i;
+    M4OSA_UInt8 NumActiveEffects =0;
+
+
+    /* Pick the active-effect list of the clip being composited */
+    pClip = pC->pC1;
+    if (pC->bIssecondClip == M4OSA_TRUE)
+    {
+        NumActiveEffects = pC->nbActiveEffects1;
+    }
+    else
+    {
+        NumActiveEffects = pC->nbActiveEffects;
+    }
+
+    memset((void *)pTempYuvPlane, 0, 3*sizeof(M4VIFI_ImagePlane));
+
+    /**
+    * Allocate temporary plane if needed RC */
+    /* With a single effect the result goes straight to pPlaneOut; a scratch
+     * plane is only needed to ping-pong between chained effects */
+    if (NumActiveEffects > 1) {
+        err = M4VSS3GPP_intAllocateYUV420(pTempYuvPlane, pPlaneOut->u_width,
+                                          pPlaneOut->u_height);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intApplyVideoEffect: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return
+                M4NO_ERROR; /**< Return no error to the encoder core
+                            (else it may leak in some situations...) */
+        }
+    }
+
+    /* Choose the first output buffer so that after an even/odd number of
+     * ping-pong swaps the LAST effect always writes into pPlaneOut */
+    if (NumActiveEffects % 2 == 0)
+    {
+        pPlaneTempIn = pPlaneIn;
+        pPlaneTempOut = pTempYuvPlane;
+    }
+    else
+    {
+        pPlaneTempIn = pPlaneIn;
+        pPlaneTempOut = pPlaneOut;
+    }
+
+    for (i=0; i<NumActiveEffects; i++)
+    {
+        if (pC->bIssecondClip == M4OSA_TRUE)
+        {
+
+
+            pFx = &(pC->pEffectsList[pC->pActiveEffectsList1[i]]);
+            /* Compute how far from the beginning of the effect we are, in clip-base time. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) +
+                pC->pTransitionList[pC->uiCurrentClip].
+                uiTransitionDuration- pFx->uiStartTime;
+        }
+        else
+        {
+            pFx = &(pC->pEffectsList[pC->pActiveEffectsList[i]]);
+            /* Compute how far from the beginning of the effect we are, in clip-base time. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pFx->uiStartTime;
+        }
+
+
+
+        /* To calculate %, substract timeIncrement because effect should finish on the last frame*/
+        /* which is presented from CTS = eof-timeIncrement till CTS = eof */
+        PercentageDone = VideoEffectTime
+            / ((M4OSA_Float)pFx->uiDuration/*- pC->dOutputFrameDuration*/);
+
+        /* Clamp progress to [0.0, 1.0] */
+        if( PercentageDone < 0.0 )
+            PercentageDone = 0.0;
+
+        if( PercentageDone > 1.0 )
+            PercentageDone = 1.0;
+
+        switch( pFx->VideoEffectType )
+        {
+            case M4VSS3GPP_kVideoEffectType_FadeFromBlack:
+                /**
+                * Compute where we are in the effect (scale is 0->1024). */
+                tmp = (M4OSA_Int32)(PercentageDone * 1024);
+
+                /**
+                * Apply the darkening effect */
+                err =
+                    M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect:\
+                        M4VFL_modifyLumaWithScale returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+                }
+                break;
+
+            case M4VSS3GPP_kVideoEffectType_FadeToBlack:
+                /**
+                * Compute where we are in the effect (scale is 0->1024) */
+                /* Inverted ramp: luma scale decreases as the effect progresses */
+                tmp = (M4OSA_Int32)(( 1.0 - PercentageDone) * 1024);
+
+                /**
+                * Apply the darkening effect */
+                err =
+                    M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect:\
+                        M4VFL_modifyLumaWithScale returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+                }
+                break;
+
+            default:
+                if( pFx->VideoEffectType
+                    >= M4VSS3GPP_kVideoEffectType_External )
+                {
+                    M4OSA_UInt32 Cts = 0;  /* NOTE(review): unused local */
+                    M4OSA_Int32 nextEffectTime;
+
+                    /**
+                    * Compute where we are in the effect (scale is 0->1000) */
+                    tmp = (M4OSA_Int32)(PercentageDone * 1000);
+
+                    /**
+                    * Set the progress info provided to the external function */
+                    extProgress.uiProgress = (M4OSA_UInt32)tmp;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
+                    extProgress.uiClipTime = extProgress.uiOutputTime - pClip->iVoffset;
+                    extProgress.bIsLast = M4OSA_FALSE;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    nextEffectTime = (M4OSA_Int32)(pC->ewc.dInputVidCts \
+                        + pC->dOutputFrameDuration);
+                    if(nextEffectTime >= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration))
+                    {
+                        extProgress.bIsLast = M4OSA_TRUE;
+                    }
+                    /* Here skip the framing effect,
+                     * do the framing effect after apply rendering mode */
+                    /* When skipping, pass the image through unchanged so the
+                     * ping-pong chain stays consistent */
+                    if ((pFx->xVSS.pFramingBuffer != M4OSA_NULL) &&
+                        bSkipFramingEffect == M4OSA_TRUE) {
+                        memcpy(pPlaneTempOut[0].pac_data, pPlaneTempIn[0].pac_data,
+                            pPlaneTempIn[0].u_height * pPlaneTempIn[0].u_width);
+                        memcpy(pPlaneTempOut[1].pac_data, pPlaneTempIn[1].pac_data,
+                            pPlaneTempIn[1].u_height * pPlaneTempIn[1].u_width);
+                        memcpy(pPlaneTempOut[2].pac_data, pPlaneTempIn[2].pac_data,
+                            pPlaneTempIn[2].u_height * pPlaneTempIn[2].u_width);
+
+                    } else {
+                        err = pFx->ExtVideoEffectFct(pFx->pExtVideoEffectFctCtxt,
+                            pPlaneTempIn, pPlaneTempOut, &extProgress,
+                            pFx->VideoEffectType
+                            - M4VSS3GPP_kVideoEffectType_External);
+                    }
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intApplyVideoEffect: \
+                            External video effect function returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                    break;
+                }
+                else
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect: unknown effect type (0x%x),\
+                        returning M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE",
+                        pFx->VideoEffectType);
+                    return M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE;
+                }
+        }
+        /**
+        * RC Updates pTempPlaneIn and pTempPlaneOut depending on current effect */
+        /* Swap the ping-pong buffers: the parity test keeps the chain aligned
+         * so the final effect's output lands in pPlaneOut */
+        if (((i % 2 == 0) && (NumActiveEffects % 2 == 0))
+            || ((i % 2 != 0) && (NumActiveEffects % 2 != 0)))
+        {
+            pPlaneTempIn = pTempYuvPlane;
+            pPlaneTempOut = pPlaneOut;
+        }
+        else
+        {
+            pPlaneTempIn = pPlaneOut;
+            pPlaneTempOut = pTempYuvPlane;
+        }
+    }
+
+    for(i=0; i<3; i++) {
+        if(pTempYuvPlane[i].pac_data != M4OSA_NULL) {
+            free(pTempYuvPlane[i].pac_data);
+            pTempYuvPlane[i].pac_data = M4OSA_NULL;
+        }
+    }
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intApplyVideoEffect: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intVideoTransition()
+ * @brief Apply video transition effect pC1+pC2->pPlaneOut
+ * @param pC (IN/OUT) Internal edit context
+ * @param pOutputPlanes (IN/OUT) Output raw YUV420 image
+ * @return M4NO_ERROR: No error
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
+                             M4VIFI_ImagePlane *pPlaneOut )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iProgress;
+    M4VSS3GPP_ExternalProgress extProgress;
+    M4VIFI_ImagePlane *pPlane;
+    M4OSA_Int32 i;
+    const M4OSA_Int32 iDur = (M4OSA_Int32)pC->
+        pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+    /**
+    * Compute how far from the end cut we are, in clip-base time.
+    * It is done with integers because the offset and begin cut have been rounded already. */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    iProgress = (M4OSA_Int32)((M4OSA_Double)pC->pC1->iEndTime) - pC->ewc.dInputVidCts +
+        ((M4OSA_Double)pC->pC1->iVoffset);
+    /**
+    * We must remove the duration of one frame, else we would almost never reach the end
+    * (It's kind of a "pile and intervals" issue). */
+    iProgress -= (M4OSA_Int32)pC->dOutputFrameDuration;
+
+    if( iProgress < 0 ) /**< Sanity checks */
+    {
+        iProgress = 0;
+    }
+
+    /**
+    * Compute where we are in the transition, on a base 1000 */
+    /* Invert: iProgress counted down to the end cut, the transition progress
+     * counts up from 0 (start) to 1000 (end) */
+    iProgress = ( ( iDur - iProgress) * 1000) / iDur;
+
+    /**
+    * Sanity checks */
+    if( iProgress < 0 )
+    {
+        iProgress = 0;
+    }
+    else if( iProgress > 1000 )
+    {
+        iProgress = 1000;
+    }
+
+    /* Reshape the linear 0..1000 progress according to the requested
+     * transition pacing curve (all in integer arithmetic) */
+    switch( pC->pTransitionList[pC->uiCurrentClip].TransitionBehaviour )
+    {
+        case M4VSS3GPP_TransitionBehaviour_SpeedUp:
+            iProgress = ( iProgress * iProgress) / 1000;
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_Linear:
+            /*do nothing*/
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_SpeedDown:
+            iProgress = (M4OSA_Int32)(sqrt(iProgress * 1000));
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_SlowMiddle:
+            if( iProgress < 500 )
+            {
+                iProgress = (M4OSA_Int32)(sqrt(iProgress * 500));
+            }
+            else
+            {
+                iProgress =
+                    (M4OSA_Int32)(( ( ( iProgress - 500) * (iProgress - 500))
+                    / 500) + 500);
+            }
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_FastMiddle:
+            if( iProgress < 500 )
+            {
+                iProgress = (M4OSA_Int32)(( iProgress * iProgress) / 500);
+            }
+            else
+            {
+                iProgress = (M4OSA_Int32)(sqrt(( iProgress - 500) * 500) + 500);
+            }
+            break;
+
+        default:
+            /*do nothing*/
+            break;
+    }
+
+    switch( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType )
+    {
+        case M4VSS3GPP_kVideoTransitionType_CrossFade:
+            /**
+            * Apply the transition effect */
+            /* Blend yuv1 (clip1) into yuv2 (clip2) weighted by iProgress */
+            err = M4VIFI_ImageBlendingonYUV420(M4OSA_NULL,
+                (M4ViComImagePlane *)pC->yuv1,
+                (M4ViComImagePlane *)pC->yuv2,
+                (M4ViComImagePlane *)pPlaneOut, iProgress);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVideoTransition:\
+                    M4VIFI_ImageBlendingonYUV420 returns error 0x%x,\
+                    returning M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR",
+                    err);
+                return M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kVideoTransitionType_None:
+            /**
+            * This is a stupid-non optimized version of the None transition...
+            * We copy the YUV frame */
+            if( iProgress < 500 ) /**< first half of transition */
+            {
+                pPlane = pC->yuv1;
+            }
+            else /**< second half of transition */
+            {
+                pPlane = pC->yuv2;
+            }
+            /**
+            * Copy the input YUV frames */
+            i = 3;
+
+            while( i-- > 0 )
+            {
+                memcpy((void *)pPlaneOut[i].pac_data,
+                    (void *)pPlane[i].pac_data,
+                    pPlaneOut[i].u_stride * pPlaneOut[i].u_height);
+            }
+            break;
+
+        default:
+            if( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType
+                >= M4VSS3GPP_kVideoTransitionType_External )
+            {
+                /**
+                * Set the progress info provided to the external function */
+                extProgress.uiProgress = (M4OSA_UInt32)iProgress;
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
+                extProgress.uiClipTime = extProgress.uiOutputTime - pC->pC1->iVoffset;
+
+                err = pC->pTransitionList[pC->
+                    uiCurrentClip].ExtVideoTransitionFct(
+                    pC->pTransitionList[pC->
+                    uiCurrentClip].pExtVideoTransitionFctCtxt,
+                    pC->yuv1, pC->yuv2, pPlaneOut, &extProgress,
+                    pC->pTransitionList[pC->
+                    uiCurrentClip].VideoTransitionType
+                    - M4VSS3GPP_kVideoTransitionType_External);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intVideoTransition:\
+                        External video transition function returns 0x%x!",
+                        err);
+                    return err;
+                }
+                break;
+            }
+            else
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVideoTransition: unknown transition type (0x%x),\
+                    returning M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE",
+                    pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType);
+                return M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE;
+            }
+    }
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intVideoTransition: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
/**
 ******************************************************************************
 * M4OSA_Void M4VSS3GPP_intUpdateTimeInfo()
 * @brief    Update bit stream time info by Counter Time System to be compliant with
 *           players using bit stream time info
 * @note     H263 uses an absolute time counter unlike MPEG4 which uses Group Of Vops
 *           (GOV, see the standard)
 * @param    pC    (IN/OUT) returns time updated video AU,
 *                 the offset between system and video time (MPEG4 only)
 *                 and the state of the current clip (MPEG4 only)
 * @param    pAU   (IN/OUT) access unit whose in-stream time fields are
 *                 patched in place
 * @return   nothing
 ******************************************************************************
 */
static M4OSA_Void
M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
                            M4SYS_AccessUnit *pAU )
{
    M4OSA_UInt8 uiTmp;
    M4OSA_UInt32 uiCts = 0;
    M4OSA_MemAddr8 pTmp;
    M4OSA_UInt32 uiAdd;
    M4OSA_UInt32 uiCurrGov;
    M4OSA_Int8 iDiff;

    M4VSS3GPP_ClipContext *pClipCtxt = pC->pC1;
    M4OSA_Int32 *pOffset = &(pC->ewc.iMpeg4GovOffset);

    /**
     * Set H263 time counter from system time */
    if( M4SYS_kH263 == pAU->stream->streamType )
    {
        /* Convert the AU CTS (ms) into 30000/1001 Hz (29.97 fps) ticks,
         * rounded to nearest, then wrap on the H263 temporal-reference
         * modulo before writing it back into the bit stream. */
        uiTmp = (M4OSA_UInt8)((M4OSA_UInt32)( ( pAU->CTS * 30) / 1001 + 0.5)
            % M4VSS3GPP_EDIT_H263_MODULO_TIME);
        M4VSS3GPP_intSetH263TimeCounter((M4OSA_MemAddr8)(pAU->dataAddress),
            uiTmp);
    }
    /*
     * Set MPEG4 GOV time counter regarding video and system time */
    else if( M4SYS_kMPEG_4 == pAU->stream->streamType )
    {
        /*
         * If GOV.
         * beware of little/big endian! */
        /* correction: read 8 bits block instead of one 32 bits block
         * (byte-wise assembly avoids misaligned 32-bit loads and makes the
         * comparison independent of host endianness) */
        M4OSA_UInt8 *temp8 = (M4OSA_UInt8 *)(pAU->dataAddress);
        M4OSA_UInt32 temp32 = 0;

        temp32 = ( 0x000000ff & (M4OSA_UInt32)(*temp8))
            + (0x0000ff00 & ((M4OSA_UInt32)(*(temp8 + 1))) << 8)
            + (0x00ff0000 & ((M4OSA_UInt32)(*(temp8 + 2))) << 16)
            + (0xff000000 & ((M4OSA_UInt32)(*(temp8 + 3))) << 24);

        M4OSA_TRACE3_2("RC: Temp32: 0x%x, dataAddress: 0x%x\n", temp32,
            *(pAU->dataAddress));

        if( M4VSS3GPP_EDIT_GOV_HEADER == temp32 )
        {
            pTmp =
                (M4OSA_MemAddr8)(pAU->dataAddress
                + 1); /**< Jump to the time code (just after the 32 bits header) */
            /* System time (ms) adjusted by the accumulated
             * system-to-video-clock offset */
            uiAdd = (M4OSA_UInt32)(pAU->CTS)+( *pOffset);

            switch( pClipCtxt->bMpeg4GovState )
            {
                case M4OSA_FALSE: /*< INIT: first GOV seen for this clip */
                    {
                        /* video time = ceil (system time + offset) */
                        uiCts = ( uiAdd + 999) / 1000;

                        /* offset update: fold the rounding error back into
                         * the offset so subsequent AUs stay aligned */
                        ( *pOffset) += (( uiCts * 1000) - uiAdd);

                        /* Save values */
                        pClipCtxt->uiMpeg4PrevGovValueSet = uiCts;

                        /* State to 'first' */
                        pClipCtxt->bMpeg4GovState = M4OSA_TRUE;
                    }
                    break;

                case M4OSA_TRUE: /*< UPDATE: subsequent GOVs */
                    {
                        /* Get current Gov value */
                        M4VSS3GPP_intGetMPEG4Gov(pTmp, &uiCurrGov);

                        /* video time = floor or ceil (system time + offset) */
                        uiCts = (uiAdd / 1000);
                        /* iDiff > 0 means the stream GOV advanced by more
                         * seconds than the floored system time did, so we
                         * must round up to keep both clocks in step */
                        iDiff = (M4OSA_Int8)(uiCurrGov
                            - pClipCtxt->uiMpeg4PrevGovValueGet - uiCts
                            + pClipCtxt->uiMpeg4PrevGovValueSet);

                        /* ceiling */
                        if( iDiff > 0 )
                        {
                            uiCts += (M4OSA_UInt32)(iDiff);

                            /* offset update */
                            ( *pOffset) += (( uiCts * 1000) - uiAdd);
                        }

                        /* Save values */
                        pClipCtxt->uiMpeg4PrevGovValueGet = uiCurrGov;
                        pClipCtxt->uiMpeg4PrevGovValueSet = uiCts;
                    }
                    break;
            }

            /* Rewrite the GOV time code with the recomputed second count */
            M4VSS3GPP_intSetMPEG4Gov(pTmp, uiCts);
        }
    }
    return;
}
+
/**
 ******************************************************************************
 * M4OSA_Void M4VSS3GPP_intCheckVideoEffects()
 * @brief    Check which video effect must be applied at the current time
 * @note     Fills pC->pActiveEffectsList (clip1) or pC->pActiveEffectsList1
 *           (clip2) with the indices of the effects active at the current
 *           output time, updates the per-clip framing-effect flags, the
 *           external-effect flag and the begin-cut flag (clip1 only).
 * @param    pC            (IN/OUT) Internal edit context
 * @param    uiClipNumber  (IN) 1 to check against clip1, otherwise clip2
 * @return   nothing
 ******************************************************************************
 */
static M4OSA_Void
M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC,
                               M4OSA_UInt8 uiClipNumber )
{
    M4OSA_UInt8 uiClipIndex;
    M4OSA_UInt8 uiFxIndex, i;
    M4VSS3GPP_ClipContext *pClip;
    M4VSS3GPP_EffectSettings *pFx;
    M4OSA_Int32 Off, BC, EC;
    // Decorrelate input and output encoding timestamp to handle encoder prefetch
    M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts;

    uiClipIndex = pC->uiCurrentClip;
    /* Select the clip context and clear its framing-effect flag;
     * it is re-set below if a framing effect turns out to be active. */
    if (uiClipNumber == 1) {
        pClip = pC->pC1;
        pC->bClip1ActiveFramingEffect = M4OSA_FALSE;
    } else {
        pClip = pC->pC2;
        pC->bClip2ActiveFramingEffect = M4OSA_FALSE;
    }
    /**
     * Shortcuts for code readability
     * (NOTE(review): EC is assigned but never read in this function) */
    Off = pClip->iVoffset;
    BC = pClip->iActualVideoBeginCut;
    EC = pClip->iEndTime;

    /* i counts the active effects found so far */
    i = 0;

    for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ )
    {
        /** Shortcut, reverse order because of priority between effects(EndEffect always clean )*/
        pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]);

        if( M4VSS3GPP_kVideoEffectType_None != pFx->VideoEffectType )
        {
            /**
             * Check if there is actually a video effect */

            if(uiClipNumber ==1)
            {
                /**< Are we after the start time of the effect?
                 * or Are we into the effect duration?
                 */
                if ( (t >= (M4OSA_Int32)(pFx->uiStartTime)) &&
                    (t <= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) {
                    /**
                     * Set the active effect(s) */
                    pC->pActiveEffectsList[i] = pC->nbEffects-1-uiFxIndex;

                    /**
                     * Update counter of active effects */
                    i++;
                    /* Framing (overlay) effects need dedicated handling
                     * downstream, hence the dedicated flag */
                    if (pFx->xVSS.pFramingBuffer != M4OSA_NULL) {
                        pC->bClip1ActiveFramingEffect = M4OSA_TRUE;
                    }

                    /**
                     * For all external effects set this flag to true. */
                    if(pFx->VideoEffectType > M4VSS3GPP_kVideoEffectType_External)
                    {
                        pC->m_bClipExternalHasStarted = M4OSA_TRUE;
                    }
                }

            }
            else
            {
                /**< Are we into the effect duration?
                 * (clip2's timeline is shifted forward by the transition
                 * duration before comparing against the effect window) */
                if ( ((M4OSA_Int32)(t + pC->pTransitionList[uiClipIndex].uiTransitionDuration)
                    >= (M4OSA_Int32)(pFx->uiStartTime))
                    && ( (M4OSA_Int32)(t + pC->pTransitionList[uiClipIndex].uiTransitionDuration)
                    <= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) {
                    /**
                     * Set the active effect(s) */
                    pC->pActiveEffectsList1[i] = pC->nbEffects-1-uiFxIndex;

                    /**
                     * Update counter of active effects */
                    i++;
                    if (pFx->xVSS.pFramingBuffer != M4OSA_NULL) {
                        pC->bClip2ActiveFramingEffect = M4OSA_TRUE;
                    }
                    /**
                     * For all external effects set this flag to true. */
                    if(pFx->VideoEffectType > M4VSS3GPP_kVideoEffectType_External)
                    {
                        pC->m_bClipExternalHasStarted = M4OSA_TRUE;
                    }

                    /**
                     * The third effect has the highest priority, then the second one, then the first one.
                     * Hence, as soon as we found an active effect, we can get out of this loop */
                    /* NOTE(review): despite the comment above, there is no
                     * 'break' here and the loop keeps scanning — confirm
                     * whether early exit was intended. */
                }
            }
            /* NOTE(review): this block executes for EVERY non-None effect
             * and unconditionally clears the external flag when clip1 is not
             * H264, possibly undoing the assignment made just above — verify
             * this is the intended behavior. */
            if (M4VIDEOEDITING_kH264 !=
                pC->pC1->pSettings->ClipProperties.VideoStreamType) {

                // For Mpeg4 and H263 clips, full decode encode not required
                pC->m_bClipExternalHasStarted = M4OSA_FALSE;
            }
        }
    }
    if(1==uiClipNumber)
    {
        /**
         * Save number of active effects */
        pC->nbActiveEffects = i;
    }
    else
    {
        pC->nbActiveEffects1 = i;
    }

    /**
     * Change the absolut time to clip related time */
    t -= Off;

    /**
     * Check if we are on the begin cut (for clip1 only) */
    if( ( 0 != BC) && (t == BC) && (1 == uiClipNumber) )
    {
        pC->bClip1AtBeginCut = M4OSA_TRUE;
    }
    else
    {
        pC->bClip1AtBeginCut = M4OSA_FALSE;
    }

    return;
}
+
/**
 ******************************************************************************
 * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder()
 * @brief    Creates the video encoder
 * @note     Configures the encoder shell from the edit context (format,
 *           size, framerate, bitrate, profile/level), then walks the
 *           encoder through Init -> Open -> Start, tracking the state in
 *           pC->ewc.encoderState at each step.
 * @param    pC    (IN/OUT) Internal edit context
 * @return   M4NO_ERROR on success, or the first error reported by the
 *           encoder shell / encoder selection.
 ******************************************************************************
 */
M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
{
    M4OSA_ERR err;
    M4ENCODER_AdvancedParams EncParams;

    /**
     * Simulate a writer interface with our specific function */
    pC->ewc.OurWriterDataInterface.pProcessAU =
        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
                                but it follow the writer interface */
    pC->ewc.OurWriterDataInterface.pStartAU =
        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
                              but it follow the writer interface */
    pC->ewc.OurWriterDataInterface.pWriterContext =
        (M4WRITER_Context)
        pC; /**< We give the internal context as writer context */

    /**
     * Get the encoder interface, if not already done */
    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
    {
        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
            pC->ewc.VideoStreamType);
        M4OSA_TRACE1_1(
            "M4VSS3GPP_intCreateVideoEncoder: setCurrentEncoder returns 0x%x",
            err);
        M4ERR_CHECK_RETURN(err);
    }

    /**
     * Set encoder shell parameters according to VSS settings */

    /* Common parameters */
    EncParams.InputFormat = M4ENCODER_kIYUV420;
    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;

    if( pC->bIsMMS == M4OSA_FALSE )
    {
        /* No strict regulation in video editor */
        /* Because of the effects and transitions we should allow more flexibility */
        /* Also it prevents to drop important frames (with a bad result on sheduling and
        block effetcs) */
        EncParams.bInternalRegulation = M4OSA_FALSE;
        // Variable framerate is not supported by StageFright encoders
        EncParams.FrameRate = M4ENCODER_k30_FPS;
    }
    else
    {
        /* In case of MMS mode, we need to enable bitrate regulation to be sure */
        /* to reach the targeted output file size */
        EncParams.bInternalRegulation = M4OSA_TRUE;
        EncParams.FrameRate = pC->MMSvideoFramerate;
    }

    /**
     * Other encoder settings (defaults, possibly overridden per format below) */
    EncParams.uiHorizontalSearchRange = 0;     /* use default */
    EncParams.uiVerticalSearchRange = 0;       /* use default */
    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
    EncParams.uiIVopPeriod = 0;                /* use default */
    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */

    /**
     * Set the video profile and level */
    EncParams.videoProfile = pC->ewc.outputVideoProfile;
    EncParams.videoLevel= pC->ewc.outputVideoLevel;

    /* Per-format settings (overrides some of the defaults above) */
    switch ( pC->ewc.VideoStreamType )
    {
        case M4SYS_kH263:

            EncParams.Format = M4ENCODER_kH263;

            EncParams.uiStartingQuantizerValue = 10;
            EncParams.uiRateFactor = 1; /* default */

            EncParams.bErrorResilience = M4OSA_FALSE;
            EncParams.bDataPartitioning = M4OSA_FALSE;
            break;

        case M4SYS_kMPEG_4:

            EncParams.Format = M4ENCODER_kMPEG4;

            EncParams.uiStartingQuantizerValue = 8;
            /* Rate factor = output frame duration expressed in timescale
             * ticks, rounded to nearest; clamped to at least 1 below */
            EncParams.uiRateFactor = (M4OSA_UInt8)(( pC->dOutputFrameDuration
                * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);

            if( EncParams.uiRateFactor == 0 )
                EncParams.uiRateFactor = 1; /* default */

            /* Data partitioning implies error resilience and vice versa */
            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
            {
                EncParams.bErrorResilience = M4OSA_FALSE;
                EncParams.bDataPartitioning = M4OSA_FALSE;
            }
            else
            {
                EncParams.bErrorResilience = M4OSA_TRUE;
                EncParams.bDataPartitioning = M4OSA_TRUE;
            }
            break;

        case M4SYS_kH264:
            M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: M4SYS_H264");

            EncParams.Format = M4ENCODER_kH264;

            EncParams.uiStartingQuantizerValue = 10;
            EncParams.uiRateFactor = 1; /* default */

            EncParams.bErrorResilience = M4OSA_FALSE;
            EncParams.bDataPartitioning = M4OSA_FALSE;
            //EncParams.FrameRate = M4VIDEOEDITING_k5_FPS;
            break;

        default:
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intCreateVideoEncoder: Unknown videoStreamType 0x%x",
                pC->ewc.VideoStreamType);
            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
    }

    /* Bitrate: editor target bitrate, or the MMS-specific one */
    if( pC->bIsMMS == M4OSA_FALSE )
    {
        EncParams.Bitrate = pC->xVSS.outputVideoBitrate;

    }
    else
    {
        EncParams.Bitrate = pC->uiMMSVideoBitrate; /* RC */
        EncParams.uiTimeScale = 0; /* We let the encoder choose the timescale */
    }

    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctInit");
    /**
     * Init the video encoder (advanced settings version of the encoder Open function) */
    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
        pC->ShellAPI.pCurrentVideoEncoderUserData);

    if( M4NO_ERROR != err )
    {
        M4OSA_TRACE1_1(
            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
            err);
        return err;
    }

    /* State tracking lets the destroy path know how far init went */
    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctOpen");

    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
        &pC->ewc.WriterVideoAU, &EncParams);

    if( M4NO_ERROR != err )
    {
        M4OSA_TRACE1_1(
            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
            err);
        return err;
    }

    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
    M4OSA_TRACE1_0(
        "M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctStart");

    /* pFctStart is optional in the encoder shell interface */
    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
    {
        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
            pC->ewc.pEncContext);

        if( M4NO_ERROR != err )
        {
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
                err);
            return err;
        }
    }

    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;

    /**
     * Return */
    M4OSA_TRACE3_0("M4VSS3GPP_intCreateVideoEncoder: returning M4NO_ERROR");
    return M4NO_ERROR;
}
+
/**
 ******************************************************************************
 * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder()
 * @brief    Destroy the video encoder
 * @note     Walks the encoder state machine backwards (running -> stopped ->
 *           closed -> none). Teardown errors are only traced, never fatal:
 *           the function always proceeds to the next step so the encoder
 *           context ends up released and reset to NULL.
 * @param    pC    (IN/OUT) Internal edit context
 * @return   The last error reported by an encoder shell function, or
 *           M4NO_ERROR
 ******************************************************************************
 */
M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
{
    M4OSA_ERR err = M4NO_ERROR;

    if( M4OSA_NULL != pC->ewc.pEncContext )
    {
        if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
        {
            /* pFctStop is optional in the encoder shell interface */
            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
            {
                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
                    pC->ewc.pEncContext);

                if( M4NO_ERROR != err )
                {
                    M4OSA_TRACE1_1(
                        "M4VSS3GPP_intDestroyVideoEncoder:\
                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
                        err);
                    /* Well... how the heck do you handle a failed cleanup? */
                }
            }

            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
        }

        /* Has the encoder actually been opened? Don't close it if that's not the case. */
        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
        {
            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
                pC->ewc.pEncContext);

            if( M4NO_ERROR != err )
            {
                M4OSA_TRACE1_1(
                    "M4VSS3GPP_intDestroyVideoEncoder:\
                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
                    err);
                /* Well... how the heck do you handle a failed cleanup? */
            }

            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
        }

        /* Cleanup always runs, regardless of previous failures */
        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
            pC->ewc.pEncContext);

        if( M4NO_ERROR != err )
        {
            M4OSA_TRACE1_1(
                "M4VSS3GPP_intDestroyVideoEncoder:\
                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
                err);
            /**< We do not return the error here because we still have stuff to free */
        }

        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
        /**
         * Reset variable */
        pC->ewc.pEncContext = M4OSA_NULL;
    }

    M4OSA_TRACE3_1("M4VSS3GPP_intDestroyVideoEncoder: returning 0x%x", err);
    return err;
}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intSetH263TimeCounter()
+ * @brief Modify the time counter of the given H263 video AU
+ * @note
+ * @param pAuDataBuffer (IN/OUT) H263 Video AU to modify
+ * @param uiCts (IN) New time counter value
+ * @return nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
+ M4OSA_UInt8 uiCts )
+{
+ /*
+ * The H263 time counter is 8 bits located on the "x" below:
+ *
+ * |--------|--------|--------|--------|
+ * ???????? ???????? ??????xx xxxxxx??
+ */
+
+ /**
+ * Write the 2 bits on the third byte */
+ pAuDataBuffer[2] = ( pAuDataBuffer[2] & 0xFC) | (( uiCts >> 6) & 0x3);
+
+ /**
+ * Write the 6 bits on the fourth byte */
+ pAuDataBuffer[3] = ( ( uiCts << 2) & 0xFC) | (pAuDataBuffer[3] & 0x3);
+
+ return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intSetMPEG4Gov()
+ * @brief Modify the time info from Group Of VOP video AU
+ * @note
+ * @param pAuDataBuffer (IN) MPEG4 Video AU to modify
+ * @param uiCtsSec (IN) New GOV time info in second unit
+ * @return nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+ M4OSA_UInt32 uiCtsSec )
+{
+ /*
+ * The MPEG-4 time code length is 18 bits:
+ *
+ * hh mm marker ss
+ * xxxxx|xxx xxx 1 xxxx xx ??????
+ * |----- ---|--- - ----|-- ------|
+ */
+ M4OSA_UInt8 uiHh;
+ M4OSA_UInt8 uiMm;
+ M4OSA_UInt8 uiSs;
+ M4OSA_UInt8 uiTmp;
+
+ /**
+ * Write the 2 last bits ss */
+ uiSs = (M4OSA_UInt8)(uiCtsSec % 60); /**< modulo part */
+ pAuDataBuffer[2] = (( ( uiSs & 0x03) << 6) | (pAuDataBuffer[2] & 0x3F));
+
+ if( uiCtsSec < 60 )
+ {
+ /**
+ * Write the 3 last bits of mm, the marker bit (0x10 */
+ pAuDataBuffer[1] = (( 0x10) | (uiSs >> 2));
+
+ /**
+ * Write the 5 bits of hh and 3 of mm (out of 6) */
+ pAuDataBuffer[0] = 0;
+ }
+ else
+ {
+ /**
+ * Write the 3 last bits of mm, the marker bit (0x10 */
+ uiTmp = (M4OSA_UInt8)(uiCtsSec / 60); /**< integer part */
+ uiMm = (M4OSA_UInt8)(uiTmp % 60);
+ pAuDataBuffer[1] = (( uiMm << 5) | (0x10) | (uiSs >> 2));
+
+ if( uiTmp < 60 )
+ {
+ /**
+ * Write the 5 bits of hh and 3 of mm (out of 6) */
+ pAuDataBuffer[0] = ((uiMm >> 3));
+ }
+ else
+ {
+ /**
+ * Write the 5 bits of hh and 3 of mm (out of 6) */
+ uiHh = (M4OSA_UInt8)(uiTmp / 60);
+ pAuDataBuffer[0] = (( uiHh << 3) | (uiMm >> 3));
+ }
+ }
+ return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intGetMPEG4Gov()
+ * @brief Get the time info from Group Of VOP video AU
+ * @note
+ * @param pAuDataBuffer (IN) MPEG4 Video AU to modify
+ * @param pCtsSec (OUT) Current GOV time info in second unit
+ * @return nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+ M4OSA_UInt32 *pCtsSec )
+{
+ /*
+ * The MPEG-4 time code length is 18 bits:
+ *
+ * hh mm marker ss
+ * xxxxx|xxx xxx 1 xxxx xx ??????
+ * |----- ---|--- - ----|-- ------|
+ */
+ M4OSA_UInt8 uiHh;
+ M4OSA_UInt8 uiMm;
+ M4OSA_UInt8 uiSs;
+ M4OSA_UInt8 uiTmp;
+ M4OSA_UInt32 uiCtsSec;
+
+ /**
+ * Read ss */
+ uiSs = (( pAuDataBuffer[2] & 0xC0) >> 6);
+ uiTmp = (( pAuDataBuffer[1] & 0x0F) << 2);
+ uiCtsSec = uiSs + uiTmp;
+
+ /**
+ * Read mm */
+ uiMm = (( pAuDataBuffer[1] & 0xE0) >> 5);
+ uiTmp = (( pAuDataBuffer[0] & 0x07) << 3);
+ uiMm = uiMm + uiTmp;
+ uiCtsSec = ( uiMm * 60) + uiCtsSec;
+
+ /**
+ * Read hh */
+ uiHh = (( pAuDataBuffer[0] & 0xF8) >> 3);
+
+ if( uiHh )
+ {
+ uiCtsSec = ( uiHh * 3600) + uiCtsSec;
+ }
+
+ /*
+ * in sec */
+ *pCtsSec = uiCtsSec;
+
+ return;
+}
+
/**
 ******************************************************************************
 * M4OSA_ERR M4VSS3GPP_intAllocateYUV420()
 * @brief    Allocate the three YUV 4:2:0 planes
 * @note     Plane 0 is luma at full resolution; planes 1 and 2 are chroma at
 *           half width and half height. All planes are zero-filled. If the
 *           existing luma plane already matches the requested size, the
 *           buffers are reused as-is.
 * @param    pPlanes  (IN/OUT) valid pointer to 3 M4VIFI_ImagePlane structures
 * @param    uiWidth  (IN) Image width
 * @param    uiHeight (IN) Image height
 * @return   M4NO_ERROR, M4ERR_PARAMETER on NULL input, M4ERR_ALLOC on
 *           allocation failure (partial allocations are freed)
 ******************************************************************************
 */
static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
                                             M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight )
{
    if (pPlanes == M4OSA_NULL) {
        M4OSA_TRACE1_0("M4VSS3GPP_intAllocateYUV420: Invalid pPlanes pointer");
        return M4ERR_PARAMETER;
    }
    /* if the buffer is not NULL and same size with target size,
     * do not malloc again
     * (NOTE(review): only plane 0 is checked; planes 1/2 are assumed to be
     * consistent with it — confirm callers never leave them mismatched) */
    if (pPlanes[0].pac_data != M4OSA_NULL &&
        pPlanes[0].u_width == uiWidth &&
        pPlanes[0].u_height == uiHeight) {
        return M4NO_ERROR;
    }

    /* Luma plane: full resolution, stride == width, no offset */
    pPlanes[0].u_width = uiWidth;
    pPlanes[0].u_height = uiHeight;
    pPlanes[0].u_stride = uiWidth;
    pPlanes[0].u_topleft = 0;

    /* Release any previously allocated buffer of a different size */
    if (pPlanes[0].pac_data != M4OSA_NULL) {
        free(pPlanes[0].pac_data);
        pPlanes[0].pac_data = M4OSA_NULL;
    }
    pPlanes[0].pac_data = (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pPlanes[0].u_stride
        * pPlanes[0].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[0].pac_data");

    if( M4OSA_NULL == pPlanes[0].pac_data )
    {
        M4OSA_TRACE1_0(
            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[0].pac_data,\
            returning M4ERR_ALLOC");
        return M4ERR_ALLOC;
    }

    /* U plane: half width / half height (4:2:0 subsampling) */
    pPlanes[1].u_width = pPlanes[0].u_width >> 1;
    pPlanes[1].u_height = pPlanes[0].u_height >> 1;
    pPlanes[1].u_stride = pPlanes[1].u_width;
    pPlanes[1].u_topleft = 0;
    if (pPlanes[1].pac_data != M4OSA_NULL) {
        free(pPlanes[1].pac_data);
        pPlanes[1].pac_data = M4OSA_NULL;
    }
    pPlanes[1].pac_data = (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pPlanes[1].u_stride
        * pPlanes[1].u_height, M4VSS3GPP,(M4OSA_Char *) "pPlanes[1].pac_data");

    if( M4OSA_NULL == pPlanes[1].pac_data )
    {
        M4OSA_TRACE1_0(
            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[1].pac_data,\
            returning M4ERR_ALLOC");
        /* Roll back the luma allocation on failure */
        free((void *)pPlanes[0].pac_data);
        pPlanes[0].pac_data = M4OSA_NULL;
        return M4ERR_ALLOC;
    }

    /* V plane: same geometry as U */
    pPlanes[2].u_width = pPlanes[1].u_width;
    pPlanes[2].u_height = pPlanes[1].u_height;
    pPlanes[2].u_stride = pPlanes[2].u_width;
    pPlanes[2].u_topleft = 0;
    if (pPlanes[2].pac_data != M4OSA_NULL) {
        free(pPlanes[2].pac_data);
        pPlanes[2].pac_data = M4OSA_NULL;
    }
    pPlanes[2].pac_data = (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(pPlanes[2].u_stride
        * pPlanes[2].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[2].pac_data");

    if( M4OSA_NULL == pPlanes[2].pac_data )
    {
        M4OSA_TRACE1_0(
            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[2].pac_data,\
            returning M4ERR_ALLOC");
        /* Roll back both previous allocations on failure */
        free((void *)pPlanes[0].pac_data);
        free((void *)pPlanes[1].pac_data);
        pPlanes[0].pac_data = M4OSA_NULL;
        pPlanes[1].pac_data = M4OSA_NULL;
        return M4ERR_ALLOC;
    }

    /* Zero-fill all three planes */
    memset((void *)pPlanes[0].pac_data, 0, pPlanes[0].u_stride*pPlanes[0].u_height);
    memset((void *)pPlanes[1].pac_data, 0, pPlanes[1].u_stride*pPlanes[1].u_height);
    memset((void *)pPlanes[2].pac_data, 0, pPlanes[2].u_stride*pPlanes[2].u_height);
    /**
     * Return */
    M4OSA_TRACE3_0("M4VSS3GPP_intAllocateYUV420: returning M4NO_ERROR");
    return M4NO_ERROR;
}
+
/**
******************************************************************************
* M4OSA_ERR M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
*                                                M4OSA_FileReadPointer* pFileReadPtr,
*                                                M4VIFI_ImagePlane* pImagePlanes,
*                                                M4OSA_UInt32 width,
*                                                M4OSA_UInt32 height);
* @brief    It Coverts and resizes a ARGB8888 image to YUV420
* @note     Pipeline: read raw ARGB8888 from file -> strip the alpha channel
*           to get RGB888 -> (optionally) bilinear-resize to the output plane
*           size -> color-convert RGB888 to YUV420.
* @param    pFileIn         (IN) The ARGB888 input file
* @param    pFileReadPtr    (IN) Pointer on filesystem functions
* @param    pImagePlanes    (IN/OUT) Pointer on YUV420 output planes allocated by the user.
*                           ARGB8888 image  will be converted and resized to output
*                           YUV420 plane size
* @param    width   (IN) width of the ARGB8888
* @param    height  (IN) height of the ARGB8888
* @return   M4NO_ERROR: No error
* @return   M4ERR_ALLOC: memory error
* @return   M4ERR_PARAMETER: At least one of the function parameters is null
******************************************************************************
*/

M4OSA_ERR M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
          M4OSA_FileReadPointer* pFileReadPtr,
          M4VIFI_ImagePlane* pImagePlanes,
          M4OSA_UInt32 width,M4OSA_UInt32 height) {
    M4OSA_Context pARGBIn;
    M4VIFI_ImagePlane rgbPlane1 ,rgbPlane2;
    /* 4 bytes/pixel for ARGB8888, 3 bytes/pixel after alpha removal */
    M4OSA_UInt32 frameSize_argb = width * height * 4;
    M4OSA_UInt32 frameSize_rgb888 = width * height * 3;
    M4OSA_UInt32 i = 0,j= 0;
    M4OSA_ERR err = M4NO_ERROR;

    M4OSA_UInt8 *pArgbPlane =
        (M4OSA_UInt8*) M4OSA_32bitAlignedMalloc(frameSize_argb,
                                                M4VS, (M4OSA_Char*)"argb data");
    if (pArgbPlane == M4OSA_NULL) {
        M4OSA_TRACE1_0("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420: \
            Failed to allocate memory for ARGB plane");
        return M4ERR_ALLOC;
    }

    /* Open the input ARGB8888 file */
    err = pFileReadPtr->openRead(&pARGBIn, pFileIn, M4OSA_kFileRead);
    if (err != M4NO_ERROR) {
        M4OSA_TRACE1_2("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 : \
            Can not open input ARGB8888 file %s, error: 0x%x\n",pFileIn, err);
        free(pArgbPlane);
        pArgbPlane = M4OSA_NULL;
        goto cleanup;
    }

    /* Read the whole frame in one shot.
     * NOTE(review): frameSize_argb is updated with the byte count actually
     * read; a short read that still returns M4NO_ERROR would leave the tail
     * of the buffer uninitialized — confirm the reader's contract. */
    err = pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pArgbPlane,
                                 &frameSize_argb);
    if (err != M4NO_ERROR) {
        M4OSA_TRACE1_2("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 \
            Can not read ARGB8888 file %s, error: 0x%x\n",pFileIn, err);
        pFileReadPtr->closeRead(pARGBIn);
        free(pArgbPlane);
        pArgbPlane = M4OSA_NULL;
        goto cleanup;
    }

    err = pFileReadPtr->closeRead(pARGBIn);
    if(err != M4NO_ERROR) {
        M4OSA_TRACE1_2("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 \
            Can not close ARGB8888  file %s, error: 0x%x\n",pFileIn, err);
        free(pArgbPlane);
        pArgbPlane = M4OSA_NULL;
        goto cleanup;
    }

    rgbPlane1.pac_data =
        (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize_rgb888,
                                            M4VS, (M4OSA_Char*)"RGB888 plane1");
    if(rgbPlane1.pac_data == M4OSA_NULL) {
        M4OSA_TRACE1_0("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 \
            Failed to allocate memory for rgb plane1");
        free(pArgbPlane);
        return M4ERR_ALLOC;
    }

    rgbPlane1.u_height = height;
    rgbPlane1.u_width = width;
    rgbPlane1.u_stride = width*3;
    rgbPlane1.u_topleft = 0;


    /** Remove the alpha channel: drop the first byte of every 4-byte pixel
     * (the alpha component, assuming A,R,G,B byte order — TODO confirm) */
    for (i=0, j = 0; i < frameSize_argb; i++) {
        if ((i % 4) == 0) continue;
        rgbPlane1.pac_data[j] = pArgbPlane[i];
        j++;
    }
    free(pArgbPlane);

    /**
     * Check if resizing is required with color conversion */
    if(width != pImagePlanes->u_width || height != pImagePlanes->u_height) {

        frameSize_rgb888 = pImagePlanes->u_width * pImagePlanes->u_height * 3;
        rgbPlane2.pac_data =
            (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize_rgb888, M4VS,
                                                   (M4OSA_Char*)"rgb Plane2");
        if(rgbPlane2.pac_data == M4OSA_NULL) {
            M4OSA_TRACE1_0("Failed to allocate memory for rgb plane2");
            free(rgbPlane1.pac_data);
            return M4ERR_ALLOC;
        }
        rgbPlane2.u_height =  pImagePlanes->u_height;
        rgbPlane2.u_width = pImagePlanes->u_width;
        rgbPlane2.u_stride = pImagePlanes->u_width*3;
        rgbPlane2.u_topleft = 0;

        /* Resizing: bilinear filter from input size to output plane size */
        err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL,
                                                  &rgbPlane1, &rgbPlane2);
        free(rgbPlane1.pac_data);
        if(err != M4NO_ERROR) {
            M4OSA_TRACE1_1("error resizing RGB888 to RGB888: 0x%x\n", err);
            free(rgbPlane2.pac_data);
            return err;
        }

        /*Converting Resized RGB888 to YUV420 */
        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane2, pImagePlanes);
        free(rgbPlane2.pac_data);
        if(err != M4NO_ERROR) {
            M4OSA_TRACE1_1("error converting from RGB888 to YUV: 0x%x\n", err);
            return err;
        }
    } else {
        /* No resize needed: convert the alpha-stripped frame directly */
        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane1, pImagePlanes);
        if(err != M4NO_ERROR) {
            M4OSA_TRACE1_1("error when converting from RGB to YUV: 0x%x\n", err);
        }
        free(rgbPlane1.pac_data);
    }
cleanup:
    /* Shared exit for both success and the file-I/O error paths;
     * 'err' still holds the relevant status at this point */
    M4OSA_TRACE3_0("M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 exit");
    return err;
}
+
+M4OSA_ERR M4VSS3GPP_intApplyRenderingMode(M4VSS3GPP_InternalEditContext *pC,
+ M4xVSS_MediaRendering renderingMode,
+ M4VIFI_ImagePlane* pInplane,
+ M4VIFI_ImagePlane* pOutplane) {
+
+ M4OSA_ERR err = M4NO_ERROR;
+ M4AIR_Params airParams;
+ M4VIFI_ImagePlane pImagePlanesTemp[3];
+ M4OSA_UInt32 i = 0;
+
+ if (renderingMode == M4xVSS_kBlackBorders) {
+ memset((void *)pOutplane[0].pac_data, Y_PLANE_BORDER_VALUE,
+ (pOutplane[0].u_height*pOutplane[0].u_stride));
+ memset((void *)pOutplane[1].pac_data, U_PLANE_BORDER_VALUE,
+ (pOutplane[1].u_height*pOutplane[1].u_stride));
+ memset((void *)pOutplane[2].pac_data, V_PLANE_BORDER_VALUE,
+ (pOutplane[2].u_height*pOutplane[2].u_stride));
+ }
+
+ if (renderingMode == M4xVSS_kResizing) {
+ /**
+ * Call the resize filter.
+ * From the intermediate frame to the encoder image plane */
+ err = M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL,
+ pInplane, pOutplane);
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_1("M4VSS3GPP_intApplyRenderingMode: \
+ M4ViFilResizeBilinearYUV420toYUV420 returns 0x%x!", err);
+ return err;
+ }
+ } else {
+ M4VIFI_ImagePlane* pPlaneTemp = M4OSA_NULL;
+ M4OSA_UInt8* pOutPlaneY =
+ pOutplane[0].pac_data + pOutplane[0].u_topleft;
+ M4OSA_UInt8* pOutPlaneU =
+ pOutplane[1].pac_data + pOutplane[1].u_topleft;
+ M4OSA_UInt8* pOutPlaneV =
+ pOutplane[2].pac_data + pOutplane[2].u_topleft;
+ M4OSA_UInt8* pInPlaneY = M4OSA_NULL;
+ M4OSA_UInt8* pInPlaneU = M4OSA_NULL;
+ M4OSA_UInt8* pInPlaneV = M4OSA_NULL;
+
+ /* To keep media aspect ratio*/
+ /* Initialize AIR Params*/
+ airParams.m_inputCoord.m_x = 0;
+ airParams.m_inputCoord.m_y = 0;
+ airParams.m_inputSize.m_height = pInplane->u_height;
+ airParams.m_inputSize.m_width = pInplane->u_width;
+ airParams.m_outputSize.m_width = pOutplane->u_width;
+ airParams.m_outputSize.m_height = pOutplane->u_height;
+ airParams.m_bOutputStripe = M4OSA_FALSE;
+ airParams.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+
+ /**
+ Media rendering: Black borders*/
+ if (renderingMode == M4xVSS_kBlackBorders) {
+ pImagePlanesTemp[0].u_width = pOutplane[0].u_width;
+ pImagePlanesTemp[0].u_height = pOutplane[0].u_height;
+ pImagePlanesTemp[0].u_stride = pOutplane[0].u_width;
+ pImagePlanesTemp[0].u_topleft = 0;
+
+ pImagePlanesTemp[1].u_width = pOutplane[1].u_width;
+ pImagePlanesTemp[1].u_height = pOutplane[1].u_height;
+ pImagePlanesTemp[1].u_stride = pOutplane[1].u_width;
+ pImagePlanesTemp[1].u_topleft = 0;
+
+ pImagePlanesTemp[2].u_width = pOutplane[2].u_width;
+ pImagePlanesTemp[2].u_height = pOutplane[2].u_height;
+ pImagePlanesTemp[2].u_stride = pOutplane[2].u_width;
+ pImagePlanesTemp[2].u_topleft = 0;
+
+ /**
+ * Allocates plan in local image plane structure */
+ pImagePlanesTemp[0].pac_data =
+ (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ pImagePlanesTemp[0].u_width * pImagePlanesTemp[0].u_height,
+ M4VS, (M4OSA_Char *)"pImagePlaneTemp Y") ;
+ if (pImagePlanesTemp[0].pac_data == M4OSA_NULL) {
+ M4OSA_TRACE1_0("M4VSS3GPP_intApplyRenderingMode: Alloc Error");
+ return M4ERR_ALLOC;
+ }
+ pImagePlanesTemp[1].pac_data =
+ (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ pImagePlanesTemp[1].u_width * pImagePlanesTemp[1].u_height,
+ M4VS, (M4OSA_Char *)"pImagePlaneTemp U") ;
+ if (pImagePlanesTemp[1].pac_data == M4OSA_NULL) {
+ M4OSA_TRACE1_0("M4VSS3GPP_intApplyRenderingMode: Alloc Error");
+ free(pImagePlanesTemp[0].pac_data);
+ return M4ERR_ALLOC;
+ }
+ pImagePlanesTemp[2].pac_data =
+ (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ pImagePlanesTemp[2].u_width * pImagePlanesTemp[2].u_height,
+ M4VS, (M4OSA_Char *)"pImagePlaneTemp V") ;
+ if (pImagePlanesTemp[2].pac_data == M4OSA_NULL) {
+ M4OSA_TRACE1_0("M4VSS3GPP_intApplyRenderingMode: Alloc Error");
+ free(pImagePlanesTemp[0].pac_data);
+ free(pImagePlanesTemp[1].pac_data);
+ return M4ERR_ALLOC;
+ }
+
+ pInPlaneY = pImagePlanesTemp[0].pac_data ;
+ pInPlaneU = pImagePlanesTemp[1].pac_data ;
+ pInPlaneV = pImagePlanesTemp[2].pac_data ;
+
+ memset((void *)pImagePlanesTemp[0].pac_data, Y_PLANE_BORDER_VALUE,
+ (pImagePlanesTemp[0].u_height*pImagePlanesTemp[0].u_stride));
+ memset((void *)pImagePlanesTemp[1].pac_data, U_PLANE_BORDER_VALUE,
+ (pImagePlanesTemp[1].u_height*pImagePlanesTemp[1].u_stride));
+ memset((void *)pImagePlanesTemp[2].pac_data, V_PLANE_BORDER_VALUE,
+ (pImagePlanesTemp[2].u_height*pImagePlanesTemp[2].u_stride));
+
+ M4OSA_UInt32 height =
+ (pInplane->u_height * pOutplane->u_width) /pInplane->u_width;
+
+ if (height <= pOutplane->u_height) {
+ /**
+ * Black borders will be on the top and the bottom side */
+ airParams.m_outputSize.m_width = pOutplane->u_width;
+ airParams.m_outputSize.m_height = height;
+ /**
+ * Number of lines at the top */
+ pImagePlanesTemp[0].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_height -
+ airParams.m_outputSize.m_height)>>1)) *
+ pImagePlanesTemp[0].u_stride;
+ pImagePlanesTemp[0].u_height = airParams.m_outputSize.m_height;
+ pImagePlanesTemp[1].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_height -
+ (airParams.m_outputSize.m_height>>1)))>>1) *
+ pImagePlanesTemp[1].u_stride;
+ pImagePlanesTemp[1].u_height =
+ airParams.m_outputSize.m_height>>1;
+ pImagePlanesTemp[2].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_height -
+ (airParams.m_outputSize.m_height>>1)))>>1) *
+ pImagePlanesTemp[2].u_stride;
+ pImagePlanesTemp[2].u_height =
+ airParams.m_outputSize.m_height>>1;
+ } else {
+ /**
+ * Black borders will be on the left and right side */
+ airParams.m_outputSize.m_height = pOutplane->u_height;
+ airParams.m_outputSize.m_width =
+ (M4OSA_UInt32)((pInplane->u_width * pOutplane->u_height)/pInplane->u_height);
+
+ pImagePlanesTemp[0].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_width -
+ airParams.m_outputSize.m_width)>>1));
+ pImagePlanesTemp[0].u_width = airParams.m_outputSize.m_width;
+ pImagePlanesTemp[1].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_width -
+ (airParams.m_outputSize.m_width>>1)))>>1);
+ pImagePlanesTemp[1].u_width = airParams.m_outputSize.m_width>>1;
+ pImagePlanesTemp[2].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_width -
+ (airParams.m_outputSize.m_width>>1)))>>1);
+ pImagePlanesTemp[2].u_width = airParams.m_outputSize.m_width>>1;
+ }
+
+ /**
+ * Width and height have to be even */
+ airParams.m_outputSize.m_width =
+ (airParams.m_outputSize.m_width>>1)<<1;
+ airParams.m_outputSize.m_height =
+ (airParams.m_outputSize.m_height>>1)<<1;
+ airParams.m_inputSize.m_width =
+ (airParams.m_inputSize.m_width>>1)<<1;
+ airParams.m_inputSize.m_height =
+ (airParams.m_inputSize.m_height>>1)<<1;
+ pImagePlanesTemp[0].u_width =
+ (pImagePlanesTemp[0].u_width>>1)<<1;
+ pImagePlanesTemp[1].u_width =
+ (pImagePlanesTemp[1].u_width>>1)<<1;
+ pImagePlanesTemp[2].u_width =
+ (pImagePlanesTemp[2].u_width>>1)<<1;
+ pImagePlanesTemp[0].u_height =
+ (pImagePlanesTemp[0].u_height>>1)<<1;
+ pImagePlanesTemp[1].u_height =
+ (pImagePlanesTemp[1].u_height>>1)<<1;
+ pImagePlanesTemp[2].u_height =
+ (pImagePlanesTemp[2].u_height>>1)<<1;
+
+ /**
+ * Check that values are coherent */
+ if (airParams.m_inputSize.m_height ==
+ airParams.m_outputSize.m_height) {
+ airParams.m_inputSize.m_width =
+ airParams.m_outputSize.m_width;
+ } else if (airParams.m_inputSize.m_width ==
+ airParams.m_outputSize.m_width) {
+ airParams.m_inputSize.m_height =
+ airParams.m_outputSize.m_height;
+ }
+ pPlaneTemp = pImagePlanesTemp;
+ }
+
+ /**
+ * Media rendering: Cropping*/
+ if (renderingMode == M4xVSS_kCropping) {
+ airParams.m_outputSize.m_height = pOutplane->u_height;
+ airParams.m_outputSize.m_width = pOutplane->u_width;
+ if ((airParams.m_outputSize.m_height *
+ airParams.m_inputSize.m_width)/airParams.m_outputSize.m_width <
+ airParams.m_inputSize.m_height) {
+ /* Height will be cropped */
+ airParams.m_inputSize.m_height =
+ (M4OSA_UInt32)((airParams.m_outputSize.m_height *
+ airParams.m_inputSize.m_width)/airParams.m_outputSize.m_width);
+ airParams.m_inputSize.m_height =
+ (airParams.m_inputSize.m_height>>1)<<1;
+ airParams.m_inputCoord.m_y =
+ (M4OSA_Int32)((M4OSA_Int32)((pInplane->u_height -
+ airParams.m_inputSize.m_height))>>1);
+ } else {
+ /* Width will be cropped */
+ airParams.m_inputSize.m_width =
+ (M4OSA_UInt32)((airParams.m_outputSize.m_width *
+ airParams.m_inputSize.m_height)/airParams.m_outputSize.m_height);
+ airParams.m_inputSize.m_width =
+ (airParams.m_inputSize.m_width>>1)<<1;
+ airParams.m_inputCoord.m_x =
+ (M4OSA_Int32)((M4OSA_Int32)((pInplane->u_width -
+ airParams.m_inputSize.m_width))>>1);
+ }
+ pPlaneTemp = pOutplane;
+ }
+ /**
+ * Call AIR functions */
+ if (M4OSA_NULL == pC->m_air_context) {
+ err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
+ if(err != M4NO_ERROR) {
+ M4OSA_TRACE1_1("M4VSS3GPP_intApplyRenderingMode: \
+ M4AIR_create returned error 0x%x", err);
+ goto cleanUp;
+ }
+ }
+
+ err = M4AIR_configure(pC->m_air_context, &airParams);
+ if (err != M4NO_ERROR) {
+ M4OSA_TRACE1_1("M4VSS3GPP_intApplyRenderingMode: \
+ Error when configuring AIR: 0x%x", err);
+ M4AIR_cleanUp(pC->m_air_context);
+ goto cleanUp;
+ }
+
+ err = M4AIR_get(pC->m_air_context, pInplane, pPlaneTemp);
+ if (err != M4NO_ERROR) {
+ M4OSA_TRACE1_1("M4VSS3GPP_intApplyRenderingMode: \
+ Error when getting AIR plane: 0x%x", err);
+ M4AIR_cleanUp(pC->m_air_context);
+ goto cleanUp;
+ }
+
+ if (renderingMode == M4xVSS_kBlackBorders) {
+ for (i=0; i<pOutplane[0].u_height; i++) {
+ memcpy((void *)pOutPlaneY, (void *)pInPlaneY,
+ pOutplane[0].u_width);
+ pInPlaneY += pOutplane[0].u_width;
+ pOutPlaneY += pOutplane[0].u_stride;
+ }
+ for (i=0; i<pOutplane[1].u_height; i++) {
+ memcpy((void *)pOutPlaneU, (void *)pInPlaneU,
+ pOutplane[1].u_width);
+ pInPlaneU += pOutplane[1].u_width;
+ pOutPlaneU += pOutplane[1].u_stride;
+ }
+ for (i=0; i<pOutplane[2].u_height; i++) {
+ memcpy((void *)pOutPlaneV, (void *)pInPlaneV,
+ pOutplane[2].u_width);
+ pInPlaneV += pOutplane[2].u_width;
+ pOutPlaneV += pOutplane[2].u_stride;
+ }
+ }
+ }
+cleanUp:
+ if (renderingMode == M4xVSS_kBlackBorders) {
+ for (i=0; i<3; i++) {
+ if (pImagePlanesTemp[i].pac_data != M4OSA_NULL) {
+ free(pImagePlanesTemp[i].pac_data);
+ pImagePlanesTemp[i].pac_data = M4OSA_NULL;
+ }
+ }
+ }
+ return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intSetYuv420PlaneFromARGB888()
+ * @brief   Convert a still ARGB8888 image file into YUV420 planes and
+ *          register them with the clip's video decoder via setOption.
+ * @note    Two plane sets are allocated: pPlaneYuv at the still picture's
+ *          native size, and pPlaneYuvWithEffect at the output video size
+ *          (pC->ewc.uiVideoWidth/Height). Each set uses one contiguous
+ *          buffer holding Y, U and V. On success the clip context owns both
+ *          plane sets; on failure everything allocated here is released and
+ *          the context members are reset to NULL.
+ * @param   pC        (IN) Internal edit context (file reader, output size)
+ * @param   pClipCtxt (IN/OUT) Clip context receiving the plane sets
+ * @return  M4NO_ERROR on success; M4ERR_ALLOC on allocation failure; any
+ *          error from the ARGB conversion or the decoder setOption calls
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intSetYuv420PlaneFromARGB888 (
+    M4VSS3GPP_InternalEditContext *pC,
+    M4VSS3GPP_ClipContext* pClipCtxt) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    // Allocate the descriptor array for the native-size YUV planes
+    pClipCtxt->pPlaneYuv =
+        (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(
+            3*sizeof(M4VIFI_ImagePlane), M4VS,
+            (M4OSA_Char*)"pPlaneYuv");
+
+    if (pClipCtxt->pPlaneYuv == M4OSA_NULL) {
+        return M4ERR_ALLOC;
+    }
+
+    pClipCtxt->pPlaneYuv[0].u_height =
+        pClipCtxt->pSettings->ClipProperties.uiStillPicHeight;
+    pClipCtxt->pPlaneYuv[0].u_width =
+        pClipCtxt->pSettings->ClipProperties.uiStillPicWidth;
+    pClipCtxt->pPlaneYuv[0].u_stride = pClipCtxt->pPlaneYuv[0].u_width;
+    pClipCtxt->pPlaneYuv[0].u_topleft = 0;
+
+    // A YUV420 frame needs width*height*3/2 bytes. Use integer arithmetic
+    // instead of the original "* 1.5" double multiplication.
+    pClipCtxt->pPlaneYuv[0].pac_data =
+        (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
+            (pClipCtxt->pPlaneYuv[0].u_height * pClipCtxt->pPlaneYuv[0].u_width * 3) / 2,
+            M4VS, (M4OSA_Char*)"imageClip YUV data");
+    if (pClipCtxt->pPlaneYuv[0].pac_data == M4OSA_NULL) {
+        free(pClipCtxt->pPlaneYuv);
+        pClipCtxt->pPlaneYuv = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    // U and V planes are quarter size and alias into the contiguous buffer
+    pClipCtxt->pPlaneYuv[1].u_height = pClipCtxt->pPlaneYuv[0].u_height >>1;
+    pClipCtxt->pPlaneYuv[1].u_width = pClipCtxt->pPlaneYuv[0].u_width >> 1;
+    pClipCtxt->pPlaneYuv[1].u_stride = pClipCtxt->pPlaneYuv[1].u_width;
+    pClipCtxt->pPlaneYuv[1].u_topleft = 0;
+    pClipCtxt->pPlaneYuv[1].pac_data = (M4VIFI_UInt8*)(
+        pClipCtxt->pPlaneYuv[0].pac_data +
+        pClipCtxt->pPlaneYuv[0].u_height * pClipCtxt->pPlaneYuv[0].u_width);
+
+    pClipCtxt->pPlaneYuv[2].u_height = pClipCtxt->pPlaneYuv[0].u_height >>1;
+    pClipCtxt->pPlaneYuv[2].u_width = pClipCtxt->pPlaneYuv[0].u_width >> 1;
+    pClipCtxt->pPlaneYuv[2].u_stride = pClipCtxt->pPlaneYuv[2].u_width;
+    pClipCtxt->pPlaneYuv[2].u_topleft = 0;
+    pClipCtxt->pPlaneYuv[2].pac_data = (M4VIFI_UInt8*)(
+        pClipCtxt->pPlaneYuv[1].pac_data +
+        pClipCtxt->pPlaneYuv[1].u_height * pClipCtxt->pPlaneYuv[1].u_width);
+
+    // Decode + scale the ARGB8888 file into the YUV planes
+    err = M4VSS3GPP_internalConvertAndResizeARGB8888toYUV420 (
+        pClipCtxt->pSettings->pFile,
+        pC->pOsaFileReadPtr,
+        pClipCtxt->pPlaneYuv,
+        pClipCtxt->pSettings->ClipProperties.uiStillPicWidth,
+        pClipCtxt->pSettings->ClipProperties.uiStillPicHeight);
+    if (M4NO_ERROR != err) {
+        free(pClipCtxt->pPlaneYuv[0].pac_data);
+        free(pClipCtxt->pPlaneYuv);
+        pClipCtxt->pPlaneYuv = M4OSA_NULL;
+        return err;
+    }
+
+    // Hand the decoded YUV data to the (null) decoder using setOption
+    err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption (
+        pClipCtxt->pViDecCtxt,
+        M4DECODER_kOptionID_DecYuvData,
+        (M4OSA_DataOption)pClipCtxt->pPlaneYuv);
+    if (M4NO_ERROR != err) {
+        free(pClipCtxt->pPlaneYuv[0].pac_data);
+        free(pClipCtxt->pPlaneYuv);
+        pClipCtxt->pPlaneYuv = M4OSA_NULL;
+        return err;
+    }
+
+    pClipCtxt->pSettings->ClipProperties.bSetImageData = M4OSA_TRUE;
+
+    // Allocate the plane set that will receive the frame with effects,
+    // sized to the output video dimensions
+    pClipCtxt->pPlaneYuvWithEffect =
+        (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(
+            3*sizeof(M4VIFI_ImagePlane), M4VS,
+            (M4OSA_Char*)"pPlaneYuvWithEffect");
+    if (pClipCtxt->pPlaneYuvWithEffect == M4OSA_NULL) {
+        free(pClipCtxt->pPlaneYuv[0].pac_data);
+        free(pClipCtxt->pPlaneYuv);
+        pClipCtxt->pPlaneYuv = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    pClipCtxt->pPlaneYuvWithEffect[0].u_height = pC->ewc.uiVideoHeight;
+    pClipCtxt->pPlaneYuvWithEffect[0].u_width = pC->ewc.uiVideoWidth;
+    pClipCtxt->pPlaneYuvWithEffect[0].u_stride = pC->ewc.uiVideoWidth;
+    pClipCtxt->pPlaneYuvWithEffect[0].u_topleft = 0;
+
+    pClipCtxt->pPlaneYuvWithEffect[0].pac_data =
+        (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
+            (pC->ewc.uiVideoHeight * pC->ewc.uiVideoWidth * 3) / 2,
+            M4VS, (M4OSA_Char*)"imageClip YUV data");
+    if (pClipCtxt->pPlaneYuvWithEffect[0].pac_data == M4OSA_NULL) {
+        free(pClipCtxt->pPlaneYuv[0].pac_data);
+        free(pClipCtxt->pPlaneYuv);
+        free(pClipCtxt->pPlaneYuvWithEffect);
+        pClipCtxt->pPlaneYuv = M4OSA_NULL;
+        pClipCtxt->pPlaneYuvWithEffect = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    pClipCtxt->pPlaneYuvWithEffect[1].u_height =
+        pClipCtxt->pPlaneYuvWithEffect[0].u_height >>1;
+    pClipCtxt->pPlaneYuvWithEffect[1].u_width =
+        pClipCtxt->pPlaneYuvWithEffect[0].u_width >> 1;
+    pClipCtxt->pPlaneYuvWithEffect[1].u_stride =
+        pClipCtxt->pPlaneYuvWithEffect[1].u_width;
+    pClipCtxt->pPlaneYuvWithEffect[1].u_topleft = 0;
+    pClipCtxt->pPlaneYuvWithEffect[1].pac_data = (M4VIFI_UInt8*)(
+        pClipCtxt->pPlaneYuvWithEffect[0].pac_data +
+        pClipCtxt->pPlaneYuvWithEffect[0].u_height * pClipCtxt->pPlaneYuvWithEffect[0].u_width);
+
+    pClipCtxt->pPlaneYuvWithEffect[2].u_height =
+        pClipCtxt->pPlaneYuvWithEffect[0].u_height >>1;
+    pClipCtxt->pPlaneYuvWithEffect[2].u_width =
+        pClipCtxt->pPlaneYuvWithEffect[0].u_width >> 1;
+    pClipCtxt->pPlaneYuvWithEffect[2].u_stride =
+        pClipCtxt->pPlaneYuvWithEffect[2].u_width;
+    pClipCtxt->pPlaneYuvWithEffect[2].u_topleft = 0;
+    pClipCtxt->pPlaneYuvWithEffect[2].pac_data = (M4VIFI_UInt8*)(
+        pClipCtxt->pPlaneYuvWithEffect[1].pac_data +
+        pClipCtxt->pPlaneYuvWithEffect[1].u_height * pClipCtxt->pPlaneYuvWithEffect[1].u_width);
+
+    err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+        pClipCtxt->pViDecCtxt, M4DECODER_kOptionID_YuvWithEffectContiguous,
+        (M4OSA_DataOption)pClipCtxt->pPlaneYuvWithEffect);
+    if (M4NO_ERROR != err) {
+        // Fix: the original code leaked pPlaneYuvWithEffect[0].pac_data on
+        // this error path (only the descriptor array was freed).
+        free(pClipCtxt->pPlaneYuvWithEffect[0].pac_data);
+        free(pClipCtxt->pPlaneYuv[0].pac_data);
+        free(pClipCtxt->pPlaneYuv);
+        free(pClipCtxt->pPlaneYuvWithEffect);
+        pClipCtxt->pPlaneYuv = M4OSA_NULL;
+        pClipCtxt->pPlaneYuvWithEffect = M4OSA_NULL;
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intRenderFrameWithEffect()
+ * @brief   Render one video frame of a clip at media time 'ts', applying
+ *          (as needed) rotation, resize, the clip's active video effects,
+ *          the rendering mode (black borders / cropping / resizing) and a
+ *          framing overlay; the result lands in pC->yuv1 (clip 1) or
+ *          pC->yuv2 (clip 2), and pClipCtxt->lastDecodedPlane is updated to
+ *          point at the rendered plane.
+ * @param   pC             (IN/OUT) Internal edit context
+ * @param   pClipCtxt      (IN/OUT) Context of the clip being rendered
+ * @param   ts             (IN) Media time of the frame to render
+ * @param   bIsClip1       (IN) M4OSA_TRUE for clip 1, M4OSA_FALSE for clip 2
+ * @param   pResizePlane   (IN/OUT) Scratch plane set used when the frame is
+ *                         resized/rotated and effects must be applied
+ * @param   pPlaneNoResize (IN/OUT) Scratch plane set used in the
+ *                         no-resize/no-rotate path with effects
+ * @param   pPlaneOut      (IN) Output plane; only its dimensions are read
+ *                         here, to size the temporary overlay plane
+ * @return  M4NO_ERROR or an error from the decoder/effect/rendering helpers.
+ *          NOTE(review): some overlay/allocation failures are stored in
+ *          pC->ewc.VppError and this function then returns M4NO_ERROR —
+ *          callers apparently check VppError separately; confirm.
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intRenderFrameWithEffect(M4VSS3GPP_InternalEditContext *pC,
+    M4VSS3GPP_ClipContext* pClipCtxt,
+    M4_MediaTime ts,
+    M4OSA_Bool bIsClip1,
+    M4VIFI_ImagePlane *pResizePlane,
+    M4VIFI_ImagePlane *pPlaneNoResize,
+    M4VIFI_ImagePlane *pPlaneOut) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8 numEffects = 0;
+    M4VIFI_ImagePlane *pDecoderRenderFrame = M4OSA_NULL;
+    M4OSA_UInt32 yuvFrameWidth = 0, yuvFrameHeight = 0;
+    M4VIFI_ImagePlane* pTmp = M4OSA_NULL;
+    // Temporary plane set for the overlay path; zeroed so that the cleanup
+    // loops below can safely free only what was actually allocated.
+    M4VIFI_ImagePlane pTemp[3];
+    M4OSA_UInt8 i = 0;
+    M4OSA_Bool bSkipFramingEffect = M4OSA_FALSE;
+
+    memset((void *)pTemp, 0, 3*sizeof(M4VIFI_ImagePlane));
+    /* Resize or rotate case */
+    if (M4OSA_NULL != pClipCtxt->m_pPreResizeFrame) {
+        /**
+        * If we do modify the image, we need an intermediate image plane */
+        err = M4VSS3GPP_intAllocateYUV420(pResizePlane,
+            pClipCtxt->m_pPreResizeFrame[0].u_width,
+            pClipCtxt->m_pPreResizeFrame[0].u_height);
+        if (M4NO_ERROR != err) {
+            M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                M4VSS3GPP_intAllocateYUV420 returns 0x%x", err);
+            return err;
+        }
+
+        // Still-image clip with no active effects: let the decoder render
+        // directly into the pre-built pPlaneYuvWithEffect plane set.
+        if ((pClipCtxt->pSettings->FileType ==
+             M4VIDEOEDITING_kFileType_ARGB8888) &&
+            (pC->nbActiveEffects == 0) &&
+            (pClipCtxt->bGetYuvDataFromDecoder == M4OSA_FALSE)) {
+
+            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                      pClipCtxt->pViDecCtxt,
+                      M4DECODER_kOptionID_EnableYuvWithEffect,
+                      (M4OSA_DataOption)M4OSA_TRUE);
+            if (M4NO_ERROR == err) {
+                pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                    pClipCtxt->pViDecCtxt, &ts,
+                    pClipCtxt->pPlaneYuvWithEffect, M4OSA_TRUE);
+            }
+
+        } else {
+            // Otherwise render into the pre-resize frame; for still images
+            // first disable the direct "YUV with effect" decoder path.
+            if (pClipCtxt->pSettings->FileType ==
+                M4VIDEOEDITING_kFileType_ARGB8888) {
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                          pClipCtxt->pViDecCtxt,
+                          M4DECODER_kOptionID_EnableYuvWithEffect,
+                          (M4OSA_DataOption)M4OSA_FALSE);
+            }
+            if (M4NO_ERROR == err) {
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                          pClipCtxt->pViDecCtxt, &ts,
+                          pClipCtxt->m_pPreResizeFrame, M4OSA_TRUE);
+            }
+
+        }
+        if (M4NO_ERROR != err) {
+            M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                returns error 0x%x", err);
+            return err;
+        }
+
+        // Apply the clip's rotation metadata to the rendered frame (video
+        // clips only; ARGB stills carry no rotation here).
+        if (pClipCtxt->pSettings->FileType !=
+            M4VIDEOEDITING_kFileType_ARGB8888) {
+            if (0 != pClipCtxt->pSettings->ClipProperties.videoRotationDegrees) {
+                // Save width and height of un-rotated frame
+                yuvFrameWidth = pClipCtxt->m_pPreResizeFrame[0].u_width;
+                yuvFrameHeight = pClipCtxt->m_pPreResizeFrame[0].u_height;
+                err = M4VSS3GPP_intRotateVideo(pClipCtxt->m_pPreResizeFrame,
+                          pClipCtxt->pSettings->ClipProperties.videoRotationDegrees);
+                if (M4NO_ERROR != err) {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                        rotateVideo() returns error 0x%x", err);
+                    return err;
+                }
+                /* Set the new video size for temporary buffer */
+                M4VSS3GPP_intSetYUV420Plane(pResizePlane,
+                    pClipCtxt->m_pPreResizeFrame[0].u_width,
+                    pClipCtxt->m_pPreResizeFrame[0].u_height);
+            }
+        }
+
+        // Pick the active-effect count for this clip and record which clip
+        // of the transition we are rendering.
+        // NOTE(review): clip 1 uses nbActiveEffects, clip 2 uses
+        // nbActiveEffects1 — looks inverted relative to the naming; verify
+        // against how the counters are filled elsewhere.
+        if (bIsClip1 == M4OSA_TRUE) {
+            pC->bIssecondClip = M4OSA_FALSE;
+            numEffects = pC->nbActiveEffects;
+        } else {
+            numEffects = pC->nbActiveEffects1;
+            pC->bIssecondClip = M4OSA_TRUE;
+        }
+
+        if ( numEffects > 0) {
+            pClipCtxt->bGetYuvDataFromDecoder = M4OSA_TRUE;
+            /* If video frame need to be resized or rotated,
+             * then apply the overlay after the frame was rendered with rendering mode.
+             * Here skip the framing(overlay) effect when applying video Effect. */
+            bSkipFramingEffect = M4OSA_TRUE;
+            err = M4VSS3GPP_intApplyVideoEffect(pC,
+                      pClipCtxt->m_pPreResizeFrame, pResizePlane, bSkipFramingEffect);
+            if (M4NO_ERROR != err) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                    M4VSS3GPP_intApplyVideoEffect() err 0x%x", err);
+                return err;
+            }
+            pDecoderRenderFrame= pResizePlane;
+        } else {
+            pDecoderRenderFrame = pClipCtxt->m_pPreResizeFrame;
+        }
+        /* Do rendering mode */
+        if ((pClipCtxt->bGetYuvDataFromDecoder == M4OSA_TRUE) ||
+            (pClipCtxt->pSettings->FileType !=
+             M4VIDEOEDITING_kFileType_ARGB8888)) {
+            // When a framing overlay is active, render into the temporary
+            // pTemp planes so the overlay can be composed afterwards;
+            // otherwise render straight into the final yuv1/yuv2 plane.
+            if (bIsClip1 == M4OSA_TRUE) {
+                if (pC->bClip1ActiveFramingEffect == M4OSA_TRUE) {
+                    err = M4VSS3GPP_intAllocateYUV420(pTemp,
+                              pPlaneOut[0].u_width, pPlaneOut[0].u_height);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                            M4VSS3GPP_intAllocateYUV420 error 0x%x", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    pTmp = pTemp;
+                } else {
+                    pTmp = pC->yuv1;
+                }
+                err = M4VSS3GPP_intApplyRenderingMode (pC,
+                          pClipCtxt->pSettings->xVSS.MediaRendering,
+                          pDecoderRenderFrame,pTmp);
+            } else {
+                if (pC->bClip2ActiveFramingEffect == M4OSA_TRUE) {
+                    err = M4VSS3GPP_intAllocateYUV420(pTemp,
+                              pPlaneOut[0].u_width, pPlaneOut[0].u_height);
+                    if (M4NO_ERROR != err) {
+                        M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                            M4VSS3GPP_intAllocateYUV420 error 0x%x", err);
+                        pC->ewc.VppError = err;
+                        return M4NO_ERROR;
+                    }
+                    pTmp = pTemp;
+                } else {
+                    pTmp = pC->yuv2;
+                }
+                err = M4VSS3GPP_intApplyRenderingMode (pC,
+                          pClipCtxt->pSettings->xVSS.MediaRendering,
+                          pDecoderRenderFrame,pTmp);
+            }
+            if (M4NO_ERROR != err) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                    M4VSS3GPP_intApplyRenderingMode error 0x%x ", err);
+                // Release the temporary overlay planes before bailing out
+                for (i=0; i<3; i++) {
+                    if (pTemp[i].pac_data != M4OSA_NULL) {
+                        free(pTemp[i].pac_data);
+                        pTemp[i].pac_data = M4OSA_NULL;
+                    }
+                }
+                return err;
+            }
+            /* Apply overlay if overlay exist*/
+            if (bIsClip1 == M4OSA_TRUE) {
+                if (pC->bClip1ActiveFramingEffect == M4OSA_TRUE) {
+                    err = M4VSS3GPP_intApplyVideoOverlay(pC,
+                              pTemp, pC->yuv1);
+                }
+                pClipCtxt->lastDecodedPlane = pC->yuv1;
+            } else {
+                if (pC->bClip2ActiveFramingEffect == M4OSA_TRUE) {
+                    err = M4VSS3GPP_intApplyVideoOverlay(pC,
+                              pTemp, pC->yuv2);
+                }
+                pClipCtxt->lastDecodedPlane = pC->yuv2;
+            }
+            if (M4NO_ERROR != err) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: \
+                    M4VSS3GPP_intApplyVideoOverlay) error 0x%x ", err);
+                // Overlay failure is reported through VppError, not the
+                // return value (see function header note).
+                pC->ewc.VppError = err;
+                for (i=0; i<3; i++) {
+                    if (pTemp[i].pac_data != M4OSA_NULL) {
+                        free(pTemp[i].pac_data);
+                        pTemp[i].pac_data = M4OSA_NULL;
+                    }
+                }
+                return M4NO_ERROR;
+            }
+        } else {
+            // ARGB still without effects: the decoder already rendered into
+            // the pre-built plane set above.
+            pClipCtxt->lastDecodedPlane = pClipCtxt->pPlaneYuvWithEffect;
+        }
+        // free the temp buffer
+        for (i=0; i<3; i++) {
+            if (pTemp[i].pac_data != M4OSA_NULL) {
+                free(pTemp[i].pac_data);
+                pTemp[i].pac_data = M4OSA_NULL;
+            }
+        }
+
+        // For ARGB stills whose effects just finished, hand the final
+        // (non-contiguous) yuv1/yuv2 planes back to the decoder and drop the
+        // "fetch from decoder" flag until effects become active again.
+        if ((pClipCtxt->pSettings->FileType ==
+             M4VIDEOEDITING_kFileType_ARGB8888) &&
+            (pC->nbActiveEffects == 0) &&
+            (pClipCtxt->bGetYuvDataFromDecoder == M4OSA_TRUE)) {
+            if (bIsClip1 == M4OSA_TRUE) {
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                          pClipCtxt->pViDecCtxt,
+                          M4DECODER_kOptionID_YuvWithEffectNonContiguous,
+                          (M4OSA_DataOption)pC->yuv1);
+            } else {
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                          pClipCtxt->pViDecCtxt,
+                          M4DECODER_kOptionID_YuvWithEffectNonContiguous,
+                          (M4OSA_DataOption)pC->yuv2);
+            }
+            if (M4NO_ERROR != err) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                    null decoder setOption error 0x%x ", err);
+                return err;
+            }
+            pClipCtxt->bGetYuvDataFromDecoder = M4OSA_FALSE;
+        }
+
+        // Reset original width and height for resize frame plane
+        // NOTE(review): yuvFrameWidth/Height are only assigned in the
+        // non-ARGB rotation branch above; for an ARGB clip with a 90/270
+        // rotation they would still be 0 here — presumably unreachable
+        // (stills carry rotation 0), but confirm.
+        if (0 != pClipCtxt->pSettings->ClipProperties.videoRotationDegrees &&
+            180 != pClipCtxt->pSettings->ClipProperties.videoRotationDegrees) {
+
+            M4VSS3GPP_intSetYUV420Plane(pClipCtxt->m_pPreResizeFrame,
+                yuvFrameWidth, yuvFrameHeight);
+        }
+
+    } else {
+        /* No rotate or no resize case*/
+        if (bIsClip1 == M4OSA_TRUE) {
+            numEffects = pC->nbActiveEffects;
+        } else {
+            numEffects = pC->nbActiveEffects1;
+        }
+
+        if(numEffects > 0) {
+            // Render into the scratch plane, then apply effects (including
+            // framing, since no later rendering-mode pass will run) into the
+            // final yuv1/yuv2 plane.
+            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                      pClipCtxt->pViDecCtxt, &ts, pPlaneNoResize, M4OSA_TRUE);
+            if (M4NO_ERROR != err) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                    Render returns error 0x%x", err);
+                return err;
+            }
+
+            bSkipFramingEffect = M4OSA_FALSE;
+            if (bIsClip1 == M4OSA_TRUE) {
+                pC->bIssecondClip = M4OSA_FALSE;
+                err = M4VSS3GPP_intApplyVideoEffect(pC, pPlaneNoResize,
+                          pC->yuv1, bSkipFramingEffect);
+                pClipCtxt->lastDecodedPlane = pC->yuv1;
+            } else {
+                pC->bIssecondClip = M4OSA_TRUE;
+                err = M4VSS3GPP_intApplyVideoEffect(pC, pPlaneNoResize,
+                          pC->yuv2, bSkipFramingEffect);
+                pClipCtxt->lastDecodedPlane = pC->yuv2;
+            }
+
+            if (M4NO_ERROR != err) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                    M4VSS3GPP_intApplyVideoEffect error 0x%x", err);
+                return err;
+            }
+        } else {
+
+            // No effects: render directly into the final plane
+            if (bIsClip1 == M4OSA_TRUE) {
+                pTmp = pC->yuv1;
+            } else {
+                pTmp = pC->yuv2;
+            }
+            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                      pClipCtxt->pViDecCtxt, &ts, pTmp, M4OSA_TRUE);
+            if (M4NO_ERROR != err) {
+                M4OSA_TRACE1_1("M4VSS3GPP_intRenderFrameWithEffect: \
+                    Render returns error 0x%x,", err);
+                return err;
+            }
+            pClipCtxt->lastDecodedPlane = pTmp;
+        }
+        // Remember the composition time of the rendered frame
+        pClipCtxt->iVideoRenderCts = (M4OSA_Int32)ts;
+    }
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intRotateVideo()
+ * @brief   Rotate a YUV420 frame in place by 90, 180 or 270 degrees.
+ * @note    For 90/270 the rotation goes through temporary planes (width and
+ *          height swap), the result is copied back over the input buffers
+ *          and the plane dimensions are swapped. 180-degree rotation is done
+ *          directly in place.
+ * @param   pPlaneIn       (IN/OUT) Array of 3 image planes (Y, U, V)
+ * @param   rotationDegree (IN) 90, 180 or 270
+ * @return  M4NO_ERROR on success, M4ERR_PARAMETER for any other angle,
+ *          M4ERR_ALLOC if the temporary planes cannot be allocated
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intRotateVideo(M4VIFI_ImagePlane* pPlaneIn,
+    M4OSA_UInt32 rotationDegree) {
+
+    M4VIFI_ImagePlane outPlane[3];
+
+    // Fix: validate the angle before touching any buffers. The original
+    // code allocated the temporary planes first and, on an invalid angle,
+    // still ran the copy-back path below — overwriting the input frame with
+    // the planes' uninitialized contents and wrongly swapping its
+    // dimensions.
+    if (rotationDegree != 90 && rotationDegree != 180 &&
+        rotationDegree != 270) {
+        M4OSA_TRACE1_1("invalid rotation param %d", (int)rotationDegree);
+        return M4ERR_PARAMETER;
+    }
+
+    if (rotationDegree != 180) {
+        // 90/270: rotate into temporary planes with swapped dimensions
+        outPlane[0].u_width = pPlaneIn[0].u_height;
+        outPlane[0].u_height = pPlaneIn[0].u_width;
+        outPlane[0].u_stride = outPlane[0].u_width;
+        outPlane[0].u_topleft = 0;
+        outPlane[0].pac_data = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(
+            (outPlane[0].u_stride*outPlane[0].u_height), M4VS,
+            (M4OSA_Char*)("out Y plane for rotation"));
+        if (outPlane[0].pac_data == M4OSA_NULL) {
+            return M4ERR_ALLOC;
+        }
+
+        outPlane[1].u_width = pPlaneIn[0].u_height/2;
+        outPlane[1].u_height = pPlaneIn[0].u_width/2;
+        outPlane[1].u_stride = outPlane[1].u_width;
+        outPlane[1].u_topleft = 0;
+        outPlane[1].pac_data = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(
+            (outPlane[1].u_stride*outPlane[1].u_height), M4VS,
+            (M4OSA_Char*)("out U plane for rotation"));
+        if (outPlane[1].pac_data == M4OSA_NULL) {
+            free((void *)outPlane[0].pac_data);
+            return M4ERR_ALLOC;
+        }
+
+        outPlane[2].u_width = pPlaneIn[0].u_height/2;
+        outPlane[2].u_height = pPlaneIn[0].u_width/2;
+        outPlane[2].u_stride = outPlane[2].u_width;
+        outPlane[2].u_topleft = 0;
+        outPlane[2].pac_data = (M4OSA_UInt8 *)M4OSA_32bitAlignedMalloc(
+            (outPlane[2].u_stride*outPlane[2].u_height), M4VS,
+            (M4OSA_Char*)("out V plane for rotation"));
+        if (outPlane[2].pac_data == M4OSA_NULL) {
+            free((void *)outPlane[0].pac_data);
+            free((void *)outPlane[1].pac_data);
+            return M4ERR_ALLOC;
+        }
+    }
+
+    switch(rotationDegree) {
+        case 90:
+            M4VIFI_Rotate90RightYUV420toYUV420(M4OSA_NULL, pPlaneIn, outPlane);
+            break;
+
+        case 180:
+            // In plane rotation, so planeOut = planeIn
+            M4VIFI_Rotate180YUV420toYUV420(M4OSA_NULL, pPlaneIn, pPlaneIn);
+            break;
+
+        default:
+            // rotationDegree == 270, guaranteed by the validation above
+            M4VIFI_Rotate90LeftYUV420toYUV420(M4OSA_NULL, pPlaneIn, outPlane);
+            break;
+    }
+
+    if (rotationDegree != 180) {
+        // Copy the rotated Y, U and V planes back over the input buffers.
+        // (Each memcpy overwrites the full plane, so the memset-to-zero the
+        // original code did first was redundant and has been dropped.)
+        memcpy((void *)pPlaneIn[0].pac_data, (void *)outPlane[0].pac_data,
+            (pPlaneIn[0].u_width*pPlaneIn[0].u_height));
+        memcpy((void *)pPlaneIn[1].pac_data, (void *)outPlane[1].pac_data,
+            (pPlaneIn[1].u_width*pPlaneIn[1].u_height));
+        memcpy((void *)pPlaneIn[2].pac_data, (void *)outPlane[2].pac_data,
+            (pPlaneIn[2].u_width*pPlaneIn[2].u_height));
+
+        free((void *)outPlane[0].pac_data);
+        free((void *)outPlane[1].pac_data);
+        free((void *)outPlane[2].pac_data);
+
+        // Swap the width and height of the in planes to match the rotation
+        uint32_t temp = 0;
+        temp = pPlaneIn[0].u_width;
+        pPlaneIn[0].u_width = pPlaneIn[0].u_height;
+        pPlaneIn[0].u_height = temp;
+        pPlaneIn[0].u_stride = pPlaneIn[0].u_width;
+
+        temp = pPlaneIn[1].u_width;
+        pPlaneIn[1].u_width = pPlaneIn[1].u_height;
+        pPlaneIn[1].u_height = temp;
+        pPlaneIn[1].u_stride = pPlaneIn[1].u_width;
+
+        temp = pPlaneIn[2].u_width;
+        pPlaneIn[2].u_width = pPlaneIn[2].u_height;
+        pPlaneIn[2].u_height = temp;
+        pPlaneIn[2].u_stride = pPlaneIn[2].u_width;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intSetYUV420Plane()
+ * @brief   Set the width/height/stride fields of a YUV420 plane triplet for
+ *          a given luma size; chroma planes are half size in each dimension.
+ *          pac_data and u_topleft are left untouched.
+ * @param   planeIn (IN/OUT) Array of 3 image planes (Y, U, V)
+ * @param   width   (IN) Luma plane width in pixels
+ * @param   height  (IN) Luma plane height in pixels
+ * @return  M4NO_ERROR, or M4ERR_PARAMETER if planeIn is NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intSetYUV420Plane(M4VIFI_ImagePlane* planeIn,
+    M4OSA_UInt32 width, M4OSA_UInt32 height) {
+
+    if (planeIn == M4OSA_NULL) {
+        M4OSA_TRACE1_0("NULL in plane, error");
+        return M4ERR_PARAMETER;
+    }
+
+    planeIn[0].u_width = width;
+    planeIn[0].u_height = height;
+    planeIn[0].u_stride = planeIn[0].u_width;
+
+    planeIn[1].u_width = width/2;
+    planeIn[1].u_height = height/2;
+    planeIn[1].u_stride = planeIn[1].u_width;
+
+    planeIn[2].u_width = width/2;
+    planeIn[2].u_height = height/2;
+    // Fix copy-paste slip: derive plane 2's stride from plane 2's own width
+    // (the original used planeIn[1].u_width; same value, wrong source).
+    planeIn[2].u_stride = planeIn[2].u_width;
+
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c b/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c
new file mode 100755
index 0000000..f30f705
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file M4VSS3GPP_MediaAndCodecSubscription.c
+ * @brief Media readers and codecs subscription
+ * @note This file implements the subscription of supported media
+ * readers and decoders for the VSS. Potential support can
+ * be activated or de-activated
+ * using compilation flags set in the projects settings.
+ *************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+
+#include "M4OSA_Debug.h"
+#include "M4VSS3GPP_InternalTypes.h" /**< Include for VSS specific types */
+#include "M4VSS3GPP_InternalFunctions.h" /**< Registration module */
+
+/* _______________________ */
+/*| |*/
+/*| reader subscription |*/
+/*|_______________________|*/
+
+/* Reader registration : at least one reader must be defined */
+#ifndef M4VSS_SUPPORT_READER_3GP
+#ifndef M4VSS_SUPPORT_READER_AMR
+#ifndef M4VSS_SUPPORT_READER_MP3
+#ifndef M4VSS_SUPPORT_READER_PCM
+#ifndef M4VSS_SUPPORT_AUDEC_NULL
+#error "no reader registered"
+#endif /* M4VSS_SUPPORT_AUDEC_NULL */
+#endif /* M4VSS_SUPPORT_READER_PCM */
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+#endif /* M4VSS_SUPPORT_READER_AMR */
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+/* There must be at least one MPEG4 decoder */
+#if !defined(M4VSS_SUPPORT_VIDEC_3GP) && !defined(M4VSS_ENABLE_EXTERNAL_DECODERS)
+#error "Wait, what?"
+/* "Hey, this is the VSS3GPP speaking. Pray tell, how the heck do you expect me to be able to do
+any editing without a built-in video decoder, nor the possibility to receive an external one?!
+Seriously, I'd love to know." */
+#endif
+
+/* Include files for each reader to subscribe */
+#ifdef M4VSS_SUPPORT_READER_3GP
+#include "VideoEditor3gpReader.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_AMR
+#include "M4READER_Amr.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_MP3
+#include "VideoEditorMp3Reader.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_PCM
+#include "M4READER_Pcm.h"
+#endif
+
+
+/* ______________________________ */
+/*| |*/
+/*| audio decoder subscription |*/
+/*|______________________________|*/
+
+#include "VideoEditorAudioDecoder.h"
+#include "VideoEditorVideoDecoder.h"
+#include "M4DECODER_Null.h"
+#ifdef M4VSS_SUPPORT_AUDEC_NULL
+#include "M4AD_Null.h"
+#endif
+
+/* _______________________ */
+/*| |*/
+/*| writer subscription |*/
+/*|_______________________|*/
+
+/* Writer registration : at least one writer must be defined */
+//#ifndef M4VSS_SUPPORT_WRITER_AMR
+#ifndef M4VSS_SUPPORT_WRITER_3GPP
+#error "no writer registered"
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+//#endif /* M4VSS_SUPPORT_WRITER_AMR */
+
+/* Include files for each writer to subscribe */
+//#ifdef M4VSS_SUPPORT_WRITER_AMR
+/*extern M4OSA_ERR M4WRITER_AMR_getInterfaces( M4WRITER_OutputFileType* Type,
+M4WRITER_GlobalInterface** SrcGlobalInterface,
+M4WRITER_DataInterface** SrcDataInterface);*/
+//#endif
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+extern M4OSA_ERR M4WRITER_3GP_getInterfaces( M4WRITER_OutputFileType* Type,
+ M4WRITER_GlobalInterface** SrcGlobalInterface,
+ M4WRITER_DataInterface** SrcDataInterface);
+#endif
+
+/* ______________________________ */
+/*| |*/
+/*| video encoder subscription |*/
+/*|______________________________|*/
+#include "VideoEditorAudioEncoder.h"
+#include "VideoEditorVideoEncoder.h"
+
+
+/* ______________________________ */
+/*| |*/
+/*| audio encoder subscription |*/
+/*|______________________________|*/
+
+
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) if ((pointer) == M4OSA_NULL)\
+ return ((M4OSA_ERR)(retval));
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_SubscribeMediaAndCodec()
+ * @brief This function registers the reader, decoders, writers and encoders
+ * in the VSS.
+ * @note
+ * @param pContext: (IN) Execution context.
+ * @return M4NO_ERROR: there is no error
+ * @return M4ERR_PARAMETER pContext is NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec(M4VSS3GPP_MediaAndCodecCtxt *pContext)
+{
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4READER_MediaType readerMediaType;
+ M4READER_GlobalInterface* pReaderGlobalInterface;
+ M4READER_DataInterface* pReaderDataInterface;
+
+ M4WRITER_OutputFileType writerMediaType;
+ M4WRITER_GlobalInterface* pWriterGlobalInterface;
+ M4WRITER_DataInterface* pWriterDataInterface;
+
+ M4AD_Type audioDecoderType;
+ M4ENCODER_AudioFormat audioCodecType;
+ M4ENCODER_AudioGlobalInterface* pAudioCodecInterface;
+ M4AD_Interface* pAudioDecoderInterface;
+
+ M4DECODER_VideoType videoDecoderType;
+ M4ENCODER_Format videoCodecType;
+ M4ENCODER_GlobalInterface* pVideoCodecInterface;
+ M4DECODER_VideoInterface* pVideoDecoderInterface;
+
+ M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
+
+ /* _______________________ */
+ /*| |*/
+ /*| reader subscription |*/
+ /*|_______________________|*/
+
+ /* --- 3GP --- */
+
+#ifdef M4VSS_SUPPORT_READER_3GP
+ err = VideoEditor3gpReader_getInterface( &readerMediaType, &pReaderGlobalInterface,
+ &pReaderDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4READER_3GP interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+ pReaderDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register 3GP reader");
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+ /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_READER_AMR
+ err = M4READER_AMR_getInterfaces( &readerMediaType, &pReaderGlobalInterface,
+ &pReaderDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4READER_AMR interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+ pReaderDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register AMR reader");
+#endif /* M4VSS_SUPPORT_READER_AMR */
+
+ /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_READER_MP3
+ err = VideoEditorMp3Reader_getInterface( &readerMediaType, &pReaderGlobalInterface,
+ &pReaderDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4READER_MP3 interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+ pReaderDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register MP3 reader");
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+
+ /* --- PCM --- */
+
+#ifdef M4VSS_SUPPORT_READER_PCM
+ err = M4READER_PCM_getInterfaces( &readerMediaType, &pReaderGlobalInterface,
+ &pReaderDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4READER_PCM interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+ pReaderDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register PCM reader");
+#endif /* M4VSS_SUPPORT_READER_PCM */
+
+ /* ______________________________ */
+ /*| |*/
+ /*| video decoder subscription |*/
+ /*|______________________________|*/
+
+ /* --- MPEG4 & H263 --- */
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+ err = VideoEditorVideoDecoder_getInterface_MPEG4(&videoDecoderType, (M4OSA_Void *)&pVideoDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4DECODER_MPEG4 interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerVideoDecoder( pContext, videoDecoderType, pVideoDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register MPEG4 decoder");
+#endif /* M4VSS_SUPPORT_VIDEC_3GP */
+
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+ err = VideoEditorVideoDecoder_getInterface_H264(&videoDecoderType, (M4OSA_Void *)&pVideoDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4DECODER_H264 interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerVideoDecoder( pContext, videoDecoderType, pVideoDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register H264 decoder");
+#endif /* M4VSS_SUPPORT_VIDEC_3GP */
+
+#ifdef M4VSS_SUPPORT_VIDEC_NULL
+ err = M4DECODER_NULL_getInterface(
+ &videoDecoderType, &pVideoDecoderInterface);
+ if (M4NO_ERROR != err) {
+ M4OSA_TRACE1_0("M4VD NULL Decoder interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerVideoDecoder(
+ pContext, videoDecoderType, pVideoDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err, "M4VSS3GPP_subscribeMediaAndCodec: \
+ can't register video NULL decoder");
+#endif
+ /* ______________________________ */
+ /*| |*/
+ /*| audio decoder subscription |*/
+ /*|______________________________|*/
+
+ /* --- AMRNB --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
+ err = VideoEditorAudioDecoder_getInterface_AMRNB(&audioDecoderType, &pAudioDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4 AMRNB interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register AMRNB decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AMRNB */
+
+ /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AAC
+ err = VideoEditorAudioDecoder_getInterface_AAC(&audioDecoderType, &pAudioDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4 AAC interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register AAC decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AAC */
+
+ /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_MP3
+ err = VideoEditorAudioDecoder_getInterface_MP3(&audioDecoderType, &pAudioDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4 MP3 interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register MP3 decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_MP3 */
+
+
+ /* --- NULL --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_NULL
+ err = M4AD_NULL_getInterface( &audioDecoderType, &pAudioDecoderInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4AD NULL Decoder interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register EVRC decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_NULL */
+
+ /* _______________________ */
+ /*| |*/
+ /*| writer subscription |*/
+ /*|_______________________|*/
+
+
+ /* --- 3GPP --- */
+
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+ /* retrieves the 3GPP writer media type and pointer to functions*/
+ err = M4WRITER_3GP_getInterfaces( &writerMediaType, &pWriterGlobalInterface,
+ &pWriterDataInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4WRITER_3GP interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerWriter( pContext, writerMediaType, pWriterGlobalInterface,
+ pWriterDataInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register 3GPP writer");
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+
+ /* ______________________________ */
+ /*| |*/
+ /*| video encoder subscription |*/
+ /*|______________________________|*/
+
+ /* --- MPEG4 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+ /* retrieves the MPEG4 encoder type and pointer to functions*/
+ err = VideoEditorVideoEncoder_getInterface_MPEG4(&videoCodecType, &pVideoCodecInterface,
+ M4ENCODER_OPEN_ADVANCED);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4MP4E_MPEG4 interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register video MPEG4 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+ /* --- H263 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+ /* retrieves the H263 encoder type and pointer to functions*/
+ err = VideoEditorVideoEncoder_getInterface_H263(&videoCodecType, &pVideoCodecInterface,
+ M4ENCODER_OPEN_ADVANCED);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4MP4E_H263 interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register video H263 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+ /* retrieves the H264 encoder type and pointer to functions*/
+ err = VideoEditorVideoEncoder_getInterface_H264(&videoCodecType, &pVideoCodecInterface,
+ M4ENCODER_OPEN_ADVANCED);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4VSS3GPP_subscribeMediaAndCodec: M4H264E interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register video H264 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AVC */
+
+ /* ______________________________ */
+ /*| |*/
+ /*| audio encoder subscription |*/
+ /*|______________________________|*/
+
+ /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AMR
+ /* retrieves the AMR encoder type and pointer to functions*/
+ err = VideoEditorAudioEncoder_getInterface_AMRNB(&audioCodecType, &pAudioCodecInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4AMR interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register audio AMR encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AMR */
+
+ /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AAC
+ /* retrieves the AAC encoder type and pointer to functions*/
+ err = VideoEditorAudioEncoder_getInterface_AAC(&audioCodecType, &pAudioCodecInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4AAC interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register audio AAC encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AAC */
+
+ /* --- EVRC --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_EVRC
+ /* retrieves the EVRC encoder type and pointer to functions*/
+ err = M4EVRC_getInterfaces( &audioCodecType, &pAudioCodecInterface);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_0("M4EVRC interface allocation error");
+ return err;
+ }
+ err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+ M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+ "M4VSS3GPP_subscribeMediaAndCodec: can't register audio EVRC encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_EVRC */
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+ pContext->bAllowFreeingOMXCodecInterface = M4OSA_TRUE; /* when NXP SW codecs are registered,
+ then allow unregistration*/
+#endif
+
+
+ return err;
+}
+
diff --git a/libvideoeditor/vss/src/M4xVSS_API.c b/libvideoeditor/vss/src/M4xVSS_API.c
new file mode 100755
index 0000000..9f5410b
--- /dev/null
+++ b/libvideoeditor/vss/src/M4xVSS_API.c
@@ -0,0 +1,6367 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4xVSS_API.c
+ * @brief API of eXtended Video Studio Service (Video Studio 2.1)
+ * @note
+ ******************************************************************************
+ */
+
+/**
+ * OSAL main types and errors ***/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_CharStar.h"
+// StageFright encoders require %16 resolution
+#include "M4ENCODER_common.h"
+#include "M4DECODER_Common.h"
+#include "VideoEditorVideoDecoder.h"
+
+/**
+ * VSS 3GPP API definition */
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/*************************
+Begin of xVSS API
+ **************************/
+
+#include "M4xVSS_API.h"
+#include "M4xVSS_Internal.h"
+
+/* RC: to delete unnecessary temp files on the fly */
+#include "M4VSS3GPP_InternalTypes.h"
+#include <utils/Log.h>
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_Init(M4OSA_Context* pContext, M4xVSS_InitParams* pParams)
+ * @brief This function initializes the xVSS
+ * @note Initializes the xVSS edit operation (allocates an execution context).
+ *
+ * @param pContext (OUT) Pointer on the xVSS edit context to allocate
+ * @param params (IN) Parameters mandatory for xVSS
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_Init( M4OSA_Context *pContext, M4xVSS_InitParams *pParams )
+{
+    M4xVSS_Context *xVSS_context;
+
+    if( pContext == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("pContext for M4xVSS_Init function is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pParams == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Parameter structure for M4xVSS_Init function is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pParams->pFileReadPtr == M4OSA_NULL
+        || pParams->pFileWritePtr == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0(
+            "pFileReadPtr or pFileWritePtr in M4xVSS_InitParams structure is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context = (M4xVSS_Context *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_Context), M4VS,
+        (M4OSA_Char *)"Context of the xVSS layer");
+
+    if( xVSS_context == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+        return M4ERR_ALLOC;
+    }
+
+    /* The allocator does not zero the context: pre-set the pointers owned by
+       this function so the error paths below free only what has actually
+       been allocated (freeing an indeterminate pointer is undefined
+       behavior). */
+    xVSS_context->pTempPath = M4OSA_NULL;
+    xVSS_context->UTFConversionContext.pTempOutConversionBuffer = M4OSA_NULL;
+
+    /* Initialize file read/write functions pointers */
+    xVSS_context->pFileReadPtr = pParams->pFileReadPtr;
+    xVSS_context->pFileWritePtr = pParams->pFileWritePtr;
+
+    /* UTF conversion support: copy conversion function pointers and allocate
+       the temporary conversion buffer. Both pointers must be provided
+       together, or neither. */
+    if( pParams->pConvFromUTF8Fct != M4OSA_NULL )
+    {
+        if( pParams->pConvToUTF8Fct != M4OSA_NULL )
+        {
+            xVSS_context->UTFConversionContext.pConvFromUTF8Fct =
+                pParams->pConvFromUTF8Fct;
+            xVSS_context->UTFConversionContext.pConvToUTF8Fct =
+                pParams->pConvToUTF8Fct;
+            xVSS_context->UTFConversionContext.m_TempOutConversionSize =
+                UTF_CONVERSION_BUFFER_SIZE;
+            xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+                (M4OSA_Void *)M4OSA_32bitAlignedMalloc(UTF_CONVERSION_BUFFER_SIZE
+                * sizeof(M4OSA_UInt8),
+                M4VA, (M4OSA_Char *)"M4xVSS_Init: UTF conversion buffer");
+
+            if( M4OSA_NULL
+                == xVSS_context->UTFConversionContext.pTempOutConversionBuffer )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+                /* pTempPath has not been allocated yet at this point, so only
+                   the context itself must be released here. */
+                free(xVSS_context);
+                xVSS_context = M4OSA_NULL;
+                return M4ERR_ALLOC;
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0("M4xVSS_Init: one UTF conversion pointer is null and the other\
+                 is not null");
+            /* Nothing but the context has been allocated so far. */
+            free(xVSS_context);
+            xVSS_context = M4OSA_NULL;
+            return M4ERR_PARAMETER;
+        }
+    }
+    else
+    {
+        /* No UTF conversion requested: leave the conversion context empty. */
+        xVSS_context->UTFConversionContext.pConvFromUTF8Fct = M4OSA_NULL;
+        xVSS_context->UTFConversionContext.pConvToUTF8Fct = M4OSA_NULL;
+        xVSS_context->UTFConversionContext.m_TempOutConversionSize = 0;
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+            M4OSA_NULL;
+    }
+
+    if( pParams->pTempPath != M4OSA_NULL )
+    {
+        /*No need to convert into UTF8 as all input of xVSS are in UTF8
+        (the conversion customer format into UTF8
+        is done in VA/VAL)*/
+        xVSS_context->pTempPath =
+            (M4OSA_Void *)M4OSA_32bitAlignedMalloc(strlen(pParams->pTempPath) + 1,
+            M4VS, (M4OSA_Char *)"xVSS Path for temporary files");
+
+        if( xVSS_context->pTempPath == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+            /* Release everything allocated so far to avoid a leak
+               (free(NULL) is a harmless no-op). */
+            free(xVSS_context->UTFConversionContext.pTempOutConversionBuffer);
+            xVSS_context->UTFConversionContext.pTempOutConversionBuffer = M4OSA_NULL;
+            free(xVSS_context);
+            xVSS_context = M4OSA_NULL;
+            return M4ERR_ALLOC;
+        }
+        memcpy((void *)xVSS_context->pTempPath, (void *)pParams->pTempPath,
+            strlen(pParams->pTempPath) + 1);
+        /* TODO: Check that no previous xVSS temporary files are present ? */
+    }
+    else
+    {
+        M4OSA_TRACE1_0("Path for temporary files is NULL");
+        free(xVSS_context->UTFConversionContext.pTempOutConversionBuffer);
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer = M4OSA_NULL;
+        free(xVSS_context);
+        xVSS_context = M4OSA_NULL;
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context->pSettings =
+        (M4VSS3GPP_EditSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_EditSettings),
+        M4VS, (M4OSA_Char *)"Copy of VSS structure");
+
+    if( xVSS_context->pSettings == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+        /* Release all owned allocations made so far. */
+        free(xVSS_context->UTFConversionContext.pTempOutConversionBuffer);
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer = M4OSA_NULL;
+        free(xVSS_context->pTempPath);
+        xVSS_context->pTempPath = M4OSA_NULL;
+        free(xVSS_context);
+        xVSS_context = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    /* Initialize pointers in pSettings */
+    xVSS_context->pSettings->pClipList = M4OSA_NULL;
+    xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+    xVSS_context->pSettings->Effects = M4OSA_NULL; /* RC */
+    xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+
+    /* This is used to know if the user has added or removed some medias */
+    xVSS_context->previousClipNumber = 0;
+
+    /* "State machine" */
+    xVSS_context->editingStep = 0;
+    xVSS_context->analyseStep = 0;
+
+    xVSS_context->pcmPreviewFile = M4OSA_NULL;
+
+    /* Initialize Pto3GPP and MCS lists */
+    xVSS_context->pMCSparamsList = M4OSA_NULL;
+    xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+    xVSS_context->pPTo3GPPcurrentParams = M4OSA_NULL;
+    xVSS_context->pMCScurrentParams = M4OSA_NULL;
+
+    xVSS_context->tempFileIndex = 0;
+
+    xVSS_context->targetedBitrate = 0;
+
+    xVSS_context->targetedTimescale = 0;
+
+    xVSS_context->pAudioMixContext = M4OSA_NULL;
+    xVSS_context->pAudioMixSettings = M4OSA_NULL;
+
+    /*FB: initialize to avoid crash when error during the editing*/
+    xVSS_context->pCurrentEditSettings = M4OSA_NULL;
+
+    /* Initialize state if all initializations are corrects */
+    xVSS_context->m_state = M4xVSS_kStateInitialized;
+
+    /* initialize MCS context*/
+    xVSS_context->pMCS_Ctxt = M4OSA_NULL;
+
+    *pContext = xVSS_context;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_ReduceTranscode
+ * @brief This function changes the given editing structure in order to
+ * minimize the transcoding time.
+ * @note The xVSS analyses this structure, and if needed, changes the
+ * output parameters (Video codec, video size, audio codec,
+ * audio nb of channels) to minimize the transcoding time.
+ *
+ * @param pContext (OUT) Pointer on the xVSS edit context to allocate
+ * @param pSettings (IN) Edition settings (allocated by the user)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_ReduceTranscode( M4OSA_Context pContext,
+                                  M4VSS3GPP_EditSettings *pSettings )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VIDEOEDITING_ClipProperties fileProperties;
+    M4OSA_UInt8 i, j;
+    M4OSA_Bool bAudioTransition = M4OSA_FALSE; /* NOTE(review): never read below */
+    M4OSA_Bool bIsBGMReplace = M4OSA_FALSE;
+    M4OSA_Bool bFound;
+    /* Per-combination accumulated clip video durations; the combination with
+       the longest total duration is selected as the output format. */
+    M4OSA_UInt32 videoConfig[9] =
+    {
+        0, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    /** Index <-> Video config **/
+    /* 0: H263 SQCIF */
+    /* 1: H263 QCIF */
+    /* 2: H263 CIF */
+    /* 3: MPEG4 SQCIF */
+    /* 4: MPEG4 QQVGA */
+    /* 5: MPEG4 QCIF */
+    /* 6: MPEG4 QVGA */
+    /* 7: MPEG4 CIF */
+    /* 8: MPEG4 VGA */
+    /****************************/
+    /* Per-combination accumulated clip audio durations. */
+    M4OSA_UInt32 audioConfig[3] =
+    {
+        0, 0, 0
+    };
+    /** Index <-> Audio config **/
+    /* 0: AMR */
+    /* 1: AAC 16kHz mono */
+    /* 2: AAC 16kHz stereo */
+    /****************************/
+
+    /* Check state: only legal after Init or while a project is opened. */
+    if( xVSS_context->m_state != M4xVSS_kStateInitialized \
+        && xVSS_context->m_state != M4xVSS_kStateOpened )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_ReduceTranscode function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* Check number of clips */
+    if( pSettings->uiClipNumber == 0 )
+    {
+        M4OSA_TRACE1_0("The number of input clip must be greater than 0 !");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check if there is a background music, and if its audio will replace input clip audio
+       (uiAddVolume == 100 means the BGM fully replaces the clip audio tracks). */
+    if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+    {
+        if( pSettings->xVSS.pBGMtrack->uiAddVolume == 100 )
+        {
+            bIsBGMReplace = M4OSA_TRUE;
+        }
+    }
+
+    /* Parse all clips, and accumulate occurrences (by duration) of each combination */
+    for ( i = 0; i < pSettings->uiClipNumber; i++ )
+    {
+        /* We ignore JPG input files as they are always transcoded */
+        if( pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_3GPP )
+        {
+            /**
+             * UTF conversion: convert the clip path into the customer format */
+            M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
+            M4OSA_UInt32 ConvertedSize = 0;
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)pSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &ConvertedSize);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1("M4xVSS_ReduceTranscode:\
+                         M4xVSS_internalConvertFromUTF8 returns err: 0x%x", err);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+            /**
+             * End of the utf conversion, now use the converted path */
+            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+                &fileProperties);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned 0x%x",
+                    err);
+                /* TODO: Translate error code of MCS to an xVSS error code ? */
+                return err;
+            }
+
+            /* Check best video settings: vote for the matching codec/size slot,
+               weighted by this clip's video duration. H264 clips vote into the
+               MPEG4-family slots. */
+            if( fileProperties.uiVideoWidth == 128
+                && fileProperties.uiVideoHeight == 96 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[0] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[3] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 160
+                && fileProperties.uiVideoHeight == 120 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[4] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 176
+                && fileProperties.uiVideoHeight == 144 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[1] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[5] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 320
+                && fileProperties.uiVideoHeight == 240 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[6] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 352
+                && fileProperties.uiVideoHeight == 288 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[2] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[7] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 640
+                && fileProperties.uiVideoHeight == 480 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[8] += fileProperties.uiClipVideoDuration;
+                }
+            }
+
+            /* If there is a BGM that replaces existing audio track, we do not care about
+            audio track as it will be replaced */
+            /* If not, we try to minimize audio reencoding */
+            if( bIsBGMReplace == M4OSA_FALSE )
+            {
+                if( fileProperties.AudioStreamType == M4VIDEOEDITING_kAAC )
+                {
+                    if( fileProperties.uiSamplingFrequency == 16000 && \
+                        fileProperties.uiNbChannels == 1 )
+                    {
+                        audioConfig[1] += fileProperties.uiClipAudioDuration;
+                    }
+                    else if( fileProperties.uiSamplingFrequency == 16000 && \
+                        fileProperties.uiNbChannels == 2 )
+                    {
+                        audioConfig[2] += fileProperties.uiClipAudioDuration;
+                    }
+                }
+                else if( fileProperties.AudioStreamType
+                    == M4VIDEOEDITING_kAMR_NB )
+                {
+                    audioConfig[0] += fileProperties.uiClipAudioDuration;
+                }
+            }
+        }
+    }
+
+    /* Find best output video format (the most occurring combination) */
+    j = 0;
+    bFound = M4OSA_FALSE;
+
+    /* NOTE(review): the '>=' comparison means videoConfig[0] >= videoConfig[0]
+       holds on the first iteration, so bFound is always M4OSA_TRUE afterwards
+       — even when every counter is zero (then j ends at 8, i.e. MPEG4 VGA).
+       Confirm that picking a default in the no-votes case is intended. */
+    for ( i = 0; i < 9; i++ )
+    {
+        if( videoConfig[i] >= videoConfig[j] )
+        {
+            j = i;
+            bFound = M4OSA_TRUE;
+        }
+    }
+
+    /* NOTE(review): fileProperties still holds the properties of the LAST
+       3GPP clip parsed in the loop above, so the MPEG4-vs-H264 choice in the
+       cases below is based on that last clip only — confirm this is intended. */
+    if( bFound )
+    {
+        switch( j )
+        {
+            case 0:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kSQCIF;
+                break;
+
+            case 1:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
+                break;
+
+            case 2:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kCIF;
+                break;
+
+            case 3:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kSQCIF;
+                break;
+
+            case 4:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQQVGA;
+                break;
+
+            case 5:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
+                break;
+
+            case 6:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQVGA;
+                break;
+
+            case 7:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kCIF;
+                break;
+
+            case 8:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kVGA;
+                break;
+        }
+    }
+
+    /* Find best output audio format (the most occurring combination) */
+    j = 0;
+    bFound = M4OSA_FALSE;
+
+    for ( i = 0; i < 3; i++ )
+    {
+        if( audioConfig[i] >= audioConfig[j] )
+        {
+            j = i;
+            bFound = M4OSA_TRUE;
+        }
+    }
+
+    if( bFound )
+    {
+        switch( j )
+        {
+            case 0:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+                pSettings->xVSS.bAudioMono = M4OSA_TRUE;
+                break;
+
+            case 1:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAAC;
+                pSettings->xVSS.bAudioMono = M4OSA_TRUE;
+                break;
+
+            case 2:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAAC;
+                pSettings->xVSS.bAudioMono = M4OSA_FALSE;
+                break;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_SendCommand(M4OSA_Context pContext,
+ * M4VSS3GPP_EditSettings* pSettings)
+ * @brief This function gives to the xVSS an editing structure
+ * @note The xVSS analyses this structure, and prepare edition
+ * This function must be called after M4xVSS_Init, after
+ * M4xVSS_CloseCommand, or after M4xVSS_PreviewStop.
+ * After this function, the user must call M4xVSS_Step until
+ * it returns another error than M4NO_ERROR.
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param pSettings (IN) Edition settings (allocated by the user)
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_SendCommand( M4OSA_Context pContext,
+ M4VSS3GPP_EditSettings *pSettings )
+{
+ M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+ M4OSA_UInt8 i, j;
+ M4OSA_UInt8 nbOfSameClip = 0;
+ M4OSA_ERR err;
+ M4OSA_Bool isNewBGM = M4OSA_TRUE;
+ M4xVSS_Pto3GPP_params *pPto3GPP_last = M4OSA_NULL;
+ M4xVSS_MCS_params *pMCS_last = M4OSA_NULL;
+ M4OSA_UInt32 width, height, samplingFreq;
+ M4OSA_Bool bIsTranscoding = M4OSA_FALSE;
+ M4OSA_Int32 totalDuration;
+ M4OSA_UInt32 outputSamplingFrequency = 0;
+ M4OSA_UInt32 length = 0;
+ M4OSA_Int8 masterClip = -1;
+
+ i = 0;
+ /* Check state */
+ if( xVSS_context->m_state != M4xVSS_kStateInitialized \
+ && xVSS_context->m_state != M4xVSS_kStateOpened )
+ {
+ M4OSA_TRACE1_1(
+ "Bad state when calling M4xVSS_SendCommand function! State is %d",
+ xVSS_context->m_state);
+ return M4ERR_STATE;
+ }
+
+ /* State is back to initialized to allow call of cleanup function in case of error */
+ xVSS_context->m_state = M4xVSS_kStateInitialized;
+
+ /* Check if a previous sendCommand has been called */
+ if( xVSS_context->previousClipNumber != 0 )
+ {
+ M4OSA_UInt32 pCmpResult = 0;
+
+ /* Compare BGM input */
+ if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL \
+ && pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+ pCmpResult = strcmp((const char *)xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+ (const char *)pSettings->xVSS.pBGMtrack->pFile);
+
+ if( pCmpResult == 0 )
+ {
+ /* Check if audio output parameters have changed */
+ if( xVSS_context->pSettings->xVSS.outputAudioFormat ==
+ pSettings->xVSS.outputAudioFormat
+ && xVSS_context->pSettings->xVSS.bAudioMono
+ == pSettings->xVSS.bAudioMono )
+ {
+ /* It means that BGM is the same as before, so, no need to redecode it */
+ M4OSA_TRACE2_0(
+ "BGM is the same as before, nothing to decode");
+ isNewBGM = M4OSA_FALSE;
+ }
+ else
+ {
+ /* We need to unallocate PCM preview file path in internal context */
+ if( xVSS_context->pcmPreviewFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pcmPreviewFile);
+ xVSS_context->pcmPreviewFile = M4OSA_NULL;
+ }
+ }
+ }
+ else
+ {
+ /* We need to unallocate PCM preview file path in internal context */
+ if( xVSS_context->pcmPreviewFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pcmPreviewFile);
+ xVSS_context->pcmPreviewFile = M4OSA_NULL;
+ }
+ }
+ }
+
+ /* Check if output settings have changed */
+ if( xVSS_context->pSettings->xVSS.outputVideoSize
+ != pSettings->xVSS.outputVideoSize
+ || xVSS_context->pSettings->xVSS.outputVideoFormat
+ != pSettings->xVSS.outputVideoFormat
+ || xVSS_context->pSettings->xVSS.outputVideoProfile
+ != pSettings->xVSS.outputVideoProfile
+ || xVSS_context->pSettings->xVSS.outputVideoLevel
+ != pSettings->xVSS.outputVideoLevel
+ || xVSS_context->pSettings->xVSS.outputAudioFormat
+ != pSettings->xVSS.outputAudioFormat
+ || xVSS_context->pSettings->xVSS.bAudioMono
+ != pSettings->xVSS.bAudioMono
+ || xVSS_context->pSettings->xVSS.outputAudioSamplFreq
+ != pSettings->xVSS.outputAudioSamplFreq )
+ {
+ /* If it is the case, we can't reuse already transcoded/converted files */
+ /* so, we delete these files and remove them from chained list */
+ if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+ {
+ M4xVSS_Pto3GPP_params *pParams =
+ xVSS_context->pPTo3GPPparamsList;
+ M4xVSS_Pto3GPP_params *pParams_sauv;
+
+ while( pParams != M4OSA_NULL )
+ {
+ if( pParams->pFileIn != M4OSA_NULL )
+ {
+ free(pParams->pFileIn);
+ pParams->pFileIn = M4OSA_NULL;
+ }
+
+ if( pParams->pFileOut != M4OSA_NULL )
+ {
+ /* Delete temporary file */
+ remove((const char *)pParams->pFileOut);
+ free(pParams->pFileOut);
+ pParams->pFileOut = M4OSA_NULL;
+ }
+
+ if( pParams->pFileTemp != M4OSA_NULL )
+ {
+ /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+ remove((const char *)pParams->pFileTemp);
+ free(pParams->pFileTemp);
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+ pParams->pFileTemp = M4OSA_NULL;
+ }
+ pParams_sauv = pParams;
+ pParams = pParams->pNext;
+ free(pParams_sauv);
+ pParams_sauv = M4OSA_NULL;
+ }
+ xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+ }
+
+ if( xVSS_context->pMCSparamsList != M4OSA_NULL )
+ {
+ M4xVSS_MCS_params *pParams = xVSS_context->pMCSparamsList;
+ M4xVSS_MCS_params *pParams_sauv;
+ M4xVSS_MCS_params *pParams_bgm = M4OSA_NULL;
+
+ while( pParams != M4OSA_NULL )
+ {
+ /* Here, we do not delete BGM */
+ if( pParams->isBGM != M4OSA_TRUE )
+ {
+ if( pParams->pFileIn != M4OSA_NULL )
+ {
+ free(pParams->pFileIn);
+ pParams->pFileIn = M4OSA_NULL;
+ }
+
+ if( pParams->pFileOut != M4OSA_NULL )
+ {
+ /* Delete temporary file */
+ remove((const char *)pParams->pFileOut);
+ free(pParams->pFileOut);
+ pParams->pFileOut = M4OSA_NULL;
+ }
+
+ if( pParams->pFileTemp != M4OSA_NULL )
+ {
+ /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+ remove((const char *)pParams->pFileTemp);
+ free(pParams->pFileTemp);
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+ pParams->pFileTemp = M4OSA_NULL;
+ }
+ pParams_sauv = pParams;
+ pParams = pParams->pNext;
+ free(pParams_sauv);
+ pParams_sauv = M4OSA_NULL;
+ }
+ else
+ {
+ pParams_bgm = pParams;
+ pParams = pParams->pNext;
+ /*PR P4ME00003182 initialize this pointer because the following params
+ element will be deallocated*/
+ if( pParams != M4OSA_NULL
+ && pParams->isBGM != M4OSA_TRUE )
+ {
+ pParams_bgm->pNext = M4OSA_NULL;
+ }
+ }
+ }
+ xVSS_context->pMCSparamsList = pParams_bgm;
+ }
+ /* Maybe need to implement framerate changing */
+ //xVSS_context->pSettings->videoFrameRate;
+ }
+
+ /* Unallocate previous xVSS_context->pSettings structure */
+ M4xVSS_freeSettings(xVSS_context->pSettings);
+
+ /*Unallocate output file path*/
+ if( xVSS_context->pSettings->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pSettings->pOutputFile);
+ xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+ }
+ xVSS_context->pSettings->uiOutputPathSize = 0;
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+
+ /**********************************
+ Clips registering
+ **********************************/
+
+ /* Copy settings from user given structure to our "local" structure */
+ xVSS_context->pSettings->xVSS.outputVideoFormat =
+ pSettings->xVSS.outputVideoFormat;
+ xVSS_context->pSettings->xVSS.outputVideoProfile =
+ pSettings->xVSS.outputVideoProfile;
+ xVSS_context->pSettings->xVSS.outputVideoLevel =
+ pSettings->xVSS.outputVideoLevel;
+ xVSS_context->pSettings->xVSS.outputVideoSize =
+ pSettings->xVSS.outputVideoSize;
+ xVSS_context->pSettings->xVSS.outputAudioFormat =
+ pSettings->xVSS.outputAudioFormat;
+ xVSS_context->pSettings->xVSS.bAudioMono = pSettings->xVSS.bAudioMono;
+ xVSS_context->pSettings->xVSS.outputAudioSamplFreq =
+ pSettings->xVSS.outputAudioSamplFreq;
+ /*xVSS_context->pSettings->pOutputFile = pSettings->pOutputFile;*/
+ /*FB: VAL CR P4ME00003076
+ The output video and audio bitrate are given by the user in the edition settings structure*/
+ xVSS_context->pSettings->xVSS.outputVideoBitrate =
+ pSettings->xVSS.outputVideoBitrate;
+ xVSS_context->pSettings->xVSS.outputAudioBitrate =
+ pSettings->xVSS.outputAudioBitrate;
+ xVSS_context->pSettings->PTVolLevel = pSettings->PTVolLevel;
+
+ /*FB: bug fix if the output path is given in M4xVSS_sendCommand*/
+
+ if( pSettings->pOutputFile != M4OSA_NULL
+ && pSettings->uiOutputPathSize > 0 )
+ {
+ M4OSA_Void *pDecodedPath = pSettings->pOutputFile;
+ /*As all inputs of the xVSS are in UTF8, convert the output file path into the
+ customer format*/
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)pSettings->pOutputFile,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1("M4xVSS_SendCommand:\
+ M4xVSS_internalConvertFromUTF8 returns err: 0x%x", err);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ pSettings->uiOutputPathSize = length;
+ }
+
+ xVSS_context->pSettings->pOutputFile = (M4OSA_Void *)M4OSA_32bitAlignedMalloc \
+ (pSettings->uiOutputPathSize + 1, M4VS,
+ (M4OSA_Char *)"output file path");
+
+ if( xVSS_context->pSettings->pOutputFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pSettings->pOutputFile,
+ (void *)pDecodedPath, pSettings->uiOutputPathSize + 1);
+ xVSS_context->pSettings->uiOutputPathSize = pSettings->uiOutputPathSize;
+ xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+ }
+ else
+ {
+ xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+ xVSS_context->pSettings->uiOutputPathSize = 0;
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ xVSS_context->pSettings->pTemporaryFile = pSettings->pTemporaryFile;
+ xVSS_context->pSettings->uiClipNumber = pSettings->uiClipNumber;
+ xVSS_context->pSettings->videoFrameRate = pSettings->videoFrameRate;
+ xVSS_context->pSettings->uiMasterClip =
+ 0; /* With VSS 2.0, this new param is mandatory */
+ xVSS_context->pSettings->xVSS.pTextRenderingFct =
+ pSettings->xVSS.pTextRenderingFct; /* CR text handling */
+ xVSS_context->pSettings->xVSS.outputFileSize =
+ pSettings->xVSS.outputFileSize;
+
+ if( pSettings->xVSS.outputFileSize != 0 \
+ && pSettings->xVSS.outputAudioFormat != M4VIDEOEDITING_kAMR_NB )
+ {
+ M4OSA_TRACE1_0("M4xVSS_SendCommand: Impossible to limit file\
+ size with other audio output than AAC");
+ return M4ERR_PARAMETER;
+ }
+ xVSS_context->nbStepTotal = 0;
+ xVSS_context->currentStep = 0;
+
+ if( xVSS_context->pSettings->xVSS.outputVideoFormat != M4VIDEOEDITING_kMPEG4
+ && xVSS_context->pSettings->xVSS.outputVideoFormat
+ != M4VIDEOEDITING_kH263
+ && xVSS_context->pSettings->xVSS.outputVideoFormat
+ != M4VIDEOEDITING_kH264 )
+ {
+ xVSS_context->pSettings->xVSS.outputVideoFormat =
+ M4VIDEOEDITING_kNoneVideo;
+ }
+
+ /* Get output width/height */
+ switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+ {
+ case M4VIDEOEDITING_kSQCIF:
+ width = 128;
+ height = 96;
+ break;
+
+ case M4VIDEOEDITING_kQQVGA:
+ width = 160;
+ height = 120;
+ break;
+
+ case M4VIDEOEDITING_kQCIF:
+ width = 176;
+ height = 144;
+ break;
+
+ case M4VIDEOEDITING_kQVGA:
+ width = 320;
+ height = 240;
+ break;
+
+ case M4VIDEOEDITING_kCIF:
+ width = 352;
+ height = 288;
+ break;
+
+ case M4VIDEOEDITING_kVGA:
+ width = 640;
+ height = 480;
+ break;
+ /* +PR LV5807 */
+ case M4VIDEOEDITING_kWVGA:
+ width = 800;
+ height = 480;
+ break;
+
+ case M4VIDEOEDITING_kNTSC:
+ width = 720;
+ height = 480;
+ break;
+ /* -PR LV5807 */
+ /* +CR Google */
+ case M4VIDEOEDITING_k640_360:
+ width = 640;
+ height = 360;
+ break;
+
+ case M4VIDEOEDITING_k854_480:
+
+ // StageFright encoders require %16 resolution
+
+ width = M4ENCODER_854_480_Width;
+
+ height = 480;
+ break;
+
+ case M4VIDEOEDITING_k1280_720:
+ width = 1280;
+ height = 720;
+ break;
+
+ case M4VIDEOEDITING_k1080_720:
+ // StageFright encoders require %16 resolution
+ width = M4ENCODER_1080_720_Width;
+ height = 720;
+ break;
+
+ case M4VIDEOEDITING_k960_720:
+ width = 960;
+ height = 720;
+ break;
+
+ case M4VIDEOEDITING_k1920_1080:
+ width = 1920;
+ height = M4ENCODER_1920_1080_Height;
+ break;
+
+ /* -CR Google */
+ default: /* If output video size is not given, we take QCIF size */
+ width = 176;
+ height = 144;
+ xVSS_context->pSettings->xVSS.outputVideoSize =
+ M4VIDEOEDITING_kQCIF;
+ break;
+ }
+
+ /* Get output Sampling frequency */
+ switch( xVSS_context->pSettings->xVSS.outputAudioSamplFreq )
+ {
+ case M4VIDEOEDITING_k8000_ASF:
+ samplingFreq = 8000;
+ break;
+
+ case M4VIDEOEDITING_k16000_ASF:
+ samplingFreq = 16000;
+ break;
+
+ case M4VIDEOEDITING_k22050_ASF:
+ samplingFreq = 22050;
+ break;
+
+ case M4VIDEOEDITING_k24000_ASF:
+ samplingFreq = 24000;
+ break;
+
+ case M4VIDEOEDITING_k32000_ASF:
+ samplingFreq = 32000;
+ break;
+
+ case M4VIDEOEDITING_k44100_ASF:
+ samplingFreq = 44100;
+ break;
+
+ case M4VIDEOEDITING_k48000_ASF:
+ samplingFreq = 48000;
+ break;
+
+ case M4VIDEOEDITING_kDefault_ASF:
+ default:
+ if( xVSS_context->pSettings->xVSS.outputAudioFormat
+ == M4VIDEOEDITING_kAMR_NB )
+ {
+ samplingFreq = 8000;
+ }
+ else if( xVSS_context->pSettings->xVSS.outputAudioFormat
+ == M4VIDEOEDITING_kAAC )
+ {
+ samplingFreq = 16000;
+ }
+ else
+ {
+ samplingFreq = 0;
+ }
+ break;
+ }
+
+ /* Allocate clip/transitions if clip number is not null ... */
+ if( 0 < xVSS_context->pSettings->uiClipNumber )
+ {
+ if( xVSS_context->pSettings->pClipList != M4OSA_NULL )
+ {
+ free((xVSS_context->pSettings->pClipList));
+ xVSS_context->pSettings->pClipList = M4OSA_NULL;
+ }
+
+ if( xVSS_context->pSettings->pTransitionList != M4OSA_NULL )
+ {
+ free(xVSS_context->pSettings->pTransitionList);
+ xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+ }
+
+ xVSS_context->pSettings->pClipList =
+ (M4VSS3GPP_ClipSettings ** )M4OSA_32bitAlignedMalloc \
+ (sizeof(M4VSS3GPP_ClipSettings *)*xVSS_context->pSettings->uiClipNumber,
+ M4VS, (M4OSA_Char *)"xVSS, copy of pClipList");
+
+ if( xVSS_context->pSettings->pClipList == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ /* Set clip list to NULL */
+ memset((void *)xVSS_context->pSettings->pClipList,0,
+ sizeof(M4VSS3GPP_ClipSettings *)
+ *xVSS_context->pSettings->uiClipNumber);
+
+ if( xVSS_context->pSettings->uiClipNumber > 1 )
+ {
+ xVSS_context->pSettings->pTransitionList =
+ (M4VSS3GPP_TransitionSettings ** ) \
+ M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings *) \
+ *(xVSS_context->pSettings->uiClipNumber - 1), M4VS, (M4OSA_Char *) \
+ "xVSS, copy of pTransitionList");
+
+ if( xVSS_context->pSettings->pTransitionList == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ /* Set transition list to NULL */
+ memset(
+ (void *)xVSS_context->pSettings->pTransitionList,0,
+ sizeof(M4VSS3GPP_TransitionSettings *)
+ *(xVSS_context->pSettings->uiClipNumber - 1));
+ }
+ else
+ {
+ xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+ }
+ }
+ /* else, there is a pb in the input settings structure */
+ else
+ {
+ M4OSA_TRACE1_0("No clip in this settings list !!");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_PARAMETER;
+ }
+
+ /* RC Allocate effects settings */
+ xVSS_context->pSettings->nbEffects = pSettings->nbEffects;
+
+ if( 0 < xVSS_context->pSettings->nbEffects )
+ {
+ xVSS_context->pSettings->Effects =
+ (M4VSS3GPP_EffectSettings *)M4OSA_32bitAlignedMalloc \
+ (xVSS_context->pSettings->nbEffects * sizeof(M4VSS3GPP_EffectSettings),
+ M4VS, (M4OSA_Char *)"effects settings");
+
+ if( xVSS_context->pSettings->Effects == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ /*FB bug fix 19.03.2008: these pointers were not initialized -> crash when free*/
+ for ( i = 0; i < xVSS_context->pSettings->nbEffects; i++ )
+ {
+ xVSS_context->pSettings->Effects[i].xVSS.pFramingFilePath =
+ M4OSA_NULL;
+ xVSS_context->pSettings->Effects[i].xVSS.pFramingBuffer =
+ M4OSA_NULL;
+ xVSS_context->pSettings->Effects[i].xVSS.pTextBuffer = M4OSA_NULL;
+ }
+ /**/
+ }
+
+ if( xVSS_context->targetedTimescale == 0 )
+ {
+ M4OSA_UInt32 pTargetedTimeScale = 0;
+
+ err = M4xVSS_internalGetTargetedTimeScale(xVSS_context, pSettings,
+ &pTargetedTimeScale);
+
+ if( M4NO_ERROR != err || pTargetedTimeScale == 0 )
+ {
+ M4OSA_TRACE1_1("M4xVSS_SendCommand: M4xVSS_internalGetTargetedTimeScale\
+ returned 0x%x", err);
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return err;
+ }
+ xVSS_context->targetedTimescale = pTargetedTimeScale;
+ }
+
+ /* Initialize total duration variable */
+ totalDuration = 0;
+
+ /* Parsing list of clips given by application, and prepare analyzing */
+ for ( i = 0; i < xVSS_context->pSettings->uiClipNumber; i++ )
+ {
+ /* Allocate current clip */
+ xVSS_context->pSettings->pClipList[i] =
+ (M4VSS3GPP_ClipSettings *)M4OSA_32bitAlignedMalloc \
+ (sizeof(M4VSS3GPP_ClipSettings), M4VS, (M4OSA_Char *)"clip settings");
+
+ if( xVSS_context->pSettings->pClipList[i] == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ /* Copy clip settings from given structure to our xVSS_context structure */
+ err =
+ M4xVSS_DuplicateClipSettings(xVSS_context->pSettings->pClipList[i],
+ pSettings->pClipList[i], M4OSA_TRUE);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_DuplicateClipSettings return error 0x%x",
+ err);
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return err;
+ }
+
+ xVSS_context->pSettings->pClipList[i]->bTranscodingRequired =
+ M4OSA_FALSE;
+
+ /* Because there is 1 less transition than clip number */
+ if( i < xVSS_context->pSettings->uiClipNumber - 1 )
+ {
+ xVSS_context->pSettings->pTransitionList[i] =
+ (M4VSS3GPP_TransitionSettings
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings),
+ M4VS, (M4OSA_Char *)"transition settings");
+
+ if( xVSS_context->pSettings->pTransitionList[i] == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ memcpy(
+ (void *)xVSS_context->pSettings->pTransitionList[i],
+ (void *)pSettings->pTransitionList[i],
+ sizeof(M4VSS3GPP_TransitionSettings));
+ /* Initialize external effect context to NULL, to know if input jpg has already been
+ decoded or not */
+ xVSS_context->pSettings->pTransitionList[i]->
+ pExtVideoTransitionFctCtxt = M4OSA_NULL;
+
+ switch( xVSS_context->pSettings->
+ pTransitionList[i]->VideoTransitionType )
+ {
+ /* If transition type is alpha magic, we need to decode input file */
+ case M4xVSS_kVideoTransitionType_AlphaMagic:
+ /* Allocate our alpha magic settings structure to have a copy of the
+ provided one */
+ xVSS_context->pSettings->pTransitionList[i]-> \
+ xVSS.transitionSpecific.pAlphaMagicSettings =
+ (M4xVSS_AlphaMagicSettings *)M4OSA_32bitAlignedMalloc \
+ (sizeof(M4xVSS_AlphaMagicSettings), M4VS,
+ (M4OSA_Char *)"Input Alpha magic settings structure");
+
+ if( xVSS_context->pSettings->pTransitionList[i]-> \
+ xVSS.transitionSpecific.pAlphaMagicSettings == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ /* Copy data from the provided alpha magic settings structure tou our
+ structure */
+ memcpy((void *)xVSS_context->pSettings->
+ pTransitionList[i]-> \
+ xVSS.transitionSpecific.pAlphaMagicSettings,
+ (void *)pSettings->pTransitionList[i]-> \
+ xVSS.transitionSpecific.pAlphaMagicSettings,
+ sizeof(M4xVSS_AlphaMagicSettings));
+
+ /* Allocate our alpha magic input filename */
+ xVSS_context->pSettings->pTransitionList[i]-> \
+ xVSS.transitionSpecific.pAlphaMagicSettings->
+ pAlphaFilePath = M4OSA_32bitAlignedMalloc(
+ (strlen(pSettings->pTransitionList[i]-> \
+ xVSS.transitionSpecific.pAlphaMagicSettings->pAlphaFilePath)
+ + 1), M4VS, (M4OSA_Char *)"Alpha magic file path");
+
+ if( xVSS_context->pSettings->pTransitionList[i]-> \
+ xVSS.transitionSpecific.pAlphaMagicSettings->pAlphaFilePath
+ == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ /* Copy data from the provided alpha magic filename to our */
+ M4OSA_chrNCopy(
+ xVSS_context->pSettings->pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ pAlphaFilePath,
+ pSettings->pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ pAlphaFilePath, strlen(
+ pSettings->pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ pAlphaFilePath) + 1);
+
+ /* Parse all transition to know if the input jpg has already been decoded */
+ for ( j = 0; j < i; j++ )
+ {
+ if( xVSS_context->pSettings->
+ pTransitionList[j]->VideoTransitionType
+ == M4xVSS_kVideoTransitionType_AlphaMagic )
+ {
+ M4OSA_UInt32 pCmpResult = 0;
+ pCmpResult = strcmp((const char *)xVSS_context->pSettings->
+ pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ pAlphaFilePath, (const char *)xVSS_context->pSettings->
+ pTransitionList[j]->xVSS.
+ transitionSpecific.
+ pAlphaMagicSettings->pAlphaFilePath);
+
+ if( pCmpResult == 0 )
+ {
+ M4xVSS_internal_AlphaMagicSettings
+ *alphaSettings;
+
+ alphaSettings =
+ (M4xVSS_internal_AlphaMagicSettings
+ *)M4OSA_32bitAlignedMalloc(
+ sizeof(
+ M4xVSS_internal_AlphaMagicSettings),
+ M4VS,
+ (M4OSA_Char
+ *)
+ "Alpha magic settings structure 1");
+
+ if( alphaSettings == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send
+ command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ alphaSettings->pPlane =
+ ((M4xVSS_internal_AlphaMagicSettings *)(
+ xVSS_context->pSettings->
+ pTransitionList[j]->
+ pExtVideoTransitionFctCtxt))->
+ pPlane;
+
+ if( xVSS_context->pSettings->
+ pTransitionList[i]->xVSS.transitionSpecific.
+ pAlphaMagicSettings->blendingPercent > 0
+ && xVSS_context->pSettings->
+ pTransitionList[i]->xVSS.
+ transitionSpecific.
+ pAlphaMagicSettings->blendingPercent
+ <= 100 )
+ {
+ alphaSettings->blendingthreshold =
+ ( xVSS_context->pSettings->
+ pTransitionList[i]->xVSS.
+ transitionSpecific.
+ pAlphaMagicSettings->
+ blendingPercent) * 255 / 200;
+ }
+ else
+ {
+ alphaSettings->blendingthreshold = 0;
+ }
+ alphaSettings->isreverse =
+ xVSS_context->pSettings->
+ pTransitionList[i]->xVSS.
+ transitionSpecific.
+ pAlphaMagicSettings->isreverse;
+ /* It means that the input jpg file for alpha magic has already
+ been decoded -> no nedd to decode it again */
+ if( alphaSettings->blendingthreshold == 0 )
+ {
+ xVSS_context->pSettings->
+ pTransitionList[i]->
+ ExtVideoTransitionFct =
+ M4xVSS_AlphaMagic;
+ }
+ else
+ {
+ xVSS_context->pSettings->
+ pTransitionList[i]->
+ ExtVideoTransitionFct =
+ M4xVSS_AlphaMagicBlending;
+ }
+ xVSS_context->pSettings->pTransitionList[i]->
+ pExtVideoTransitionFctCtxt = alphaSettings;
+ break;
+ }
+ }
+ }
+
+ /* If the jpg has not been decoded yet ... */
+ if( xVSS_context->pSettings->
+ pTransitionList[i]->pExtVideoTransitionFctCtxt
+ == M4OSA_NULL )
+ {
+ M4VIFI_ImagePlane *outputPlane;
+ M4xVSS_internal_AlphaMagicSettings *alphaSettings;
+ /*UTF conversion support*/
+ M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+ /*To support ARGB8888 : get the width and height */
+ M4OSA_UInt32 width_ARGB888 =
+ xVSS_context->pSettings->pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->width;
+ M4OSA_UInt32 height_ARGB888 =
+ xVSS_context->pSettings->pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->height;
+ M4OSA_TRACE1_1(
+ " TransitionListM4xVSS_SendCommand width State is %d",
+ width_ARGB888);
+ M4OSA_TRACE1_1(
+ " TransitionListM4xVSS_SendCommand height! State is %d",
+ height_ARGB888);
+ /* Allocate output plane */
+ outputPlane = (M4VIFI_ImagePlane *)M4OSA_32bitAlignedMalloc(3
+ * sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char
+ *)
+ "Output plane for Alpha magic transition");
+
+ if( outputPlane == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ outputPlane[0].u_width = width;
+ outputPlane[0].u_height = height;
+ outputPlane[0].u_topleft = 0;
+ outputPlane[0].u_stride = width;
+ outputPlane[0].pac_data = (M4VIFI_UInt8
+ *)M4OSA_32bitAlignedMalloc(( width * height * 3)
+ >> 1,
+ M4VS,
+ (M4OSA_Char
+ *)
+ "Alloc for the Alpha magic pac_data output YUV");
+ ;
+
+ if( outputPlane[0].pac_data == M4OSA_NULL )
+ {
+ free(outputPlane);
+ outputPlane = M4OSA_NULL;
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ outputPlane[1].u_width = width >> 1;
+ outputPlane[1].u_height = height >> 1;
+ outputPlane[1].u_topleft = 0;
+ outputPlane[1].u_stride = width >> 1;
+ outputPlane[1].pac_data = outputPlane[0].pac_data
+ + outputPlane[0].u_width * outputPlane[0].u_height;
+ outputPlane[2].u_width = width >> 1;
+ outputPlane[2].u_height = height >> 1;
+ outputPlane[2].u_topleft = 0;
+ outputPlane[2].u_stride = width >> 1;
+ outputPlane[2].pac_data = outputPlane[1].pac_data
+ + outputPlane[1].u_width * outputPlane[1].u_height;
+
+ pDecodedPath =
+ xVSS_context->pSettings->pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ pAlphaFilePath;
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.
+ pTempOutConversionBuffer != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)xVSS_context->pSettings->
+ pTransitionList[i]->xVSS.
+ transitionSpecific.
+ pAlphaMagicSettings->pAlphaFilePath,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.
+ pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.
+ pTempOutConversionBuffer;
+ }
+ /**
+ End of the conversion, use the decoded path*/
+ /*To support ARGB8888 : convert + resizing from ARGB8888 to yuv420 */
+
+ err = M4xVSS_internalConvertAndResizeARGB8888toYUV420(
+ pDecodedPath,
+ xVSS_context->pFileReadPtr, outputPlane,
+ width_ARGB888, height_ARGB888);
+
+ if( err != M4NO_ERROR )
+ {
+ free(outputPlane[0].pac_data);
+ outputPlane[0].pac_data = M4OSA_NULL;
+ free(outputPlane);
+ outputPlane = M4OSA_NULL;
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: error when decoding alpha magic JPEG: 0x%x",
+ err);
+ return err;
+ }
+
+ /* Allocate alpha settings structure */
+ alphaSettings =
+ (M4xVSS_internal_AlphaMagicSettings *)M4OSA_32bitAlignedMalloc(
+ sizeof(M4xVSS_internal_AlphaMagicSettings),
+ M4VS, (M4OSA_Char
+ *)"Alpha magic settings structure 2");
+
+ if( alphaSettings == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ alphaSettings->pPlane = outputPlane;
+
+ if( xVSS_context->pSettings->pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ blendingPercent > 0 && xVSS_context->pSettings->
+ pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ blendingPercent <= 100 )
+ {
+ alphaSettings->blendingthreshold =
+ ( xVSS_context->pSettings->
+ pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ blendingPercent) * 255 / 200;
+ }
+ else
+ {
+ alphaSettings->blendingthreshold = 0;
+ }
+ alphaSettings->isreverse =
+ xVSS_context->pSettings->pTransitionList[i]->xVSS.
+ transitionSpecific.pAlphaMagicSettings->
+ isreverse;
+
+ if( alphaSettings->blendingthreshold == 0 )
+ {
+ xVSS_context->pSettings->pTransitionList[i]->
+ ExtVideoTransitionFct = M4xVSS_AlphaMagic;
+ }
+ else
+ {
+ xVSS_context->pSettings->pTransitionList[i]->
+ ExtVideoTransitionFct =
+ M4xVSS_AlphaMagicBlending;
+ }
+ xVSS_context->pSettings->pTransitionList[i]->
+ pExtVideoTransitionFctCtxt = alphaSettings;
+ }
+
+ break;
+
+ case M4xVSS_kVideoTransitionType_SlideTransition:
+ {
+ M4xVSS_internal_SlideTransitionSettings *slideSettings;
+ slideSettings =
+ (M4xVSS_internal_SlideTransitionSettings *)M4OSA_32bitAlignedMalloc(
+ sizeof(M4xVSS_internal_SlideTransitionSettings),
+ M4VS, (M4OSA_Char
+ *)"Internal slide transition settings");
+
+ if( M4OSA_NULL == slideSettings )
+ {
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ /* Just copy the lone parameter from the input settings to the internal
+ context. */
+
+ slideSettings->direction =
+ pSettings->pTransitionList[i]->xVSS.transitionSpecific.
+ pSlideTransitionSettings->direction;
+
+ /* No need to keep our copy of the settings. */
+ xVSS_context->pSettings->pTransitionList[i]->
+ xVSS.transitionSpecific.pSlideTransitionSettings =
+ M4OSA_NULL;
+ xVSS_context->pSettings->pTransitionList[i]->
+ ExtVideoTransitionFct = &M4xVSS_SlideTransition;
+ xVSS_context->pSettings->pTransitionList[i]->
+ pExtVideoTransitionFctCtxt = slideSettings;
+ }
+ break;
+
+ case M4xVSS_kVideoTransitionType_FadeBlack:
+ {
+ xVSS_context->pSettings->pTransitionList[i]->
+ ExtVideoTransitionFct = &M4xVSS_FadeBlackTransition;
+ }
+ break;
+
+ case M4xVSS_kVideoTransitionType_External:
+ {
+ xVSS_context->pSettings->pTransitionList[i]->
+ ExtVideoTransitionFct =
+ pSettings->pTransitionList[i]->ExtVideoTransitionFct;
+ xVSS_context->pSettings->pTransitionList[i]->
+ pExtVideoTransitionFctCtxt =
+ pSettings->pTransitionList[i]->
+ pExtVideoTransitionFctCtxt;
+ xVSS_context->pSettings->pTransitionList[i]->
+ VideoTransitionType =
+ M4VSS3GPP_kVideoTransitionType_External;
+ }
+ break;
+
+ default:
+ break;
+ } // switch
+
+ /* Update total_duration with transition duration */
+ totalDuration -= xVSS_context->pSettings->
+ pTransitionList[i]->uiTransitionDuration;
+ }
+
+
+ if( xVSS_context->pSettings->pClipList[i]->FileType
+ == M4VIDEOEDITING_kFileType_ARGB8888 )
+ {
+ if(M4OSA_TRUE ==
+ xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom) {
+ M4OSA_Char out_img[M4XVSS_MAX_PATH_LEN];
+ M4OSA_Char out_img_tmp[M4XVSS_MAX_PATH_LEN];
+ M4xVSS_Pto3GPP_params *pParams = M4OSA_NULL;
+ M4OSA_Context pARGBFileIn;
+ /*UTF conversion support*/
+ M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
+
+ /* Parse Pto3GPP params chained list to know if input file has already been
+ converted */
+ if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+ {
+ M4OSA_UInt32 pCmpResult = 0;
+
+ pParams = xVSS_context->pPTo3GPPparamsList;
+ /* We parse all Pto3gpp Param chained list */
+ while( pParams != M4OSA_NULL )
+ {
+ pCmpResult = strcmp((const char *)pSettings->pClipList[i]->pFile,
+ (const char *)pParams->pFileIn);
+
+ if( pCmpResult == 0
+ && (pSettings->pClipList[i]->uiEndCutTime
+ == pParams->duration
+ || pSettings->pClipList[i]->xVSS.uiDuration
+ == pParams->duration)
+ && pSettings->pClipList[i]->xVSS.MediaRendering
+ == pParams->MediaRendering )
+
+
+
+ {
+ /* Replace JPG filename with existing 3GP filename */
+ goto replaceARGB_3GP;
+ }
+ /* We need to update this variable, in case some pictures have been
+ added between two */
+ /* calls to M4xVSS_sendCommand */
+ pPto3GPP_last = pParams;
+ pParams = pParams->pNext;
+ }
+ }
+
+ /* Construct output temporary 3GP filename */
+ err = M4OSA_chrSPrintf(out_img, M4XVSS_MAX_PATH_LEN - 1, (M4OSA_Char *)"%simg%d.3gp",
+ xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return err;
+ }
+
+ #ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+ err = M4OSA_chrSPrintf(out_img_tmp, M4XVSS_MAX_PATH_LEN - 1, "%simg%d.tmp",
+ xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return err;
+ }
+
+ #endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+ xVSS_context->tempFileIndex++;
+
+ /* Allocate last element Pto3GPP params structure */
+ pParams = (M4xVSS_Pto3GPP_params
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_Pto3GPP_params),
+ M4VS, (M4OSA_Char *)"Element of Pto3GPP Params");
+
+ if( pParams == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "M4xVSS_sendCommand: Problem when allocating one element Pto3GPP Params");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ /* Initializes pfilexxx members of pParams to be able to free them correctly */
+ pParams->pFileIn = M4OSA_NULL;
+ pParams->pFileOut = M4OSA_NULL;
+ pParams->pFileTemp = M4OSA_NULL;
+ pParams->pNext = M4OSA_NULL;
+ pParams->MediaRendering = M4xVSS_kResizing;
+
+ /*To support ARGB8888 :get the width and height */
+ pParams->height = pSettings->pClipList[
+ i]->ClipProperties.uiStillPicHeight; //ARGB_Height;
+ pParams->width = pSettings->pClipList[
+ i]->ClipProperties.uiStillPicWidth; //ARGB_Width;
+ M4OSA_TRACE3_1("CLIP M4xVSS_SendCommand ARGB8888 H = %d", pParams->height);
+ M4OSA_TRACE3_1("CLIP M4xVSS_SendCommand ARGB8888 W = %d", pParams->width);
+
+ if( xVSS_context->pPTo3GPPparamsList
+ == M4OSA_NULL ) /* Means it is the first element of the list */
+ {
+ /* Initialize the xVSS context with the first element of the list */
+ xVSS_context->pPTo3GPPparamsList = pParams;
+
+ /* Save this element in case of other file to convert */
+ pPto3GPP_last = pParams;
+ }
+ else
+ {
+ /* Update next pointer of the previous last element of the chain */
+ pPto3GPP_last->pNext = pParams;
+
+ /* Update save of last element of the chain */
+ pPto3GPP_last = pParams;
+ }
+
+ /* Fill the last M4xVSS_Pto3GPP_params element */
+ pParams->duration =
+ xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
+ /* If duration is filled, let's use it instead of EndCutTime */
+ if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
+ {
+ pParams->duration =
+ xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
+ }
+
+ pParams->InputFileType = M4VIDEOEDITING_kFileType_ARGB8888;
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+ length = strlen(pDecodedPath);
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+ *)xVSS_context->pSettings->pClipList[i]->pFile,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ pParams->pFileIn = (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS,
+ (M4OSA_Char *)"Pto3GPP Params: file in");
+
+ if( pParams->pFileIn == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pParams->pFileIn, (void *)pDecodedPath,
+ (length + 1)); /* Copy input file path */
+
+ /* Check that JPG file is present on the FS (P4ME00002974) by just opening
+ and closing it */
+ err =
+ xVSS_context->pFileReadPtr->openRead(&pARGBFileIn, pDecodedPath,
+ M4OSA_kFileRead);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_2("Can't open input jpg file %s, error: 0x%x\n",
+ pDecodedPath, err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ err = xVSS_context->pFileReadPtr->closeRead(pARGBFileIn);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_2("Can't close input jpg file %s, error: 0x%x\n",
+ pDecodedPath, err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = out_img;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)out_img, (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ pParams->pFileOut = (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
+ (M4OSA_Char *)"Pto3GPP Params: file out");
+
+ if( pParams->pFileOut == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pParams->pFileOut, (void *)pDecodedPath,
+ (length + 1)); /* Copy output file path */
+
+ #ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+
+ pDecodedPath = out_img_tmp;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)out_img_tmp, (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1("M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8\
+ returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ pParams->pFileTemp = (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
+ (M4OSA_Char *)"Pto3GPP Params: file temp");
+
+ if( pParams->pFileTemp == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pParams->pFileTemp, (void *)pDecodedPath,
+ (length + 1)); /* Copy temporary file path */
+
+ #endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+ /* Fill PanAndZoom settings if needed */
+
+ if( M4OSA_TRUE
+ == xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom )
+ {
+ pParams->isPanZoom =
+ xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom;
+ /* Check that Pan & Zoom parameters are correct */
+ if( xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa > 1000
+ || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa
+ <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+ PanZoomTopleftXa > 1000
+ || xVSS_context->pSettings->pClipList[i]->xVSS.
+ PanZoomTopleftYa > 1000
+ || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+ > 1000
+ || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+ <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+ PanZoomTopleftXb > 1000
+ || xVSS_context->pSettings->pClipList[i]->xVSS.
+ PanZoomTopleftYb > 1000)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_PARAMETER;
+ }
+
+ pParams->PanZoomXa =
+ xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa;
+ pParams->PanZoomTopleftXa =
+ xVSS_context->pSettings->
+ pClipList[i]->xVSS.PanZoomTopleftXa;
+ pParams->PanZoomTopleftYa =
+ xVSS_context->pSettings->
+ pClipList[i]->xVSS.PanZoomTopleftYa;
+ pParams->PanZoomXb =
+ xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb;
+ pParams->PanZoomTopleftXb =
+ xVSS_context->pSettings->
+ pClipList[i]->xVSS.PanZoomTopleftXb;
+ pParams->PanZoomTopleftYb =
+ xVSS_context->pSettings->
+ pClipList[i]->xVSS.PanZoomTopleftYb;
+ }
+ else
+ {
+ pParams->isPanZoom = M4OSA_FALSE;
+ }
+ /*+ PR No: blrnxpsw#223*/
+ /*Initializing the Video Frame Rate as it may not be initialized*/
+ /*Other changes made is @ M4xVSS_Internal.c @ line 1518 in
+ M4xVSS_internalStartConvertPictureTo3gp*/
+ switch( xVSS_context->pSettings->videoFrameRate )
+ {
+ case M4VIDEOEDITING_k30_FPS:
+ pParams->framerate = 33;
+ break;
+
+ case M4VIDEOEDITING_k25_FPS:
+ pParams->framerate = 40;
+ break;
+
+ case M4VIDEOEDITING_k20_FPS:
+ pParams->framerate = 50;
+ break;
+
+ case M4VIDEOEDITING_k15_FPS:
+ pParams->framerate = 66;
+ break;
+
+ case M4VIDEOEDITING_k12_5_FPS:
+ pParams->framerate = 80;
+ break;
+
+ case M4VIDEOEDITING_k10_FPS:
+ pParams->framerate = 100;
+ break;
+
+ case M4VIDEOEDITING_k7_5_FPS:
+ pParams->framerate = 133;
+ break;
+
+ case M4VIDEOEDITING_k5_FPS:
+ pParams->framerate = 200;
+ break;
+
+ default:
+ /*Making Default Frame Rate @ 15 FPS*/
+ pParams->framerate = 66;
+ break;
+ }
+ /*-PR No: blrnxpsw#223*/
+ if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
+ == M4xVSS_kCropping
+ || xVSS_context->pSettings->pClipList[i]->xVSS.
+ MediaRendering == M4xVSS_kBlackBorders
+ || xVSS_context->pSettings->pClipList[i]->xVSS.
+ MediaRendering == M4xVSS_kResizing )
+ {
+ pParams->MediaRendering =
+ xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering;
+ }
+
+ pParams->pNext = M4OSA_NULL;
+ pParams->isCreated = M4OSA_FALSE;
+ xVSS_context->nbStepTotal++;
+ /* Set bTranscodingRequired to TRUE to indicate the Ken Burns video has
+ * been generated in analysis phase, and does not need to be transcoded again
+ * in saving phase */
+ xVSS_context->pSettings->pClipList[i]->bTranscodingRequired =
+ M4OSA_TRUE;
+
+ replaceARGB_3GP:
+ /* Update total duration */
+ totalDuration += pParams->duration;
+
+ /* Replacing in VSS structure the JPG file by the 3gp file */
+ xVSS_context->pSettings->pClipList[i]->FileType =
+ M4VIDEOEDITING_kFileType_3GPP;
+
+ if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pSettings->pClipList[i]->pFile);
+ xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
+ }
+
+ /**
+ * UTF conversion: convert into UTF8, before being used*/
+ pDecodedPath = pParams->pFileOut;
+
+ if( xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertToUTF8(xVSS_context,
+ (M4OSA_Void *)pParams->pFileOut,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: \
+ 0x%x",err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+ else
+ {
+ length = strlen(pDecodedPath);
+ }
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_32bitAlignedMalloc((length
+ + 1), M4VS, (M4OSA_Char *)"xVSS file path of ARGB to 3gp");
+
+ if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pSettings->pClipList[i]->pFile,
+ (void *)pDecodedPath, (length + 1));
+ /*FB: add file path size because of UTF16 conversion*/
+ xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
+ }
+ }
+ /************************
+ 3GP input file type case
+ *************************/
+ else if( xVSS_context->pSettings->pClipList[i]->FileType
+ == M4VIDEOEDITING_kFileType_3GPP
+ || xVSS_context->pSettings->pClipList[i]->FileType
+ == M4VIDEOEDITING_kFileType_MP4
+ || xVSS_context->pSettings->pClipList[i]->FileType
+ == M4VIDEOEDITING_kFileType_M4V )
+ {
+ /*UTF conversion support*/
+ M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+ /* Need to call MCS in case 3GP video/audio types are not compatible
+ (H263/MPEG4 or AMRNB/AAC) */
+ /* => Need to fill MCS_Params structure with the right parameters ! */
+ /* Need also to parse MCS params struct to check if file has already been transcoded */
+
+ M4VIDEOEDITING_ClipProperties fileProperties;
+ M4xVSS_MCS_params *pParams;
+ M4OSA_Bool audioIsDifferent = M4OSA_FALSE;
+ M4OSA_Bool videoIsDifferent = M4OSA_FALSE;
+ M4OSA_Bool bAudioMono;
+ /* Initialize file properties structure */
+
+ memset((void *) &fileProperties,0,
+ sizeof(M4VIDEOEDITING_ClipProperties));
+
+ //fileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+
+ /* Prevent from bad initializing of percentage cut time */
+ if( xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent
+ > 100 || xVSS_context->pSettings->pClipList[i]->xVSS.
+ uiBeginCutPercent > 100 )
+ {
+ /* These percentage cut time have probably not been initialized */
+ /* Let's not use them by setting them to 0 */
+ xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent = 0;
+ xVSS_context->pSettings->pClipList[i]->xVSS.uiBeginCutPercent =
+ 0;
+ }
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+ *)xVSS_context->pSettings->pClipList[i]->pFile,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+ &fileProperties);
+
+ if( err != M4NO_ERROR )
+ {
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_1(
+ "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned 0x%x",
+ err);
+ /* TODO: Translate error code of MCS to an xVSS error code */
+ return err;
+ }
+
+ /* Parse MCS params chained list to know if input file has already been converted */
+ if( xVSS_context->pMCSparamsList != M4OSA_NULL )
+ {
+ M4OSA_UInt32 pCmpResult = 0;
+
+ pParams = xVSS_context->pMCSparamsList;
+ /* We parse all MCS Param chained list */
+ while( pParams != M4OSA_NULL )
+ {
+
+ /**
+ * UTF conversion: convert into UTF8, before being used*/
+ pDecodedPath = pParams->pFileIn;
+
+ if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertToUTF8(xVSS_context,
+ (M4OSA_Void *)pParams->pFileIn,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.
+ pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err:\
+ 0x%x", err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath = xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ pCmpResult = strcmp((const char *)pSettings->pClipList[i]->pFile,
+ (const char *)pDecodedPath);
+
+ /* If input filenames are the same, and if this is not a BGM, we can reuse
+ the transcoded file */
+ if( pCmpResult == 0 && pParams->isBGM == M4OSA_FALSE
+ && pParams->BeginCutTime
+ == pSettings->pClipList[i]->uiBeginCutTime
+ && (pParams->EndCutTime
+ == pSettings->pClipList[i]->uiEndCutTime
+ || pParams->EndCutTime
+ == pSettings->pClipList[i]->uiBeginCutTime
+ + pSettings->pClipList[i]->xVSS.uiDuration)
+ && pSettings->pClipList[i]->xVSS.MediaRendering
+ == pParams->MediaRendering )
+ {
+ if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+ if( pSettings->xVSS.pBGMtrack->uiAddVolume == 100
+ || (pParams->OutputAudioFormat
+ == M4VIDEOEDITING_kNullAudio
+ && fileProperties.AudioStreamType
+ == pSettings->xVSS.outputAudioFormat)
+ || pParams->OutputAudioFormat
+ == pSettings->xVSS.outputAudioFormat
+ || fileProperties.AudioStreamType
+ == M4VIDEOEDITING_kNoneAudio )
+ {
+ /* Replace 3GP filename with transcoded 3GP filename */
+ goto replace3GP_3GP;
+ }
+ }
+ else if( ( pParams->OutputAudioFormat
+ == M4VIDEOEDITING_kNullAudio
+ && fileProperties.AudioStreamType
+ == pSettings->xVSS.outputAudioFormat)
+ || pParams->OutputAudioFormat
+ == pSettings->xVSS.outputAudioFormat
+ || fileProperties.AudioStreamType
+ == M4VIDEOEDITING_kNoneAudio )
+ {
+ /* Replace 3GP filename with transcoded 3GP filename */
+ goto replace3GP_3GP;
+ }
+ }
+
+ /* We need to update this variable, in case some 3GP files have been added
+ between two */
+ /* calls to M4xVSS_sendCommand */
+ pMCS_last = pParams;
+ pParams = pParams->pNext;
+ }
+ }
+
+ /* If we have percentage information let's use it... */
+ if( xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent != 0
+ || xVSS_context->pSettings->pClipList[i]->xVSS.uiBeginCutPercent
+ != 0 )
+ {
+ /* If percentage information are not correct and if duration field is not filled */
+ if( ( xVSS_context->pSettings->pClipList[i]->xVSS.
+ uiEndCutPercent
+ <= xVSS_context->pSettings->pClipList[i]->xVSS.
+ uiBeginCutPercent)
+ && xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration
+ == 0 )
+ {
+ M4OSA_TRACE1_0(
+ "M4xVSS_sendCommand: Bad percentage for begin and end cut time !");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_PARAMETER;
+ }
+
+ /* We transform the percentage into absolute time */
+ xVSS_context->pSettings->pClipList[i]->uiBeginCutTime
+ = (M4OSA_UInt32)(
+ xVSS_context->pSettings->pClipList[i]->xVSS.
+ uiBeginCutPercent
+ * fileProperties.uiClipDuration / 100);
+ xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+ = (M4OSA_UInt32)(
+ xVSS_context->pSettings->pClipList[i]->xVSS.
+ uiEndCutPercent
+ * fileProperties.uiClipDuration / 100);
+ }
+ /* ...Otherwise, we use absolute time. */
+ else
+ {
+ /* If endCutTime == 0, it means all the file is taken. Let's change to the file
+ duration, to accurate preview. */
+ if( xVSS_context->pSettings->pClipList[i]->uiEndCutTime == 0
+ || xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+ > fileProperties.uiClipDuration )
+ {
+ xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+ fileProperties.uiClipDuration;
+ }
+ }
+
+ /* If duration field is filled, it has priority on other fields on EndCutTime,
+ so let's use it */
+ if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
+ {
+ xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+ xVSS_context->pSettings->pClipList[i]->uiBeginCutTime
+ +xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
+
+ if( xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+ > fileProperties.uiClipDuration )
+ {
+ xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+ fileProperties.uiClipDuration;
+ }
+ }
+
+ /* If output video format is not set, we take video format of the first 3GP video */
+ if( xVSS_context->pSettings->xVSS.outputVideoFormat
+ == M4VIDEOEDITING_kNoneVideo )
+ {
+ //xVSS_context->pSettings->xVSS.outputVideoFormat = fileProperties.VideoStreamType;
+ //M4OSA_TRACE2_1("Output video format is not set, set it to current clip: %d",
+ // xVSS_context->pSettings->xVSS.outputVideoFormat);
+ M4OSA_TRACE1_0(
+ "Output video format is not set, an error parameter is returned.");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_PARAMETER;
+ }
+
+ if( xVSS_context->pSettings->xVSS.outputAudioFormat
+ == M4VIDEOEDITING_kNoneAudio )
+ {
+ //xVSS_context->pSettings->xVSS.outputAudioFormat = fileProperties.AudioStreamType;
+ M4OSA_TRACE2_1(
+ "Output audio format is not set -> remove audio track of clip: %d",
+ i);
+ }
+
+ if( fileProperties.uiNbChannels == 1 )
+ {
+ bAudioMono = M4OSA_TRUE;
+ }
+ else
+ {
+ bAudioMono = M4OSA_FALSE;
+ }
+
+ if( fileProperties.AudioStreamType
+ != xVSS_context->pSettings->xVSS.outputAudioFormat
+ || (fileProperties.AudioStreamType == M4VIDEOEDITING_kAAC
+ && (fileProperties.uiSamplingFrequency != samplingFreq
+ || bAudioMono
+ != xVSS_context->pSettings->xVSS.bAudioMono)) )
+ {
+ audioIsDifferent = M4OSA_TRUE;
+ /* If we want to replace audio, there is no need to transcode audio */
+ if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+ /* temp fix: PT volume not heard in the second clip */
+ if( /*(pSettings->xVSS.pBGMtrack->uiAddVolume == 100
+ && xVSS_context->pSettings->xVSS.outputFileSize == 0)
+ ||*/
+ fileProperties.AudioStreamType
+ == M4VIDEOEDITING_kNoneAudio ) /*11/12/2008 CR 3283 VAL for the MMS
+ use case, we need to transcode except the media without audio*/
+ {
+ audioIsDifferent = M4OSA_FALSE;
+ }
+ }
+ else if( fileProperties.AudioStreamType
+ == M4VIDEOEDITING_kNoneAudio )
+ {
+ audioIsDifferent = M4OSA_FALSE;
+ }
+ }
+ /* Here check the clip video profile and level, if it exceeds
+ * the profile and level of export file, then the file needs
+ * to be transcoded(do not do compress domain trim).
+ * Also for MPEG4 format, always do transcoding since HW encoder
+ * may use different time scale value than the input clip*/
+ if ((fileProperties.uiVideoProfile >
+ xVSS_context->pSettings->xVSS.outputVideoProfile) ||
+ (fileProperties.uiVideoLevel >
+ xVSS_context->pSettings->xVSS.outputVideoLevel) ||
+ (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)) {
+ /* Set bTranscodingRequired to TRUE to indicate the video will be
+ * transcoded in MCS. */
+ xVSS_context->pSettings->pClipList[i]->bTranscodingRequired =
+ M4OSA_TRUE;
+ videoIsDifferent = M4OSA_TRUE;
+ }
+
+ if( videoIsDifferent == M4OSA_TRUE || audioIsDifferent == M4OSA_TRUE)
+ {
+ M4OSA_Char out_3gp[M4XVSS_MAX_PATH_LEN];
+ M4OSA_Char out_3gp_tmp[M4XVSS_MAX_PATH_LEN];
+
+ /* Construct output temporary 3GP filename */
+ err = M4OSA_chrSPrintf(out_3gp, M4XVSS_MAX_PATH_LEN - 1, (M4OSA_Char *)"%svid%d.3gp",
+ xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+ return err;
+ }
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+ err = M4OSA_chrSPrintf(out_3gp_tmp, M4XVSS_MAX_PATH_LEN - 1, "%svid%d.tmp",
+ xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+ return err;
+ }
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+ xVSS_context->tempFileIndex++;
+
+ pParams =
+ (M4xVSS_MCS_params *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_MCS_params),
+ M4VS, (M4OSA_Char *)"Element of MCS Params (for 3GP)");
+
+ if( pParams == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "M4xVSS_sendCommand: Problem when allocating one element MCS Params");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ pParams->MediaRendering = M4xVSS_kResizing;
+ pParams->videoclipnumber = i; // Indicates video clip index
+
+ if( xVSS_context->pMCSparamsList
+ == M4OSA_NULL ) /* Means it is the first element of the list */
+ {
+ /* Initialize the xVSS context with the first element of the list */
+ xVSS_context->pMCSparamsList = pParams;
+ }
+ else
+ {
+ /* Update next pointer of the previous last element of the chain */
+ pMCS_last->pNext = pParams;
+ }
+
+ /* Save this element in case of other file to convert */
+ pMCS_last = pParams;
+
+ /* Fill the last M4xVSS_MCS_params element */
+ pParams->InputFileType = M4VIDEOEDITING_kFileType_3GPP;
+ pParams->OutputFileType = M4VIDEOEDITING_kFileType_3GPP;
+
+ pParams->OutputVideoTimescale = xVSS_context->targetedTimescale;
+
+ /* We do not need to reencode video if its parameters do not differ */
+ /* from output settings parameters */
+ if( videoIsDifferent == M4OSA_TRUE )
+ {
+ pParams->OutputVideoFormat =
+ xVSS_context->pSettings->xVSS.outputVideoFormat;
+ pParams->outputVideoProfile =
+ xVSS_context->pSettings->xVSS.outputVideoProfile;
+ pParams->outputVideoLevel =
+ xVSS_context->pSettings->xVSS.outputVideoLevel;
+ pParams->OutputVideoFrameRate =
+ xVSS_context->pSettings->videoFrameRate;
+ pParams->OutputVideoFrameSize =
+ xVSS_context->pSettings->xVSS.outputVideoSize;
+
+ /*FB: VAL CR P4ME00003076
+ The output video bitrate is now directly given by the user in the edition
+ settings structure If the bitrate given by the user is irrelevant
+ (the MCS minimum and maximum video bitrate are used),
+ the output video bitrate is hardcoded according to the output video size*/
+ if( xVSS_context->pSettings->xVSS.outputVideoBitrate
+ >= M4VIDEOEDITING_k16_KBPS
+ && xVSS_context->pSettings->xVSS.outputVideoBitrate
+ <= M4VIDEOEDITING_k8_MBPS ) /*+ New Encoder bitrates */
+ {
+ pParams->OutputVideoBitrate =
+ xVSS_context->pSettings->xVSS.outputVideoBitrate;
+ }
+ else
+ {
+ switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+ {
+ case M4VIDEOEDITING_kSQCIF:
+ pParams->OutputVideoBitrate =
+ M4VIDEOEDITING_k48_KBPS;
+ break;
+
+ case M4VIDEOEDITING_kQQVGA:
+ pParams->OutputVideoBitrate =
+ M4VIDEOEDITING_k64_KBPS;
+ break;
+
+ case M4VIDEOEDITING_kQCIF:
+ pParams->OutputVideoBitrate =
+ M4VIDEOEDITING_k128_KBPS;
+ break;
+
+ case M4VIDEOEDITING_kQVGA:
+ pParams->OutputVideoBitrate =
+ M4VIDEOEDITING_k384_KBPS;
+ break;
+
+ case M4VIDEOEDITING_kCIF:
+ pParams->OutputVideoBitrate =
+ M4VIDEOEDITING_k384_KBPS;
+ break;
+
+ case M4VIDEOEDITING_kVGA:
+ pParams->OutputVideoBitrate =
+ M4VIDEOEDITING_k512_KBPS;
+ break;
+
+ default: /* Should not happen !! */
+ pParams->OutputVideoBitrate =
+ M4VIDEOEDITING_k64_KBPS;
+ break;
+ }
+ }
+ }
+ else
+ {
+ pParams->outputVideoProfile =
+ xVSS_context->pSettings->xVSS.outputVideoProfile;
+ pParams->outputVideoLevel =
+ xVSS_context->pSettings->xVSS.outputVideoLevel;
+ pParams->OutputVideoFormat = M4VIDEOEDITING_kNullVideo;
+ pParams->OutputVideoFrameRate =
+ M4VIDEOEDITING_k15_FPS; /* Must be set, otherwise, MCS returns an error */
+ }
+
+ if( audioIsDifferent == M4OSA_TRUE )
+ {
+ pParams->OutputAudioFormat =
+ xVSS_context->pSettings->xVSS.outputAudioFormat;
+
+ switch( xVSS_context->pSettings->xVSS.outputAudioFormat )
+ {
+ case M4VIDEOEDITING_kNoneAudio:
+ break;
+
+ case M4VIDEOEDITING_kAMR_NB:
+ pParams->OutputAudioBitrate =
+ M4VIDEOEDITING_k12_2_KBPS;
+ pParams->bAudioMono = M4OSA_TRUE;
+ pParams->OutputAudioSamplingFrequency =
+ M4VIDEOEDITING_kDefault_ASF;
+ break;
+
+ case M4VIDEOEDITING_kAAC:
+ {
+ /*FB: VAL CR P4ME00003076
+ The output audio bitrate in the AAC case is now directly given by
+ the user in the edition settings structure
+ If the bitrate given by the user is irrelevant or undefined
+ (the MCS minimum and maximum audio bitrate are used),
+ the output audio bitrate is hard coded according to the output
+ audio sampling frequency*/
+
+ /*Check if the audio bitrate is correctly defined*/
+
+ /*Mono
+ MCS values for AAC Mono are min: 16kbps and max: 192 kbps*/
+ if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+ >= M4VIDEOEDITING_k16_KBPS
+ && xVSS_context->pSettings->
+ xVSS.outputAudioBitrate
+ <= M4VIDEOEDITING_k192_KBPS
+ && xVSS_context->pSettings->xVSS.bAudioMono
+ == M4OSA_TRUE )
+ {
+ pParams->OutputAudioBitrate =
+ xVSS_context->pSettings->
+ xVSS.outputAudioBitrate;
+ }
+ /*Stereo
+ MCS values for AAC Mono are min: 32kbps and max: 192 kbps*/
+ else if( xVSS_context->pSettings->
+ xVSS.outputAudioBitrate
+ >= M4VIDEOEDITING_k32_KBPS
+ && xVSS_context->pSettings->
+ xVSS.outputAudioBitrate
+ <= M4VIDEOEDITING_k192_KBPS
+ && xVSS_context->pSettings->xVSS.bAudioMono
+ == M4OSA_FALSE )
+ {
+ pParams->OutputAudioBitrate =
+ xVSS_context->pSettings->
+ xVSS.outputAudioBitrate;
+ }
+
+ /*The audio bitrate is hard coded according to the output audio
+ sampling frequency*/
+ else
+ {
+ switch( xVSS_context->pSettings->
+ xVSS.outputAudioSamplFreq )
+ {
+ case M4VIDEOEDITING_k16000_ASF:
+ pParams->OutputAudioBitrate =
+ M4VIDEOEDITING_k24_KBPS;
+ break;
+
+ case M4VIDEOEDITING_k22050_ASF:
+ case M4VIDEOEDITING_k24000_ASF:
+ pParams->OutputAudioBitrate =
+ M4VIDEOEDITING_k32_KBPS;
+ break;
+
+ case M4VIDEOEDITING_k32000_ASF:
+ pParams->OutputAudioBitrate =
+ M4VIDEOEDITING_k48_KBPS;
+ break;
+
+ case M4VIDEOEDITING_k44100_ASF:
+ case M4VIDEOEDITING_k48000_ASF:
+ pParams->OutputAudioBitrate =
+ M4VIDEOEDITING_k64_KBPS;
+ break;
+
+ default:
+ pParams->OutputAudioBitrate =
+ M4VIDEOEDITING_k64_KBPS;
+ break;
+ }
+
+ if( xVSS_context->pSettings->xVSS.bAudioMono
+ == M4OSA_FALSE )
+ {
+ /* Output bitrate have to be doubled */
+ pParams->OutputAudioBitrate +=
+ pParams->OutputAudioBitrate;
+ }
+ }
+
+ pParams->bAudioMono =
+ xVSS_context->pSettings->xVSS.bAudioMono;
+
+ if( xVSS_context->pSettings->
+ xVSS.outputAudioSamplFreq
+ == M4VIDEOEDITING_k8000_ASF )
+ {
+ /* Prevent from unallowed sampling frequencies */
+ pParams->OutputAudioSamplingFrequency =
+ M4VIDEOEDITING_kDefault_ASF;
+ }
+ else
+ {
+ pParams->OutputAudioSamplingFrequency =
+ xVSS_context->pSettings->
+ xVSS.outputAudioSamplFreq;
+ }
+ break;
+ }
+
+ default: /* Should not happen !! */
+ pParams->OutputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+ pParams->OutputAudioBitrate =
+ M4VIDEOEDITING_k12_2_KBPS;
+ pParams->bAudioMono = M4OSA_TRUE;
+ pParams->OutputAudioSamplingFrequency =
+ M4VIDEOEDITING_kDefault_ASF;
+ break;
+ }
+ }
+ else
+ {
+ pParams->OutputAudioFormat = M4VIDEOEDITING_kNullAudio;
+ }
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)xVSS_context->pSettings->
+ pClipList[i]->pFile,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath = xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ pParams->pFileIn =
+ (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
+ (M4OSA_Char *)"MCS 3GP Params: file in");
+
+ if( pParams->pFileIn == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pParams->pFileIn, (void *)pDecodedPath,
+ (length + 1)); /* Copy input file path */
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = out_3gp;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)out_3gp, (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath = xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ pParams->pFileOut =
+ (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
+ (M4OSA_Char *)"MCS 3GP Params: file out");
+
+ if( pParams->pFileOut == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pParams->pFileOut, (void *)pDecodedPath,
+ (length + 1)); /* Copy output file path */
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+
+ pDecodedPath = out_3gp_tmp;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)out_3gp_tmp,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath = xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ pParams->pFileTemp =
+ (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
+ (M4OSA_Char *)"MCS 3GP Params: file temp");
+
+ if( pParams->pFileTemp == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pParams->pFileTemp, (void *)pDecodedPath,
+ (length + 1)); /* Copy temporary file path */
+
+#else
+
+ pParams->pFileTemp = M4OSA_NULL;
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+ /*FB 2008/10/20 keep media aspect ratio, add media rendering parameter*/
+
+ if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
+ == M4xVSS_kCropping
+ || xVSS_context->pSettings->pClipList[i]->xVSS.
+ MediaRendering == M4xVSS_kBlackBorders
+ || xVSS_context->pSettings->pClipList[i]->xVSS.
+ MediaRendering == M4xVSS_kResizing )
+ {
+ pParams->MediaRendering =
+ xVSS_context->pSettings->pClipList[i]->xVSS.
+ MediaRendering;
+ }
+
+ /*FB: transcoding per parts*/
+ pParams->BeginCutTime =
+ xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+ pParams->EndCutTime =
+ xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
+
+ pParams->pNext = M4OSA_NULL;
+ pParams->isBGM = M4OSA_FALSE;
+ pParams->isCreated = M4OSA_FALSE;
+ xVSS_context->nbStepTotal++;
+ bIsTranscoding = M4OSA_TRUE;
+
+replace3GP_3GP:
+ /* Update total duration */
+ totalDuration +=
+ xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+ - xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+
+ /* Replacing in VSS structure the original 3GP file by the transcoded 3GP file */
+ xVSS_context->pSettings->pClipList[i]->FileType =
+ M4VIDEOEDITING_kFileType_3GPP;
+
+ if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pSettings->pClipList[i]->pFile);
+ xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
+ }
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = pParams->pFileOut;
+
+ if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertToUTF8(xVSS_context,
+ (M4OSA_Void *)pParams->pFileOut,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath = xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer;
+ }
+ else
+ {
+ length = strlen(pDecodedPath);
+ }
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_32bitAlignedMalloc(
+ (length + 1),
+ M4VS, (M4OSA_Char *)"xVSS file path of 3gp to 3gp");
+
+ if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pSettings->pClipList[i]->pFile,
+ (void *)pDecodedPath, (length + 1));
+ /*FB: add file path size because of UTF 16 conversion*/
+ xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
+
+ /* We define master clip as first 3GP input clip */
+ /*if(xVSS_context->pSettings->uiMasterClip == 0 && fileProperties.
+ AudioStreamType != M4VIDEOEDITING_kNoneAudio)
+ {
+ xVSS_context->pSettings->uiMasterClip = i;
+ }*/
+ }
+ else
+ {
+ /* Update total duration */
+ totalDuration +=
+ xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+ - xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+ }
+ /* We define master clip as first 3GP input clip */
+ if( masterClip == -1
+ && fileProperties.AudioStreamType != M4VIDEOEDITING_kNoneAudio )
+ {
+ masterClip = i;
+ xVSS_context->pSettings->uiMasterClip = i;
+ }
+
+ }
+ /**************************
+ Other input file type case
+ ***************************/
+ else
+ {
+ M4OSA_TRACE1_0("Bad file type as input clip");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_PARAMETER;
+ }
+ }
+
+ /*********************************************************
+ * Parse all effects to make some adjustment for framing, *
+ * text and to transform relative time into absolute time *
+ **********************************************************/
+ for ( j = 0; j < xVSS_context->pSettings->nbEffects; j++ )
+ {
+ /* Copy effect to "local" structure */
+ memcpy((void *) &(xVSS_context->pSettings->Effects[j]),
+ (void *) &(pSettings->Effects[j]),
+ sizeof(M4VSS3GPP_EffectSettings));
+
+ /* Prevent from bad initializing of effect percentage time */
+ if( xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent > 100
+ || xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent > 100 )
+ {
+ /* These percentage time have probably not been initialized */
+ /* Let's not use them by setting them to 0 */
+ xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent = 0;
+ xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent = 0;
+ }
+
+ /* If we have percentage information let's use it... Otherwise, we use absolute time. */
+ if( xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent != 0 )
+ {
+ xVSS_context->pSettings->
+ Effects[j].uiStartTime = (M4OSA_UInt32)(totalDuration
+ * xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent
+ / 100);
+ /* The percentage of effect duration is based on the duration of the clip -
+ start time */
+ xVSS_context->pSettings->
+ Effects[j].uiDuration = (M4OSA_UInt32)(totalDuration
+ * xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent
+ / 100);
+ }
+
+ /* If there is a framing effect, we need to allocate framing effect structure */
+ if( xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Framing )
+ {
+#ifdef DECODE_GIF_ON_SAVING
+
+ M4xVSS_FramingContext *framingCtx;
+ /*UTF conversion support*/
+ M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+#else
+
+ M4xVSS_FramingStruct *framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+ M4OSA_Char *pExt2 = M4OSA_NULL;
+ M4VIFI_ImagePlane *pPlane =
+ xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+ M4OSA_Int32 result1, result2;
+
+ /* Copy framing file path */
+ if( pSettings->Effects[j].xVSS.pFramingFilePath != M4OSA_NULL )
+ {
+ xVSS_context->pSettings->
+ Effects[j].xVSS.pFramingFilePath = M4OSA_32bitAlignedMalloc(
+ strlen(pSettings->Effects[j].xVSS.pFramingFilePath)
+ + 1, M4VS, (M4OSA_Char *)"Local Framing file path");
+
+ if( xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath
+ == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pSettings->
+ Effects[j].xVSS.pFramingFilePath,
+ (void *)pSettings->
+ Effects[j].xVSS.pFramingFilePath, strlen(
+ pSettings->Effects[j].xVSS.pFramingFilePath) + 1);
+
+ pExt2 =
+ xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+ }
+
+#ifdef DECODE_GIF_ON_SAVING
+
+ framingCtx = (M4xVSS_FramingContext
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingContext),
+ M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+ if( framingCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ framingCtx->aFramingCtx = M4OSA_NULL;
+ framingCtx->aFramingCtx_last = M4OSA_NULL;
+ framingCtx->pSPSContext = M4OSA_NULL;
+ framingCtx->outputVideoSize =
+ xVSS_context->pSettings->xVSS.outputVideoSize;
+ framingCtx->topleft_x =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+ framingCtx->topleft_y =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+ framingCtx->bEffectResize =
+ xVSS_context->pSettings->Effects[j].xVSS.bResize;
+ framingCtx->pEffectFilePath =
+ xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+ framingCtx->pFileReadPtr = xVSS_context->pFileReadPtr;
+ framingCtx->pFileWritePtr = xVSS_context->pFileWritePtr;
+ framingCtx->effectDuration =
+ xVSS_context->pSettings->Effects[j].uiDuration;
+ framingCtx->b_IsFileGif = M4OSA_FALSE;
+ framingCtx->alphaBlendingStruct = M4OSA_NULL;
+ framingCtx->b_animated = M4OSA_FALSE;
+
+ /* Output ratio for the effect is stored in uiFiftiesOutFrameRate parameters of the
+ extended xVSS effects structure */
+ if( xVSS_context->pSettings->Effects[j].xVSS.uiFiftiesOutFrameRate
+ != 0 )
+ {
+ framingCtx->frameDurationRatio =
+ (M4OSA_Float)(( xVSS_context->pSettings->
+ Effects[j].xVSS.uiFiftiesOutFrameRate) / 1000.0);
+ }
+ else
+ {
+ framingCtx->frameDurationRatio = 1.0;
+ }
+
+ /*Alpha blending*/
+ /*Check if the alpha blending parameters are corrects*/
+ if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime = 0;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime = 0;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingEnd > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingEnd = 100;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingMiddle > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingMiddle = 100;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingStart > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingStart = 100;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 0
+ || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 0 )
+ {
+ /*Allocate the alpha blending structure*/
+ framingCtx->alphaBlendingStruct =
+ (M4xVSS_internalEffectsAlphaBlending *)M4OSA_32bitAlignedMalloc(
+ sizeof(M4xVSS_internalEffectsAlphaBlending),
+ M4VS, (M4OSA_Char *)"alpha blending structure");
+
+ if( framingCtx->alphaBlendingStruct == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_ALLOC;
+ }
+ /*Fill the alpha blending structure*/
+ framingCtx->alphaBlendingStruct->m_fadeInTime =
+ pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime;
+ framingCtx->alphaBlendingStruct->m_fadeOutTime =
+ pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime;
+ framingCtx->alphaBlendingStruct->m_end =
+ pSettings->Effects[j].xVSS.uialphaBlendingEnd;
+ framingCtx->alphaBlendingStruct->m_middle =
+ pSettings->Effects[j].xVSS.uialphaBlendingMiddle;
+ framingCtx->alphaBlendingStruct->m_start =
+ pSettings->Effects[j].xVSS.uialphaBlendingStart;
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
+ + pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+ > 100 )
+ {
+ framingCtx->alphaBlendingStruct->m_fadeOutTime =
+ 100 - framingCtx->alphaBlendingStruct->m_fadeInTime;
+ }
+ }
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath =
+ xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)xVSS_context->pSettings->
+ Effects[j].xVSS.pFramingFilePath,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ framingCtx->pEffectFilePath = M4OSA_32bitAlignedMalloc(length + 1, M4VS,
+ (M4OSA_Char *)"Local Framing file path");
+
+ if( framingCtx->pEffectFilePath == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)framingCtx->pEffectFilePath,
+ (void *)pDecodedPath, length + 1);
+
+ /* Save framing structure associated with corresponding effect */
+ xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ framingCtx;
+
+#else
+
+ framingCtx = (M4xVSS_FramingStruct
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
+ M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+ if( framingCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ framingCtx->topleft_x =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+ framingCtx->topleft_y =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+ /* BugFix 1.2.0: Leak when decoding error */
+ framingCtx->FramingRgb = M4OSA_NULL;
+ framingCtx->FramingYuv = M4OSA_NULL;
+ framingCtx->pNext = framingCtx;
+ /* Save framing structure associated with corresponding effect */
+ xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+ if( pExt2 != M4OSA_NULL )
+ {
+ /* Decode the image associated to the effect, and fill framing structure */
+ pExt2 += (strlen((const char *)pExt2) - 4);
+
+ result1 = strcmp((const char *)pExt2,(const char *)".rgb");
+ result2 = strcmp((const char *)pExt2,(const char *)".RGB");
+
+ if( 0 == result1 || 0 == result2 )
+ {
+#ifdef DECODE_GIF_ON_SAVING
+
+ framingCtx->aFramingCtx =
+ (M4xVSS_FramingStruct
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
+ M4VS,
+ (M4OSA_Char
+ *)
+ "M4xVSS_internalDecodeGIF: Context of the framing effect");
+
+ if( framingCtx->aFramingCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ /* TODO: Translate error code of SPS to an xVSS error code */
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_ALLOC;
+ }
+ framingCtx->aFramingCtx->pCurrent =
+ M4OSA_NULL; /* Only used by the first element of the chain */
+ framingCtx->aFramingCtx->previousClipTime = -1;
+ framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+ framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+ framingCtx->aFramingCtx->topleft_x =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+ framingCtx->aFramingCtx->topleft_y =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+ /*To support ARGB8888 : get the width and height */
+
+ framingCtx->aFramingCtx->width =
+ xVSS_context->pSettings->Effects[j].xVSS.width;
+ framingCtx->aFramingCtx->height =
+ xVSS_context->pSettings->Effects[j].xVSS.height;
+ M4OSA_TRACE1_1("FRAMMING BEFORE M4xVSS_SendCommand %d",
+ framingCtx->aFramingCtx->width);
+ M4OSA_TRACE1_1("FRAMMING BEFORE M4xVSS_SendCommand %d",
+ framingCtx->aFramingCtx->height);
+
+#endif
+
+ err = M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(
+ xVSS_context,
+ &(xVSS_context->pSettings->Effects[j]),
+ framingCtx->aFramingCtx,xVSS_context->pSettings->xVSS.outputVideoSize);
+ M4OSA_TRACE3_1("FRAMING WIDTH BEFORE M4xVSS_SendCommand %d",
+ framingCtx->aFramingCtx->width);
+ M4OSA_TRACE3_1("FRAMING HEIGHT BEFORE M4xVSS_SendCommand %d",
+ framingCtx->aFramingCtx->height);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalDecodePNG returned 0x%x",
+ err);
+ /* TODO: Translate error code of SPS to an xVSS error code */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ }
+ else
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: Not supported still picture format 0x%x",
+ err);
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_PARAMETER;
+ }
+ }
+ else if( pPlane != M4OSA_NULL )
+ {
+#ifdef DECODE_GIF_ON_SAVING
+
+ framingCtx->aFramingCtx = (M4xVSS_FramingStruct
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
+ M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+ if( framingCtx->aFramingCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ framingCtx->aFramingCtx->topleft_x =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+ framingCtx->aFramingCtx->topleft_y =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+ /* BugFix 1.2.0: Leak when decoding error */
+ framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+ framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+ framingCtx->aFramingCtx->pNext = framingCtx->aFramingCtx;
+ framingCtx->aFramingCtx->pCurrent = framingCtx->aFramingCtx;
+ framingCtx->aFramingCtx->duration = 0;
+ framingCtx->aFramingCtx->previousClipTime = -1;
+ framingCtx->aFramingCtx->FramingRgb =
+ xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+ /* Force input RGB buffer to even size to avoid errors in YUV conversion */
+ framingCtx->aFramingCtx->FramingRgb->u_width =
+ framingCtx->aFramingCtx->FramingRgb->u_width & ~1;
+ framingCtx->aFramingCtx->FramingRgb->u_height =
+ framingCtx->aFramingCtx->FramingRgb->u_height & ~1;
+ /* Input RGB plane is provided, let's convert it to YUV420, and update framing
+ structure */
+ err = M4xVSS_internalConvertRGBtoYUV(framingCtx->aFramingCtx);
+
+#else
+
+ framingCtx->FramingRgb =
+ xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+ /* Force input RGB buffer to even size to avoid errors in YUV conversion */
+ framingCtx->FramingRgb.u_width =
+ framingCtx->FramingRgb.u_width & ~1;
+ framingCtx->FramingRgb.u_height =
+ framingCtx->FramingRgb.u_height & ~1;
+ /* Input RGB plane is provided, let's convert it to YUV420, and update framing
+ structure */
+ err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
+
+#endif
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_sendCommand: error when converting RGB to YUV: 0w%x",
+ err);
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return err;
+ }
+ }
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4xVSS_sendCommand: No input image/plane provided for framing effect.");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_PARAMETER;
+ }
+ }
+ /* CR: Add text handling with external text interface */
+ /* If effect type is text, we call external text function to get RGB 565 buffer */
+ if( xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Text )
+ {
+ /* Call the font engine function pointer to get RGB565 buffer */
+ /* We transform text effect into framing effect from buffer */
+ if( xVSS_context->pSettings->xVSS.pTextRenderingFct != M4OSA_NULL )
+ {
+ /*FB: add UTF convertion for text buffer*/
+ M4OSA_Void *pDecodedPath = M4OSA_NULL;
+#ifdef DECODE_GIF_ON_SAVING
+
+ M4xVSS_FramingContext *framingCtx;
+
+#else
+
+ M4xVSS_FramingStruct *framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+#ifdef DECODE_GIF_ON_SAVING
+
+ framingCtx = (M4xVSS_FramingContext
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingContext),
+ M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+ if( framingCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ framingCtx->aFramingCtx = M4OSA_NULL;
+ framingCtx->aFramingCtx_last = M4OSA_NULL;
+ framingCtx->pSPSContext = M4OSA_NULL;
+ framingCtx->outputVideoSize =
+ xVSS_context->pSettings->xVSS.outputVideoSize;
+ framingCtx->topleft_x =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+ framingCtx->topleft_y =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+ framingCtx->bEffectResize =
+ xVSS_context->pSettings->Effects[j].xVSS.bResize;
+ framingCtx->pEffectFilePath =
+ xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+ framingCtx->pFileReadPtr = xVSS_context->pFileReadPtr;
+ framingCtx->pFileWritePtr = xVSS_context->pFileWritePtr;
+ framingCtx->effectDuration =
+ xVSS_context->pSettings->Effects[j].uiDuration;
+ framingCtx->b_IsFileGif = M4OSA_FALSE;
+ framingCtx->b_animated = M4OSA_FALSE;
+ framingCtx->alphaBlendingStruct = M4OSA_NULL;
+
+ /* Save framing structure associated with corresponding effect */
+ xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ framingCtx;
+
+ framingCtx->aFramingCtx = (M4xVSS_FramingStruct
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
+ M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+ if( framingCtx->aFramingCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ framingCtx->aFramingCtx->topleft_x =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+ framingCtx->aFramingCtx->topleft_y =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+ /* BugFix 1.2.0: Leak when decoding error */
+ framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+ framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+ framingCtx->aFramingCtx->pNext = framingCtx->aFramingCtx;
+ framingCtx->aFramingCtx->pCurrent = framingCtx->aFramingCtx;
+ framingCtx->aFramingCtx->duration = 0;
+ framingCtx->aFramingCtx->previousClipTime = -1;
+
+ /*Alpha blending*/
+ /*Check if the alpha blending parameters are corrects*/
+ if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime = 0;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime = 0;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingEnd > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingEnd = 100;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingMiddle > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingMiddle = 100;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingStart > 100 )
+ {
+ pSettings->Effects[j].xVSS.uialphaBlendingStart = 100;
+ }
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 0
+ || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+ > 0 )
+ {
+ /*Allocate the alpha blending structure*/
+ framingCtx->alphaBlendingStruct =
+ (M4xVSS_internalEffectsAlphaBlending *)M4OSA_32bitAlignedMalloc(
+ sizeof(M4xVSS_internalEffectsAlphaBlending),
+ M4VS, (M4OSA_Char *)"alpha blending structure");
+
+ if( framingCtx->alphaBlendingStruct == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Allocation error in M4xVSS_SendCommand");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_ALLOC;
+ }
+ /*Fill the alpha blending structure*/
+ framingCtx->alphaBlendingStruct->m_fadeInTime =
+ pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime;
+ framingCtx->alphaBlendingStruct->m_fadeOutTime =
+ pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime;
+ framingCtx->alphaBlendingStruct->m_end =
+ pSettings->Effects[j].xVSS.uialphaBlendingEnd;
+ framingCtx->alphaBlendingStruct->m_middle =
+ pSettings->Effects[j].xVSS.uialphaBlendingMiddle;
+ framingCtx->alphaBlendingStruct->m_start =
+ pSettings->Effects[j].xVSS.uialphaBlendingStart;
+
+ if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
+ + pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+ > 100 )
+ {
+ framingCtx->alphaBlendingStruct->m_fadeOutTime =
+ 100 - framingCtx->alphaBlendingStruct->m_fadeInTime;
+ }
+ }
+#else
+
+ framingCtx = (M4xVSS_FramingStruct
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FramingStruct),
+ M4VS, (M4OSA_Char
+ *)"Context of the framing effect (for text)");
+
+ if( framingCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ framingCtx->topleft_x =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+ framingCtx->topleft_y =
+ xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+ framingCtx->FramingRgb = M4OSA_NULL;
+
+ /* BugFix 1.2.0: Leak when decoding error */
+ framingCtx->FramingYuv = M4OSA_NULL;
+ framingCtx->pNext = framingCtx;
+
+#endif
+ /* Save framing structure associated with corresponding effect */
+
+ xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ framingCtx;
+
+ /* FB: changes for Video Artist: memcopy pTextBuffer so that it can be changed
+ after a complete analysis*/
+ if( pSettings->Effects[j].xVSS.pTextBuffer == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("M4xVSS_SendCommand: pTextBuffer is null");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_PARAMETER;
+ }
+
+ /*Convert text buffer into customer format before being used*/
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = pSettings->Effects[j].xVSS.pTextBuffer;
+ xVSS_context->pSettings->Effects[j].xVSS.textBufferSize =
+ pSettings->Effects[j].xVSS.textBufferSize;
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)pSettings->
+ Effects[j].xVSS.pTextBuffer,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath = xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer;
+ xVSS_context->pSettings->Effects[j].xVSS.textBufferSize =
+ length;
+ }
+ /**
+ * End of the UTF conversion, use the converted file path*/
+
+ xVSS_context->pSettings->
+ Effects[j].xVSS.pTextBuffer = M4OSA_32bitAlignedMalloc(
+ xVSS_context->pSettings->Effects[j].xVSS.textBufferSize + 1,
+ M4VS, (M4OSA_Char *)"Local text buffer effect");
+
+ //xVSS_context->pSettings->Effects[j].xVSS.pTextBuffer =
+ // M4OSA_32bitAlignedMalloc(strlen(pSettings->Effects[j].xVSS.pTextBuffer)+1,
+ // M4VS, "Local text buffer effect");
+ if( xVSS_context->pSettings->Effects[j].xVSS.pTextBuffer
+ == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ if( pSettings->Effects[j].xVSS.pTextBuffer != M4OSA_NULL )
+ {
+ //memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->Effects[j]
+ //.xVSS.pTextBuffer, (M4OSA_MemAddr8)pSettings->Effects[j].xVSS.pTextBuffer,
+ // strlen(pSettings->Effects[j].xVSS.pTextBuffer)+1);
+ memcpy((void *)xVSS_context->pSettings->
+ Effects[j].xVSS.pTextBuffer,
+ (void *)pDecodedPath, xVSS_context->pSettings->
+ Effects[j].xVSS.textBufferSize + 1);
+ }
+
+ /*Allocate the text RGB buffer*/
+ framingCtx->aFramingCtx->FramingRgb =
+ (M4VIFI_ImagePlane *)M4OSA_32bitAlignedMalloc(sizeof(M4VIFI_ImagePlane),
+ M4VS,
+ (M4OSA_Char *)"RGB structure for the text effect");
+
+ if( framingCtx->aFramingCtx->FramingRgb == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+ if( xVSS_context->pSettings->Effects[j].xVSS.uiTextBufferWidth
+ == 0 || xVSS_context->pSettings->
+ Effects[j].xVSS.uiTextBufferHeight == 0 )
+ {
+ M4OSA_TRACE1_0(
+ "M4xVSS_SendCommand: text plane width and height are not defined");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_PARAMETER;
+ }
+ /* Allocate input RGB text buffer and force it to even size to avoid errors in
+ YUV conversion */
+ framingCtx->aFramingCtx->FramingRgb->u_width =
+ xVSS_context->pSettings->
+ Effects[j].xVSS.uiTextBufferWidth & ~1;
+ framingCtx->aFramingCtx->FramingRgb->u_height =
+ xVSS_context->pSettings->
+ Effects[j].xVSS.uiTextBufferHeight & ~1;
+ framingCtx->aFramingCtx->FramingRgb->u_stride =
+ 2 * framingCtx->aFramingCtx->FramingRgb->u_width;
+ framingCtx->aFramingCtx->FramingRgb->u_topleft = 0;
+ framingCtx->aFramingCtx->FramingRgb->pac_data =
+ (M4VIFI_UInt8 *)M4OSA_32bitAlignedMalloc(
+ framingCtx->aFramingCtx->FramingRgb->u_height
+ * framingCtx->aFramingCtx->FramingRgb->u_stride,
+ M4VS, (M4OSA_Char *)"Text RGB plane->pac_data");
+
+ if( framingCtx->aFramingCtx->FramingRgb->pac_data
+ == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+
+#ifdef DECODE_GIF_ON_SAVING
+ /**/
+ /* Call text rendering function */
+
+ err = xVSS_context->pSettings->xVSS.pTextRenderingFct(
+ xVSS_context->pSettings->Effects[j].xVSS.pRenderingData,
+ xVSS_context->pSettings->
+ Effects[j].xVSS.pTextBuffer,
+ xVSS_context->pSettings->
+ Effects[j].xVSS.textBufferSize,
+ &(framingCtx->aFramingCtx->FramingRgb));
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_0("Text rendering external function failed\n");
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+
+ /* Check that RGB buffer is set */
+ if( framingCtx->aFramingCtx->FramingRgb == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Text rendering function did not set RGB buffer correctly !");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_PARAMETER;
+ }
+
+ /* Convert RGB plane to YUV420 and update framing structure */
+ err = M4xVSS_internalConvertRGBtoYUV(framingCtx->aFramingCtx);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_sendCommand: error when converting RGB to YUV: 0w%x",
+ err);
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+
+#else
+ /**/
+ /* Call text rendering function */
+
+ err = xVSS_context->pSettings->xVSS.pTextRenderingFct(
+ xVSS_context->pSettings->Effects[j].xVSS.pRenderingData,
+ xVSS_context->pSettings->
+ Effects[j].xVSS.pTextBuffer,
+ xVSS_context->pSettings->
+ Effects[j].xVSS.textBufferSize,
+ &(framingCtx->FramingRgb));
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_0("Text rendering external function failed\n");
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+
+ /* Check that RGB buffer is set */
+ if( framingCtx->FramingRgb == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0(
+ "Text rendering function did not set RGB buffer correctly !");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_PARAMETER;
+ }
+
+ /* Convert RGB plane to YUV420 and update framing structure */
+ err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_sendCommand: error when converting RGB to YUV: 0w%x",
+ err);
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+ /* Change internally effect type from "text" to framing */
+
+ xVSS_context->pSettings->Effects[j].VideoEffectType =
+ M4xVSS_kVideoEffectType_Framing;
+ xVSS_context->pSettings->Effects[j].xVSS.bResize = M4OSA_FALSE;
+ }
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4xVSS_sendCommand: No text rendering function set !!");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_PARAMETER;
+ }
+ }
+
+ /* Allocate the structure to store the data needed by the Fifties effect */
+ else if( xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Fifties )
+ {
+ M4xVSS_FiftiesStruct *fiftiesCtx;
+
+ /* Check the expected frame rate for the fifties effect (must be above 0) */
+ if( 0 == xVSS_context->pSettings->
+ Effects[j].xVSS.uiFiftiesOutFrameRate )
+ {
+ M4OSA_TRACE1_0(
+ "The frame rate for the fifties effect must be greater than 0 !");
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_PARAMETER;
+ }
+
+ fiftiesCtx = (M4xVSS_FiftiesStruct
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_FiftiesStruct),
+ M4VS, (M4OSA_Char *)"Context of the fifties effect");
+
+ if( fiftiesCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_ALLOC;
+ }
+
+ fiftiesCtx->previousClipTime = -1;
+ fiftiesCtx->fiftiesEffectDuration = 1000 / xVSS_context->pSettings->
+ Effects[j].xVSS.uiFiftiesOutFrameRate;
+ fiftiesCtx->shiftRandomValue = 0;
+ fiftiesCtx->stripeRandomValue = 0;
+
+ /* Save the structure associated with corresponding effect */
+ xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ fiftiesCtx;
+ }
+
+ /* Allocate the structure to store the data needed by the Color effect */
+ else if( xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_ColorRGB16
+ || xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_BlackAndWhite
+ || xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Pink
+ || xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Green
+ || xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Sepia
+ || xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Negative
+ || xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Gradient )
+ {
+ M4xVSS_ColorStruct *ColorCtx;
+
+ ColorCtx =
+ (M4xVSS_ColorStruct *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_ColorStruct),
+ M4VS, (M4OSA_Char *)"Context of the color effect");
+
+ if( ColorCtx == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return M4ERR_ALLOC;
+ }
+
+ ColorCtx->colorEffectType =
+ xVSS_context->pSettings->Effects[j].VideoEffectType;
+
+ if( xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_ColorRGB16
+ || xVSS_context->pSettings->Effects[j].VideoEffectType
+ == M4xVSS_kVideoEffectType_Gradient )
+ {
+ ColorCtx->rgb16ColorData =
+ xVSS_context->pSettings->Effects[j].xVSS.uiRgb16InputColor;
+ }
+ else
+ {
+ ColorCtx->rgb16ColorData = 0;
+ }
+
+ /* Save the structure associated with corresponding effect */
+ xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ ColorCtx;
+ }
+ }
+
+ /**********************************
+ Background music registering
+ **********************************/
+ if( pSettings->xVSS.pBGMtrack != M4OSA_NULL && isNewBGM == M4OSA_TRUE )
+ {
+#ifdef PREVIEW_ENABLED
+
+ M4xVSS_MCS_params *pParams;
+ M4OSA_Char *out_pcm;
+ /*UTF conversion support*/
+ M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+#endif
+
+ /* We save output file pointer, because we will need to use it when saving audio mixed
+ file (last save step) */
+
+ xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+ xVSS_context->pTemporaryFile = xVSS_context->pSettings->pTemporaryFile;
+
+ /* If a previous BGM has already been registered, delete it */
+ /* Here can be implemented test to know if the same BGM is registered */
+ if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+ if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pSettings->xVSS.pBGMtrack->
+ pFile);
+ xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+ }
+ free(xVSS_context->pSettings->xVSS.pBGMtrack);
+ xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+ }
+
+ /* Allocate BGM */
+ xVSS_context->pSettings->xVSS.pBGMtrack =
+ (M4xVSS_BGMSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_BGMSettings), M4VS,
+ (M4OSA_Char *)"xVSS_context->pSettings->xVSS.pBGMtrack");
+
+ if( xVSS_context->pSettings->xVSS.pBGMtrack == M4OSA_NULL )
+ {
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ return M4ERR_ALLOC;
+ }
+
+ /* Copy input structure to our structure */
+ memcpy((void *)xVSS_context->pSettings->xVSS.pBGMtrack,
+ (void *)pSettings->xVSS.pBGMtrack,
+ sizeof(M4xVSS_BGMSettings));
+ /* Allocate file name, and copy file name buffer to our structure */
+ xVSS_context->pSettings->xVSS.pBGMtrack->pFile =
+ M4OSA_32bitAlignedMalloc((strlen(pSettings->xVSS.pBGMtrack->pFile)
+ + 1), M4VS, (M4OSA_Char *)"xVSS BGM file path");
+
+ if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+ {
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+ (void *)pSettings->xVSS.pBGMtrack->pFile,
+ strlen(pSettings->xVSS.pBGMtrack->pFile) + 1);
+
+#ifdef PREVIEW_ENABLED
+ /* Decode BGM track to pcm output file */
+
+ pParams =
+ (M4xVSS_MCS_params *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_MCS_params), M4VS,
+ (M4OSA_Char *)"Element of MCS Params (for BGM)");
+
+ if( pParams == M4OSA_NULL )
+ {
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_0(
+ "M4xVSS_sendCommand: Problem when allocating one element MCS Params");
+ return M4ERR_ALLOC;
+ }
+
+ /* Initialize the pointers in case of problem (PR 2273) */
+ pParams->pFileIn = M4OSA_NULL;
+ pParams->pFileOut = M4OSA_NULL;
+ pParams->pFileTemp = M4OSA_NULL;
+ pParams->pNext = M4OSA_NULL;
+ pParams->BeginCutTime = 0;
+ pParams->EndCutTime = 0;
+
+ if( xVSS_context->pMCSparamsList
+ == M4OSA_NULL ) /* Means it is the first element of the list */
+ {
+ /* Initialize the xVSS context with the first element of the list */
+ xVSS_context->pMCSparamsList = pParams;
+
+ }
+ else
+ {
+ M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+ M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+ /* Parse MCS params chained list to find and delete BGM element */
+ while( pParams_temp != M4OSA_NULL )
+ {
+ if( pParams_temp->isBGM == M4OSA_TRUE )
+ {
+ /* Remove this element */
+ if( pParams_temp->pFileIn != M4OSA_NULL )
+ {
+ free(pParams_temp->pFileIn);
+ pParams_temp->pFileIn = M4OSA_NULL;
+ }
+
+ if( pParams_temp->pFileOut != M4OSA_NULL )
+ {
+ /* Remove PCM temporary file */
+ remove((const char *)pParams_temp->pFileOut);
+ free(pParams_temp->pFileOut);
+ pParams_temp->pFileOut = M4OSA_NULL;
+ }
+ /* Chain previous element with next element = remove BGM chained
+ list element */
+ if( pParams_prev != M4OSA_NULL )
+ {
+ pParams_prev->pNext = pParams_temp->pNext;
+ }
+ /* If current pointer is the first of the chained list and next pointer of
+ the chained list is NULL */
+ /* it means that there was only one element in the list */
+ /* => we put the context variable to NULL to reaffect the first chained list
+ element */
+ if( pParams_temp == xVSS_context->pMCSparamsList
+ && pParams_temp->pNext == M4OSA_NULL )
+ {
+ xVSS_context->pMCSparamsList = M4OSA_NULL;
+ }
+ /* In that case, BGM pointer is the first one, but there are others elements
+ after it */
+ /* So, we need to change first chained list element */
+ else if( pParams_temp->pNext != M4OSA_NULL
+ && pParams_prev == M4OSA_NULL )
+ {
+ xVSS_context->pMCSparamsList = pParams_temp->pNext;
+ }
+
+ if( pParams_temp->pNext != M4OSA_NULL )
+ {
+ pParams_prev = pParams_temp->pNext;
+ free(pParams_temp);
+ pParams_temp = M4OSA_NULL;
+ pParams_temp = pParams_prev;
+ }
+ else
+ {
+ free(pParams_temp);
+ pParams_temp = M4OSA_NULL;
+ }
+ }
+ else
+ {
+ pParams_prev = pParams_temp;
+ pParams_temp = pParams_temp->pNext;
+ }
+ }
+ /* We need to initialize the last element of the chained list to be able to add new
+ BGM element */
+ pMCS_last = pParams_prev;
+
+ if( xVSS_context->pMCSparamsList == M4OSA_NULL )
+ {
+ /* In that case, it means that there was only one element in the chained list */
+ /* So, we need to save the new params*/
+ xVSS_context->pMCSparamsList = pParams;
+ }
+ else
+ {
+ /* Update next pointer of the previous last element of the chain */
+ pMCS_last->pNext = pParams;
+ }
+
+ }
+
+ /* Fill the last M4xVSS_MCS_params element */
+ pParams->InputFileType =
+ xVSS_context->pSettings->xVSS.pBGMtrack->FileType;
+ pParams->OutputFileType = M4VIDEOEDITING_kFileType_PCM;
+ pParams->OutputVideoFormat = M4VIDEOEDITING_kNoneVideo;
+ pParams->OutputVideoFrameSize = M4VIDEOEDITING_kQCIF;
+ pParams->OutputVideoFrameRate = M4VIDEOEDITING_k15_FPS;
+
+ if( xVSS_context->pSettings->xVSS.outputAudioFormat
+ == M4VIDEOEDITING_kAAC )
+ {
+ pParams->OutputAudioFormat = M4VIDEOEDITING_kAAC;
+ pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_kDefault_ASF;
+
+ /*FB: VAL CR P4ME00003076
+ The output audio bitrate in the AAC case is now directly given by the user*/
+ /*Check if the audio bitrate is correctly defined*/
+ /*Mono
+ MCS values for AAC Mono are min: 16kbps and max: 192 kbps*/
+ if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+ >= M4VIDEOEDITING_k16_KBPS
+ && xVSS_context->pSettings->xVSS.outputAudioBitrate
+ <= M4VIDEOEDITING_k192_KBPS
+ && xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_TRUE )
+ {
+ pParams->OutputAudioBitrate =
+ xVSS_context->pSettings->xVSS.outputAudioBitrate;
+ }
+ /*Stereo
+ MCS values for AAC Mono are min: 32kbps and max: 192 kbps*/
+ else if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+ >= M4VIDEOEDITING_k32_KBPS
+ && xVSS_context->pSettings->xVSS.outputAudioBitrate
+ <= M4VIDEOEDITING_k192_KBPS
+ && xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_FALSE )
+ {
+ pParams->OutputAudioBitrate =
+ xVSS_context->pSettings->xVSS.outputAudioBitrate;
+ }
+ else
+ {
+ pParams->OutputAudioBitrate = M4VIDEOEDITING_k32_KBPS;
+ }
+ pParams->bAudioMono = xVSS_context->pSettings->xVSS.bAudioMono;
+ }
+ else
+ {
+ pParams->OutputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+ pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_kDefault_ASF;
+ pParams->OutputAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
+ pParams->bAudioMono = M4OSA_TRUE;
+ }
+ pParams->OutputVideoBitrate = M4VIDEOEDITING_kUndefinedBitrate;
+
+ /* Prepare output filename */
+ /* 21 is the size of "preview_16000_2.pcm" + \0 */
+ out_pcm =
+ (M4OSA_Char *)M4OSA_32bitAlignedMalloc(strlen(xVSS_context->pTempPath)
+ + 21, M4VS, (M4OSA_Char *)"Temp char* for pcmPreviewFile");
+
+ if( out_pcm == M4OSA_NULL )
+ {
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+ return M4ERR_ALLOC;
+ }
+
+ /* Copy temporary path to final preview path string */
+ M4OSA_chrNCopy(out_pcm, xVSS_context->pTempPath,
+ strlen(xVSS_context->pTempPath) + 1);
+
+ /* Depending of the output sample frequency and nb of channels, we construct preview
+ output filename */
+ if( xVSS_context->pSettings->xVSS.outputAudioFormat
+ == M4VIDEOEDITING_kAAC )
+ {
+ /* Construct output temporary PCM filename */
+ if( xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_TRUE )
+ {
+ strncat((char *)out_pcm, (const char *)"preview_16000_1.pcm\0",
+ 20);
+ }
+ else
+ {
+ strncat((char *)out_pcm, (const char *)"preview_16000_2.pcm\0",
+ 20);
+ }
+ }
+ else if( xVSS_context->pSettings->xVSS.outputAudioFormat
+ == M4VIDEOEDITING_kAMR_NB )
+ {
+ /* Construct output temporary PCM filename */
+ strncat((char *)out_pcm, (const char *)"preview_08000_1.pcm\0", 20);
+ }
+ else
+ {
+ if( out_pcm != M4OSA_NULL )
+ {
+ free(out_pcm);
+ out_pcm = M4OSA_NULL;
+ }
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_0("Bad audio output format \n");
+ return M4ERR_PARAMETER;
+ }
+
+ xVSS_context->pcmPreviewFile = out_pcm;
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = out_pcm;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)out_pcm, (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ xVSS_context->pcmPreviewFile =
+ (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS,
+ (M4OSA_Char *)"pcmPreviewFile");
+
+ if( xVSS_context->pcmPreviewFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ free(out_pcm);
+ out_pcm = M4OSA_NULL;
+ /*FB: to avoid leaks when there is an error in the send command*/
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ /**/
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pcmPreviewFile, (void *)pDecodedPath, length + 1);
+
+ /* Free temporary output filename */
+ if( out_pcm != M4OSA_NULL )
+ {
+ free(out_pcm);
+ out_pcm = M4OSA_NULL;
+ }
+
+ pParams->pFileOut = M4OSA_32bitAlignedMalloc((length + 1), M4VS,
+ (M4OSA_Char *)"MCS BGM Params: file out");
+
+ if( pParams->pFileOut == M4OSA_NULL )
+ {
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ return M4ERR_ALLOC;
+ }
+ pParams->pFileTemp = M4OSA_NULL;
+
+ memcpy((void *)pParams->pFileOut,(void *) xVSS_context->pcmPreviewFile,
+ (length + 1)); /* Copy output file path */
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+
+ pDecodedPath = xVSS_context->pSettings->xVSS.pBGMtrack->pFile;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)xVSS_context->pSettings->xVSS.pBGMtrack->
+ pFile, (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ pParams->pFileIn = (M4OSA_Void *)M4OSA_32bitAlignedMalloc((length + 1), M4VS,
+ (M4OSA_Char *)"MCS BGM Params: file in");
+
+ if( pParams->pFileIn == M4OSA_NULL )
+ {
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pParams->pFileIn, (void *)pDecodedPath,
+ (length + 1)); /* Copy input file path */
+
+ pParams->isBGM = M4OSA_TRUE;
+ pParams->isCreated = M4OSA_FALSE;
+ xVSS_context->nbStepTotal++;
+ bIsTranscoding = M4OSA_TRUE;
+#endif /* PREVIEW_ENABLED */
+
+ }
+ else if( pSettings->xVSS.pBGMtrack != M4OSA_NULL
+ && isNewBGM == M4OSA_FALSE )
+ {
+#ifdef PREVIEW_ENABLED
+ /* BGM is the same as previously, no need to redecode audio */
+ /* Need to update MCS params chained list, to signal M4xVSS_step function to skip
+ BGM decoding */
+
+ M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+ M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+#endif /* PREVIEW_ENABLED */
+ /* We save output file pointer, because we will need to use it when saving audio
+ mixed file (last save step) */
+
+ xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+ xVSS_context->pTemporaryFile = xVSS_context->pSettings->pTemporaryFile;
+
+ /* Re-write BGM settings in case they have changed between two sendCommand */
+ xVSS_context->pSettings->xVSS.pBGMtrack->uiAddCts =
+ pSettings->xVSS.pBGMtrack->uiAddCts;
+ xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume =
+ pSettings->xVSS.pBGMtrack->uiAddVolume;
+ xVSS_context->pSettings->xVSS.pBGMtrack->uiBeginLoop =
+ pSettings->xVSS.pBGMtrack->uiBeginLoop;
+ xVSS_context->pSettings->xVSS.pBGMtrack->uiEndLoop =
+ pSettings->xVSS.pBGMtrack->uiEndLoop;
+
+#ifdef PREVIEW_ENABLED
+ /* Parse MCS params chained list to find and delete BGM element */
+
+ while( pParams_temp != M4OSA_NULL )
+ {
+ if( pParams_temp->isBGM == M4OSA_TRUE )
+ {
+ pParams_temp->isCreated = M4OSA_TRUE;
+ break;
+ }
+ pParams_prev = pParams_temp;
+ pParams_temp = pParams_temp->pNext;
+ }
+
+#endif /* PREVIEW_ENABLED */
+
+ M4OSA_TRACE2_0("M4xVSS_SendCommand has been recalled, BGM is the same");
+ }
+ else
+ {
+ M4OSA_TRACE1_0("No BGM in this xVSS command");
+
+ if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+#ifdef PREVIEW_ENABLED
+ /* Need to remove MCS previous params chained list */
+
+ M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+ M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+ /* Parse MCS params chained list to find and delete BGM element */
+ while( pParams_temp != M4OSA_NULL )
+ {
+ if( pParams_temp->isBGM == M4OSA_TRUE )
+ {
+ /* Remove this element */
+ if( pParams_temp->pFileIn != M4OSA_NULL )
+ {
+ free(pParams_temp->pFileIn);
+ pParams_temp->pFileIn = M4OSA_NULL;
+ }
+
+ if( pParams_temp->pFileOut != M4OSA_NULL )
+ {
+ free(pParams_temp->pFileOut);
+ pParams_temp->pFileOut = M4OSA_NULL;
+ }
+ /* Chain previous element with next element */
+ if( pParams_prev != M4OSA_NULL )
+ {
+ pParams_prev->pNext = pParams_temp->pNext;
+ }
+ /* If current pointer is the first of the chained list and next pointer
+ of the chained list is NULL */
+ /* it means that there was only one element in the list */
+ /* => we put the context variable to NULL */
+ if( pParams_temp == xVSS_context->pMCSparamsList
+ && pParams_temp->pNext == M4OSA_NULL )
+ {
+ free(pParams_temp);
+ xVSS_context->pMCSparamsList = M4OSA_NULL;
+ }
+ /* In that case, BGM pointer is the first one, but there are others
+ elements after it */
+ /* So, we need to change first chained list element */
+ else if( pParams_temp->pNext != M4OSA_NULL )
+ {
+ xVSS_context->pMCSparamsList = pParams_temp->pNext;
+ free(pParams_temp);
+ pParams_temp = M4OSA_NULL;
+ }
+ /* In all other cases, nothing else to do except freeing the chained
+ list element */
+ else
+ {
+ free(pParams_temp);
+ pParams_temp = M4OSA_NULL;
+ }
+ break;
+ }
+ pParams_prev = pParams_temp;
+ pParams_temp = pParams_temp->pNext;
+ }
+
+#endif /* PREVIEW_ENABLED */
+ /* Here, we unallocate all BGM components and put xVSS_context->pSettings->
+ xVSS.pBGMtrack to NULL */
+
+ if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+ if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile
+ != M4OSA_NULL )
+ {
+ free(xVSS_context->pSettings->xVSS.pBGMtrack->pFile);
+ xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+ }
+ free(xVSS_context->pSettings->xVSS.pBGMtrack);
+ xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+ }
+ }
+ }
+
+ /* Changed to be able to mix with video only files -> in case no master clip is found
+ (i.e only JPG input or video only input) */
+ /* and if there is a BGM, we force the added volume to 100 (i.e replace audio) */
+
+ if( masterClip == -1
+ && xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+ /* In that case, it means that no input 3GP file has a video track.
+ Therefore, if a mixing is asked, it will fail. Thus, we force replace audio. */
+ xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume = 100;
+ }
+
+ /* Save clip number to know if a M4xVSS_sendCommand has already been called */
+ xVSS_context->previousClipNumber = xVSS_context->pSettings->uiClipNumber;
+
+ /* Change state */
+ xVSS_context->m_state = M4xVSS_kStateAnalyzing;
+
+ /* In case of MMS use case, we compute here the max video bitrate */
+ /* In case of too low bitrate, a specific warning is returned */
+ if( xVSS_context->pSettings->xVSS.outputFileSize != 0 && totalDuration > 0 )
+ {
+ M4OSA_UInt32 targetedBitrate = 0;
+ M4VIDEOEDITING_ClipProperties fileProperties;
+ M4OSA_Double ratio;
+
+ if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+ if( xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume
+ == 100 ) /* We are in "replace audio mode, need to check the filetype */
+ {
+ if( xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+ == M4VIDEOEDITING_kFileType_3GPP )
+ {
+ M4OSA_Void *pDecodedPath;
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath =
+ xVSS_context->pSettings->xVSS.pBGMtrack->pFile;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)xVSS_context->pSettings->
+ xVSS.pBGMtrack->pFile,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.
+ pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1("M4xVSS_SendCommand: \
+ M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ /* Free Send command */
+ M4xVSS_freeCommand(xVSS_context);
+ return err;
+ }
+ pDecodedPath = xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ err =
+ M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+ &fileProperties);
+
+ /* Get the properties of the BGM track */
+ /*err = M4xVSS_internalGetProperties(xVSS_context, xVSS_context->pSettings->
+ xVSS.pBGMtrack->pFile, &fileProperties);*/
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned an error:\
+ 0x%x", err);
+ return err;
+ }
+
+ if( fileProperties.AudioStreamType
+ != M4VIDEOEDITING_kAMR_NB )
+ {
+ M4OSA_TRACE1_0(
+ "M4xVSS_sendCommand: Impossible to use MMS mode with BGM != AMR-NB");
+ return M4ERR_PARAMETER;
+ }
+ }
+ else if( xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+ != M4VIDEOEDITING_kFileType_AMR
+ && xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+ != M4VIDEOEDITING_kFileType_MP3 )
+ {
+ M4OSA_TRACE1_0("M4xVSS_sendCommand: Bad input BGM file");
+ return M4ERR_PARAMETER;
+ }
+ }
+ }
+
+ /* Compute targeted bitrate, with 8% margin (moov) */
+ if( totalDuration > 1000 )
+ {
+ targetedBitrate =
+ (M4OSA_UInt32)(( xVSS_context->pSettings->xVSS.outputFileSize
+ * 8 * 0.84) / (totalDuration / 1000));
+ }
+ else
+ {
+ targetedBitrate = 0;
+ }
+
+ /* Remove audio bitrate */
+ if( targetedBitrate >= 12200 )
+ {
+ targetedBitrate -= 12200; /* Only AMR is supported in MMS case */
+ }
+ else
+ {
+ targetedBitrate = 0;
+ }
+
+ /* Compute an indicator of "complexity" depending on nb of sequences and total duration */
+ /* The highest is the number of sequences, the more there are some I frames */
+ /* In that case, it is necessary to reduce the target bitrate */
+ ratio =
+ (M4OSA_Double)((M4OSA_Double)(xVSS_context->pSettings->uiClipNumber
+ * 100000) / (M4OSA_Double)(totalDuration));
+ M4OSA_TRACE2_3(
+ "Ratio clip_nb/duration = %f\nTargeted bitrate = %d\nTotal duration: %d",
+ (M4OSA_Double)((M4OSA_Double)(xVSS_context->pSettings->uiClipNumber
+ * 100000) / (M4OSA_Double)(totalDuration)),
+ targetedBitrate, totalDuration);
+
+ if( ratio > 50 && ratio <= 75 )
+ {
+ /* It means that there is a potential risk of having a higher file size
+ than specified */
+ targetedBitrate -= (M4OSA_UInt32)(targetedBitrate * 0.1);
+ M4OSA_TRACE2_2(
+ "New bitrate1 !!\nRatio clip_nb/duration = %f\nTargeted bitrate = %d",
+ ratio, targetedBitrate);
+ }
+ else if( ratio > 75 )
+ {
+ targetedBitrate -= (M4OSA_UInt32)(targetedBitrate * 0.15);
+ M4OSA_TRACE2_2(
+ "New bitrate2 !!\nRatio clip_nb/duration = %f\nTargeted bitrate = %d",
+ ratio, targetedBitrate);
+ }
+
+ /*CR 3283 MMS use case for VAL:
+ Decrease the output file size to keep a margin of 5%
+ The writer will stop when the targeted output file size will be reached*/
+ xVSS_context->pSettings->xVSS.outputFileSize -=
+ (M4OSA_UInt32)(xVSS_context->pSettings->xVSS.outputFileSize * 0.05);
+
+ switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+ {
+ case M4VIDEOEDITING_kSQCIF:
+ if( targetedBitrate < 32000 )
+ {
+ xVSS_context->targetedBitrate = 32000;
+ return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+ }
+ break;
+
+ case M4VIDEOEDITING_kQQVGA:
+ if( targetedBitrate < 32000 ) /*48000)*/
+ {
+ xVSS_context->targetedBitrate = 32000; /*48000;*/
+ return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+ }
+ break;
+
+ case M4VIDEOEDITING_kQCIF:
+ if( targetedBitrate < 48000 ) /*64000)*/
+ {
+ xVSS_context->targetedBitrate = 48000; /*64000;*/
+ return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+ }
+ break;
+
+ case M4VIDEOEDITING_kQVGA:
+ if( targetedBitrate < 64000 ) /*128000)*/
+ {
+ xVSS_context->targetedBitrate = 64000; /*128000;*/
+ return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+ }
+ break;
+
+ case M4VIDEOEDITING_kCIF:
+ if( targetedBitrate < 128000 )
+ {
+ xVSS_context->targetedBitrate = 128000;
+ return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+ }
+ break;
+
+ case M4VIDEOEDITING_kVGA:
+ if( targetedBitrate < 192000 )
+ {
+ xVSS_context->targetedBitrate = 192000;
+ return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+ }
+ break;
+
+ default:
+ /* Cannot happen */
+ M4OSA_TRACE1_0(
+ "M4xVSS_sendCommand: Error in output fileSize !");
+ return M4ERR_PARAMETER;
+ break;
+ }
+ xVSS_context->targetedBitrate = (M4OSA_UInt32)targetedBitrate;
+ }
+
+ if( bIsTranscoding )
+ {
+ return M4VSS3GPP_WAR_TRANSCODING_NECESSARY;
+ }
+ else
+ {
+ return M4NO_ERROR;
+ }
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_SaveStart(M4OSA_Context pContext, M4OSA_Void* pFilePath,
+ *                                      M4OSA_UInt32 filePathSize)
+ * @brief This function prepares the save operation
+ * @note The xVSS creates the final edited 3GP file
+ * This function must be called once M4xVSS_Step has returned
+ * M4VSS3GPP_WAR_ANALYZING_DONE
+ * After this function, the user must call M4xVSS_Step until
+ * it returns an error other than M4NO_ERROR.
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param pFilePath (IN) If the user wants to provide a different
+ * output filename, else can be NULL (allocated by the user)
+ * @param filePathSize (IN) Length in bytes of the pFilePath string
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_SaveStart( M4OSA_Context pContext, M4OSA_Void *pFilePath,
+ M4OSA_UInt32 filePathSize )
+{
+ M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+ M4OSA_ERR err;
+
+ /*Add for UTF conversion: copy the pSettings structure into a new pCurrentEditSettings*/
+ M4VSS3GPP_EditSettings *pEditSavingSettings = M4OSA_NULL;
+ M4OSA_UInt8 i, j;
+ M4OSA_UInt32 offset = 0;
+ M4OSA_UInt8 nbEffects = 0;
+ /*only for UTF conversion support*/
+ M4OSA_Void *pDecodedPath = M4OSA_NULL;
+ M4OSA_UInt32 length = 0;
+ /**/
+
+ /* Check state */
+ if( xVSS_context->m_state != M4xVSS_kStateOpened )
+ {
+ M4OSA_TRACE1_1(
+ "Bad state when calling M4xVSS_SaveStart function! State is %d",
+ xVSS_context->m_state);
+ return M4ERR_STATE;
+ }
+
+ /* RC: to temporary handle changing of output filepath */
+ /* TO BE CHANGED CLEANLY WITH A MALLOC/MEMCPY !!!! */
+ if( pFilePath != M4OSA_NULL )
+ {
+ if( xVSS_context->pSettings->pOutputFile != M4OSA_NULL )
+ {
+ /*it means that pOutputFile has been allocated in M4xVSS_sendCommand()*/
+ free(xVSS_context->pSettings->pOutputFile);
+ xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+ xVSS_context->pSettings->uiOutputPathSize = 0;
+ }
+
+ pDecodedPath = pFilePath;
+ /*As all inputs of the xVSS are in UTF8, convert the output file path into the customer
+ format*/
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)pFilePath, (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ filePathSize = length;
+ }
+
+ xVSS_context->pOutputFile =
+ (M4OSA_Void *)M4OSA_32bitAlignedMalloc(filePathSize + 1, M4VS,
+ (M4OSA_Char *)"M4xVSS_SaveStart: output file");
+
+ if( xVSS_context->pOutputFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pOutputFile, (void *)pDecodedPath, filePathSize + 1);
+ xVSS_context->pOutputFile[filePathSize] = '\0';
+ xVSS_context->pSettings->pOutputFile = xVSS_context->pOutputFile;
+ xVSS_context->pSettings->uiOutputPathSize = filePathSize;
+ }
+
+ /**
+ ***/
+
+ /*FB: Add for UTF conversion: copy the pSettings structure into a new pCurrentEditSettings*/
+ /*It is the same principle as in the PreviewStart()*/
+ pEditSavingSettings =
+ (M4VSS3GPP_EditSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_EditSettings),
+ M4VS, (M4OSA_Char *)"Saving, copy of VSS structure");
+
+ if( pEditSavingSettings == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+
+ /* Copy settings from input structure */
+ memcpy((void *) &(pEditSavingSettings->xVSS),
+ (void *) &(xVSS_context->pSettings->xVSS),
+ sizeof(M4xVSS_EditSettings));
+
+ /* Initialize pEditSavingSettings structure */
+ pEditSavingSettings->xVSS.pBGMtrack = M4OSA_NULL;
+
+ pEditSavingSettings->videoFrameRate =
+ xVSS_context->pSettings->videoFrameRate;
+ pEditSavingSettings->uiClipNumber = xVSS_context->pSettings->uiClipNumber;
+ pEditSavingSettings->uiMasterClip =
+ xVSS_context->pSettings->uiMasterClip; /* VSS2.0 mandatory parameter */
+
+ /* Allocate savingSettings.pClipList/pTransitions structure */
+ pEditSavingSettings->pClipList = (M4VSS3GPP_ClipSettings *
+ * )M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_ClipSettings *)
+ *pEditSavingSettings->uiClipNumber,
+ M4VS, (M4OSA_Char *)"xVSS, saving , copy of pClipList");
+
+ if( pEditSavingSettings->pClipList == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+
+ if( pEditSavingSettings->uiClipNumber > 1 )
+ {
+ pEditSavingSettings->pTransitionList = (M4VSS3GPP_TransitionSettings *
+ * )M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings *)
+ *(pEditSavingSettings->uiClipNumber - 1),
+ M4VS, (M4OSA_Char *)"xVSS, saving, copy of pTransitionList");
+
+ if( pEditSavingSettings->pTransitionList == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+ }
+ else
+ {
+ pEditSavingSettings->pTransitionList = M4OSA_NULL;
+ }
+
+ for ( i = 0; i < pEditSavingSettings->uiClipNumber; i++ )
+ {
+ pEditSavingSettings->pClipList[i] = (M4VSS3GPP_ClipSettings
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_ClipSettings),
+ M4VS, (M4OSA_Char *)"saving clip settings");
+
+ if( pEditSavingSettings->pClipList[i] == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+
+ if( i < pEditSavingSettings->uiClipNumber
+ - 1 ) /* Because there is 1 less transition than clip number */
+ {
+ pEditSavingSettings->pTransitionList[i] =
+ (M4VSS3GPP_TransitionSettings
+ *)M4OSA_32bitAlignedMalloc(sizeof(M4VSS3GPP_TransitionSettings),
+ M4VS, (M4OSA_Char *)"saving transition settings");
+
+ if( pEditSavingSettings->pTransitionList[i] == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+ }
+ }
+
+ for ( i = 0; i < xVSS_context->pSettings->uiClipNumber; i++ )
+ {
+ // Add MP4 file support
+
+ if( ( xVSS_context->pSettings->pClipList[i]->FileType
+ == M4VIDEOEDITING_kFileType_3GPP)
+ || (xVSS_context->pSettings->pClipList[i]->FileType
+ == M4VIDEOEDITING_kFileType_MP4)
+ || (xVSS_context->pSettings->pClipList[i]->FileType
+ == M4VIDEOEDITING_kFileType_M4V)
+ || (xVSS_context->pSettings->pClipList[i]->FileType
+ == M4VIDEOEDITING_kFileType_ARGB8888))
+
+ {
+ /* Copy data from given structure to our saving structure */
+ M4xVSS_DuplicateClipSettings(pEditSavingSettings->pClipList[i],
+ xVSS_context->pSettings->pClipList[i],
+ M4OSA_FALSE /* remove effects */);
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = pEditSavingSettings->pClipList[i]->pFile;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+ != M4OSA_NULL && xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err =
+ M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+ *)pEditSavingSettings->pClipList[i]->pFile,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer,
+ &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return err;
+ }
+ pDecodedPath = xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer;
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ free(
+ pEditSavingSettings->pClipList[i]->pFile);
+ pEditSavingSettings->pClipList[i]->pFile = (M4OSA_Void
+ *)M4OSA_32bitAlignedMalloc((length + 1),
+ M4VS, (M4OSA_Char *)"saving transition settings");
+
+ if( pEditSavingSettings->pClipList[i]->pFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pEditSavingSettings->pClipList[i]->pFile,
+ (void *)pDecodedPath, length + 1);
+ }
+ /*FB: add file path size because of UTF 16 conversion*/
+ pEditSavingSettings->pClipList[i]->filePathSize = length+1;
+
+ if( i
+ < xVSS_context->pSettings->uiClipNumber
+ - 1 ) /* Because there is 1 less transition than clip number */
+ {
+ memcpy(
+ (void *)pEditSavingSettings->pTransitionList[i],
+ (void *)xVSS_context->pSettings->
+ pTransitionList[i],
+ sizeof(M4VSS3GPP_TransitionSettings));
+ }
+ }
+ else
+ {
+ M4OSA_TRACE1_0(
+ "M4xVSS_SaveStart: Error when parsing xVSS_context->pSettings->pClipList[i]:\
+ Bad file type");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_PARAMETER;
+ }
+ }
+
+ /* Count the number of video effects, used to know how much memory is needed to allocate*/
+ /* FB 2008/10/15: removed : not compatible with M4VSS3GPP_kVideoEffectType_None
+ for(j=0;j<xVSS_context->pSettings->nbEffects;j++)
+ {
+ if(xVSS_context->pSettings->Effects[j].VideoEffectType != M4VSS3GPP_kVideoEffectType_None)
+ {
+ nbEffects++;
+ }
+ }*/
+ nbEffects = xVSS_context->pSettings->nbEffects;
+
+ /* Allocate effects saving structure with correct number of effects */
+ if( nbEffects != 0 )
+ {
+ pEditSavingSettings->Effects =
+ (M4VSS3GPP_EffectSettings *)M4OSA_32bitAlignedMalloc(nbEffects
+ * sizeof(M4VSS3GPP_EffectSettings), M4VS, (M4OSA_Char
+ *)"Saving settings, effects table of structure settings");
+
+ if( pEditSavingSettings->Effects == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+
+ /* Just copy effect structure to saving structure, as effects time are now */
+ /* relative to output clip time*/
+ memcpy((void *)pEditSavingSettings->Effects,
+ (void *)xVSS_context->pSettings->Effects,
+ nbEffects * sizeof(M4VSS3GPP_EffectSettings));
+ }
+ else
+ {
+ pEditSavingSettings->Effects = M4OSA_NULL;
+ pEditSavingSettings->nbEffects = 0;
+ }
+ pEditSavingSettings->nbEffects = nbEffects;
+
+ if( pFilePath != M4OSA_NULL )
+ {
+ pEditSavingSettings->pOutputFile = pFilePath;
+ }
+
+ /* Save pointer of saving video editor to use in step function */
+ xVSS_context->pCurrentEditSettings = pEditSavingSettings;
+
+ /* Change output file name to temporary output file name, because final file will be
+ generated by audio mixer */
+ if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+
+ M4OSA_Char out_3gp[M4XVSS_MAX_PATH_LEN];
+ M4OSA_Char out_3gp_tmp[M4XVSS_MAX_PATH_LEN];
+
+ /**/
+ pEditSavingSettings->xVSS.pBGMtrack =
+ (M4xVSS_BGMSettings *)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_BGMSettings), M4VS,
+ (M4OSA_Char
+ *)"Saving settings, effects table of structure settings");
+
+ if( pEditSavingSettings->xVSS.pBGMtrack == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+
+ /* Just copy effect structure to saving structure, as effects time are now */
+ /* relative to output clip time*/
+ memcpy((void *)pEditSavingSettings->xVSS.pBGMtrack,
+ (void *)xVSS_context->pSettings->xVSS.pBGMtrack,
+ sizeof(M4xVSS_BGMSettings));
+
+ /* Allocate file name, and copy file name buffer to our structure */
+ pEditSavingSettings->xVSS.pBGMtrack->pFile = M4OSA_32bitAlignedMalloc(
+ (strlen(xVSS_context->pSettings->xVSS.pBGMtrack->pFile)
+ + 1),
+ M4VS, (M4OSA_Char *)"Saving struct xVSS BGM file path");
+
+ if( pEditSavingSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+ {
+ M4xVSS_freeCommand(xVSS_context);
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pEditSavingSettings->xVSS.pBGMtrack->pFile,
+ (void *)xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+ strlen(xVSS_context->pSettings->xVSS.pBGMtrack->pFile)
+ + 1);
+
+ /*Copy BGM track file path*/
+
+ /**
+ * UTF conversion*/
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)pEditSavingSettings->xVSS.pBGMtrack->pFile,
+ (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+
+ free(pEditSavingSettings->xVSS.pBGMtrack->pFile);
+ pEditSavingSettings->xVSS.pBGMtrack->pFile =
+ (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS, (M4OSA_Char
+ *)"M4xVSS_SaveStart: Temp filename in case of BGM");
+
+ if( pEditSavingSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)pEditSavingSettings->xVSS.pBGMtrack->pFile,
+ (void *)pDecodedPath, length + 1);
+ }
+
+ /**/
+
+ M4OSA_chrNCopy(out_3gp, xVSS_context->pTempPath, M4XVSS_MAX_PATH_LEN - 1);
+ M4OSA_chrNCopy(out_3gp_tmp, xVSS_context->pTempPath, M4XVSS_MAX_PATH_LEN - 1);
+
+ /* Construct output temporary 3GP filename */
+ strncat((char *)out_3gp, (const char *)"savetemp.3gp\0", 13);
+ strncat((char *)out_3gp_tmp, (const char *)"savetemp.tmp\0", 13);
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = out_3gp;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)out_3gp, (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ xVSS_context->pCurrentEditSettings->pOutputFile =
+ (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS,
+ (M4OSA_Char *)"M4xVSS_SaveStart: Temp filename in case of BGM");
+
+ if( xVSS_context->pCurrentEditSettings->pOutputFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pCurrentEditSettings->pOutputFile,
+ (void *)pDecodedPath, length + 1);
+ xVSS_context->pCurrentEditSettings->uiOutputPathSize = length + 1;
+
+ /**
+ * UTF conversion: convert into the customer format, before being used*/
+ pDecodedPath = out_3gp_tmp;
+ length = strlen(pDecodedPath);
+
+ if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+ != M4OSA_NULL )
+ {
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+ (M4OSA_Void *)out_3gp_tmp, (M4OSA_Void *)xVSS_context->
+ UTFConversionContext.pTempOutConversionBuffer, &length);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+ err);
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return err;
+ }
+ pDecodedPath =
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the UTF conversion, use the converted file path*/
+ xVSS_context->pCurrentEditSettings->pTemporaryFile =
+ (M4OSA_Void *)M4OSA_32bitAlignedMalloc(length + 1, M4VS,
+ (M4OSA_Char *)"M4xVSS_SaveStart: Temporary file");
+
+ if( xVSS_context->pCurrentEditSettings->pTemporaryFile == M4OSA_NULL )
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)xVSS_context->pCurrentEditSettings->pTemporaryFile,
+ (void *)pDecodedPath, length + 1);
+
+ /* Put nb of step for progression monitoring to 2, because audio mixing is needed */
+ xVSS_context->nbStepTotal = 2;
+ }
+ else
+ {
+ xVSS_context->pCurrentEditSettings->pOutputFile =
+ xVSS_context->pOutputFile;
+ xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+
+ /* Put nb of step for progression monitoring to 1, because no audio mixing is needed */
+ xVSS_context->nbStepTotal = 1;
+ }
+
+ /**
+ ***/
+
+ err = M4xVSS_internalGenerateEditedFile(xVSS_context);
+
+ if( err != M4NO_ERROR )
+ {
+ M4OSA_TRACE1_1(
+ "M4xVSS_SaveStart: M4xVSS_internalGenerateEditedFile returned an error: 0x%x",
+ err);
+
+ /**/
+ if( xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL
+ && xVSS_context->pSettings->xVSS.pBGMtrack == M4OSA_NULL )
+ {
+ free(xVSS_context->pCurrentEditSettings->
+ pOutputFile);
+ xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+
+ if( xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL
+ && xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+ {
+ free(xVSS_context->pCurrentEditSettings->
+ pTemporaryFile);
+ xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+ }
+
+ if( xVSS_context->pOutputFile != M4OSA_NULL )
+ {
+ free(xVSS_context->pOutputFile);
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+ /* TODO: Translate error code of VSS to an xVSS error code */
+ return err;
+ }
+
+ /* Reinitialize current step number for progression monitoring */
+ xVSS_context->currentStep = 0;
+
+ /* Change xVSS state */
+ xVSS_context->m_state = M4xVSS_kStateSaving;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_SaveStop(M4OSA_Context pContext)
+ * @brief This function deallocates the save resources and changes the
+ * xVSS internal state.
+ * @note This function must be called once M4xVSS_Step has returned
+ * M4VSS3GPP_WAR_SAVING_DONE
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_SaveStop( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+
+    /* Check state: stopping a save is only legal from the saving state. */
+    if( xVSS_context->m_state != M4xVSS_kStateSaving )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_SaveStop function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* Free saving structures.
+       NOTE(review): the return code of M4xVSS_internalFreeSaving is
+       deliberately ignored so cleanup always completes; confirm this
+       best-effort behavior is intended. (The unused local 'err' that
+       previously shadowed this has been removed.) */
+    M4xVSS_internalFreeSaving(xVSS_context);
+
+    /* Release the output file path retained since M4xVSS_SaveStart. */
+    if( xVSS_context->pOutputFile != M4OSA_NULL )
+    {
+        free(xVSS_context->pOutputFile);
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+
+    /* Change xVSS state */
+    xVSS_context->m_state = M4xVSS_kStateSaved;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_Step(M4OSA_Context pContext, M4OSA_UInt8 *pProgress)
+ * @brief This function executes different tasks, depending on the xVSS
+ * internal state.
+ * @note This function:
+ * - analyses editing structure if called after M4xVSS_SendCommand
+ * - generates preview file if called after M4xVSS_PreviewStart
+ * - generates final edited file if called after M4xVSS_SaveStart
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param pProgress (IN/OUT) Pointer on an integer giving a
+ * progress indication (between 0-100)
+ * @return M4NO_ERROR: No error, the user must call M4xVSS_Step again
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ * @return M4VSS3GPP_WAR_PREVIEW_READY: Preview file is generated
+ * @return M4VSS3GPP_WAR_SAVING_DONE: Final edited file is generated
+ * @return M4VSS3GPP_WAR_ANALYZING_DONE: Analyse is done
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_Step( M4OSA_Context pContext, M4OSA_UInt8 *pProgress )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    /* Shortcuts to the sub-module contexts driven by this state machine */
+    M4VSS3GPP_EditContext pVssCtxt = xVSS_context->pCurrentEditContext;
+    M4VSS3GPP_AudioMixingContext pAudioMixingCtxt =
+        xVSS_context->pAudioMixContext;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8 uiProgress = 0; /* progress (0-100) of the current micro step */
+
+    switch( xVSS_context->m_state )
+    {
+        case M4xVSS_kStateSaving:
+        //case M4xVSS_kStateGeneratingPreview:
+            {
+                if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateEditing ) /* VSS -> creating effects, transitions ... */
+                {
+                    /* RC: to delete unnecessary temp files on the fly */
+                    M4VSS3GPP_InternalEditContext *pVSSContext =
+                        (M4VSS3GPP_InternalEditContext *)pVssCtxt;
+
+                    err = M4VSS3GPP_editStep(pVssCtxt, &uiProgress);
+
+                    if( ( err != M4NO_ERROR) && (err != M4VSS3GPP_WAR_EDITING_DONE)
+                        && (err != M4VSS3GPP_WAR_SWITCH_CLIP) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_Step: M4VSS3GPP_editStep returned 0x%x\n", err);
+                        M4VSS3GPP_editCleanUp(pVssCtxt);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code ? */
+                        xVSS_context->pCurrentEditContext = M4OSA_NULL;
+                        return err;
+                    }
+
+                    /* RC: to delete unnecessary temp files on the fly */
+                    if( err == M4VSS3GPP_WAR_SWITCH_CLIP )
+                    {
+#ifndef DO_NOT_REMOVE_TEMP_FILES
+                        /* It means we can delete the temporary file */
+                        /* First step, check the temp file is not use somewhere else after */
+
+                        M4OSA_UInt32 i;
+                        M4OSA_Int32 cmpResult = -1;
+
+                        /* Look for a later clip that reuses the same file path;
+                           if one is found the file must be kept. */
+                        for ( i = pVSSContext->uiCurrentClip;
+                            i < pVSSContext->uiClipNumber; i++ )
+                        {
+                            if( pVSSContext->pClipList[pVSSContext->uiCurrentClip
+                                - 1].filePathSize
+                                == pVSSContext->pClipList[i].filePathSize )
+                            {
+                                cmpResult = memcmp((void *)pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, (void *)pVSSContext->pClipList[i].pFile,
+                                    pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].filePathSize);
+
+                                if( cmpResult == 0 )
+                                {
+                                    /* It means we found a corresponding file, we do not delete
+                                    this temporary file */
+                                    break;
+                                }
+                            }
+                        }
+
+                        if( cmpResult != 0 )
+                        {
+                            M4OSA_UInt32 ConvertedSize = 0;
+                            M4OSA_Char *toto;    /* strstr probe result */
+                            M4OSA_Char *pTmpStr; /* path actually inspected */
+
+                            /* Convert result in UTF8 to check if we can delete it or not */
+                            if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+                                != M4OSA_NULL && xVSS_context->
+                                UTFConversionContext.
+                                pTempOutConversionBuffer != M4OSA_NULL )
+                            {
+                                M4xVSS_internalConvertToUTF8(xVSS_context,
+                                    (M4OSA_Void *)pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, (M4OSA_Void *)xVSS_context->
+                                    UTFConversionContext.
+                                    pTempOutConversionBuffer, &ConvertedSize);
+                                toto = (M4OSA_Char *)strstr((const char *)xVSS_context->
+                                    UTFConversionContext.
+                                    pTempOutConversionBuffer,
+                                    (const char *)xVSS_context->pTempPath);
+                                pTmpStr =
+                                    xVSS_context->UTFConversionContext.
+                                    pTempOutConversionBuffer;
+                            }
+                            else
+                            {
+                                toto = (M4OSA_Char *)strstr((const char *)pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, (const char *)xVSS_context->pTempPath);
+                                pTmpStr = pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile;
+                            }
+
+                            if( toto != M4OSA_NULL )
+                            {
+                                /* As temporary files can be imgXXX.3gp or vidXXX.3gp */
+                                pTmpStr +=
+                                    (strlen((const char *)pTmpStr)
+                                    - 10); /* Because temporary files have a length at most of
+                                    10 bytes */
+                                toto = (M4OSA_Char *)strstr((const char *)pTmpStr,
+                                    (const char *)"img");
+
+                                /* NOTE(review): this overwrites 'toto' with the "vid"
+                                   search when "img" WAS found; the intent was most
+                                   likely to try "vid" only when "img" was NOT found
+                                   (toto == M4OSA_NULL). Confirm before relying on it. */
+                                if( toto != M4OSA_NULL )
+                                {
+                                    toto = (M4OSA_Char *)strstr((const char *)pTmpStr,
+                                        (const char *)"vid");
+                                }
+
+                                /* NOTE(review): in this branch 'err' equals
+                                   M4VSS3GPP_WAR_SWITCH_CLIP, never M4NO_ERROR, so this
+                                   remove() appears unreachable; the condition probably
+                                   should test 'toto != M4OSA_NULL'. Confirm intent. */
+                                if( err
+                                    == M4NO_ERROR ) /* It means the file is a temporary file, we
+                                    can delete it */
+                                {
+                                    remove((const char *)pVSSContext->
+                                        pClipList[pVSSContext->uiCurrentClip
+                                        - 1].pFile);
+                                }
+                            }
+                        }
+
+#endif /* DO_NOT_REMOVE_TEMP_FILES*/
+                        /* */
+
+                        /* Swallow the SWITCH_CLIP warning: stepping continues. */
+                        err = M4NO_ERROR;
+                    }
+
+                    if( err == M4VSS3GPP_WAR_EDITING_DONE )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress unless progress would be wrong */
+                        uiProgress = 0;
+                        err = M4xVSS_internalCloseEditedFile(xVSS_context);
+                        /* Fix for blrnxpsw#234---> */
+                        if( err != M4NO_ERROR )
+                        {
+                            if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                            {
+                                err = M4xVSSERR_NO_MORE_SPACE;
+                            }
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_internalCloseEditedFile returned an error: 0x%x",
+                                err);
+                            return err;
+                        }
+                        /*<---- Fix for blrnxpsw#234 */
+                        if( xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack
+                            != M4OSA_NULL )
+                        {
+                            /* A BGM track is present: chain into the audio-mixing
+                               micro state before the save can complete. */
+                            xVSS_context->editingStep =
+                                M4xVSS_kMicroStateAudioMixing;
+                            /* Open Audio mixing component */
+                            err = M4xVSS_internalGenerateAudioMixFile(xVSS_context);
+
+                            if( err != M4NO_ERROR )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4xVSS_internalGenerateAudioMixFile returned an error: 0x%x",
+                                    err);
+                                /* TODO ? : Translate error code of VSS to an xVSS error code */
+                                return err;
+                            }
+                            err = M4NO_ERROR;
+                            goto end_step;
+                        }
+                        else
+                        {
+
+                            err = M4VSS3GPP_WAR_SAVING_DONE;
+                            goto end_step;
+
+                        }
+                    }
+                }
+                else if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateAudioMixing ) /* Audio mixing: mix/replace audio track
+                    with given BGM */
+                {
+                    err = M4VSS3GPP_audioMixingStep(pAudioMixingCtxt, &uiProgress);
+
+                    if( ( err != M4NO_ERROR)
+                        && (err != M4VSS3GPP_WAR_END_OF_AUDIO_MIXING) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_audioMixingMain: M4VSS3GPP_audioMixingStep returned 0x%x\n",
+                            err);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code */
+                        return err;
+                    }
+
+                    if( err == M4VSS3GPP_WAR_END_OF_AUDIO_MIXING )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress unless progress would be wrong */
+                        uiProgress = 0;
+                        err = M4xVSS_internalCloseAudioMixedFile(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_internalCloseAudioMixedFile returned an error: 0x%x",
+                                err);
+                            /* TODO ? : Translate error code of VSS to an xVSS error code */
+                            return err;
+                        }
+
+                        err = M4VSS3GPP_WAR_SAVING_DONE;
+                        goto end_step;
+
+                    }
+                }
+                else
+                {
+                    M4OSA_TRACE1_0("Bad state in step function !");
+                    return M4ERR_STATE;
+                }
+            }
+            break;
+
+        case M4xVSS_kStateAnalyzing:
+            {
+                if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateAnalysePto3GPP ) /* Pto3GPP, analysing input parameters */
+                {
+                    /* Select the next still picture to convert, or move to the
+                       MCS phase when the Pto3GPP list is exhausted. */
+                    if( xVSS_context->pPTo3GPPcurrentParams == M4OSA_NULL
+                        && xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pPTo3GPPcurrentParams =
+                            xVSS_context->
+                            pPTo3GPPparamsList; /* Current Pto3GPP Parameter is the first element
+                            of the list */
+                    }
+                    else if( xVSS_context->pPTo3GPPcurrentParams != M4OSA_NULL
+                        && xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pPTo3GPPcurrentParams =
+                            xVSS_context->pPTo3GPPcurrentParams->
+                            pNext; /* Current Pto3GPP Parameter is the next element of the list */
+
+                        if( xVSS_context->pPTo3GPPcurrentParams
+                            == M4OSA_NULL ) /* It means there is no next image to convert */
+                        {
+                            /* We step to MCS phase */
+                            xVSS_context->analyseStep =
+                                M4xVSS_kMicroStateAnalyzeMCS;
+                            err = M4NO_ERROR;
+                            goto end_step;
+                        }
+                    }
+                    else if( xVSS_context->pPTo3GPPparamsList == M4OSA_NULL )
+                    {
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalyzeMCS; /* Change Analyzing micro state to
+                            MCS phase */
+                        err = M4NO_ERROR;
+                        goto end_step;
+                    }
+
+                    /* Check if this file has to be converted or not */
+                    /* If not, we just return M4NO_ERROR, and go to next file */
+                    if( xVSS_context->pPTo3GPPcurrentParams->isCreated
+                        == M4OSA_FALSE )
+                    {
+                        /* Opening Pto3GPP */
+                        err = M4xVSS_internalStartConvertPictureTo3gp(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartConvertPictureTo3gp \
+                                 returned error: 0x%x",
+                                err)
+                            /* TODO ? : Translate error code of VSS to an xVSS error code */
+                            return err;
+                        }
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateConvertPto3GPP;
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateConvertPto3GPP ) /* Pto3GPP, converting */
+                {
+                    err = M4PTO3GPP_Step(xVSS_context->pM4PTO3GPP_Ctxt);
+                    /* update progress bar */
+                    if(xVSS_context->pCallBackCtxt->m_NbImage > 1)
+                    {
+                        uiProgress = (xVSS_context->pCallBackCtxt->m_ImageCounter * 100) / (xVSS_context->pCallBackCtxt->m_NbImage -1);
+                    }
+
+                    if( ( err != M4NO_ERROR) && (err
+                        != ((M4OSA_UInt32)M4PTO3GPP_WAR_END_OF_PROCESSING)) )
+                    {
+                        /* TO BE CHECKED NO LEAKS !!!!! */
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_Step: M4PTO3GPP_Step returned 0x%x\n", err);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code */
+                        return err;
+                    }
+                    else if( err
+                        == ((M4OSA_UInt32)M4PTO3GPP_WAR_END_OF_PROCESSING) )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress unless progress would be wrong */
+                        uiProgress = 0;
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalysePto3GPP; /* We go back to analyze parameters
+                            to see if there is a next file to convert */
+                        /* RC !!!!!!!! */
+                        xVSS_context->pPTo3GPPcurrentParams->isCreated =
+                            M4OSA_TRUE; /* To avoid reconverting it if another SendCommand is
+                            called */
+                        err = M4xVSS_internalStopConvertPictureTo3gp(xVSS_context);
+                        /*SS:blrnxpsw# 234 */
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step:\
+                                M4xVSS_internalStopConvertPictureTo3gp returned 0x%x",
+                                err);
+                            /* TODO ? : Translate error code of VSS to an xVSS error code */
+                            return err;
+                        }
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    ==
+                    M4xVSS_kMicroStateAnalyzeMCS ) /* MCS: analyzing input parameters */
+                {
+                    /* Select the next clip to transcode; when the MCS list is
+                       exhausted the whole analysis phase is finished. */
+                    if( xVSS_context->pMCScurrentParams == M4OSA_NULL \
+                        && xVSS_context->pMCSparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pMCScurrentParams = xVSS_context->
+                            pMCSparamsList; /* Current MCS Parameter is the first
+                            element of the list */
+                    }
+                    else if( xVSS_context->pMCScurrentParams != M4OSA_NULL \
+                        && xVSS_context->pMCSparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pMCScurrentParams =
+                            xVSS_context->pMCScurrentParams->
+                            pNext; /* Current MCS Parameter
+                            is the next element of the list */
+
+                        if( xVSS_context->pMCScurrentParams == M4OSA_NULL )
+                        /* It means there is no next image to convert */
+                        {
+                            xVSS_context->analyseStep =
+                                M4xVSS_kMicroStateAnalysePto3GPP; /* Reinit Analyzing micro state */
+                            xVSS_context->m_state =
+                                M4xVSS_kStateOpened; /* Change xVSS state */
+                            err = M4VSS3GPP_WAR_ANALYZING_DONE;
+                            goto end_step; /* End of Analysis */
+                        }
+                    }
+                    else if( xVSS_context->pMCSparamsList == M4OSA_NULL )
+                    {
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalysePto3GPP; /* Reinit Analyzing micro state */
+                        xVSS_context->m_state =
+                            M4xVSS_kStateOpened; /* Change xVSS state */
+                        err = M4VSS3GPP_WAR_ANALYZING_DONE;
+                        goto end_step; /* End of Analysis */
+                    }
+
+                    /* Check if this file has to be transcoded or not */
+                    /* If not, we just return M4NO_ERROR, and go to next file */
+                    if( xVSS_context->pMCScurrentParams->isCreated == M4OSA_FALSE )
+                    {
+                        /* Opening MCS */
+                        M4OSA_UInt32 rotationDegree = 0;
+                        err = M4xVSS_internalStartTranscoding(xVSS_context, &rotationDegree);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartTranscoding returned\
+                                error: 0x%x", err);
+                            return err;
+                        }
+                        int32_t index = xVSS_context->pMCScurrentParams->videoclipnumber;
+
+                        /* The cuts are done in the MCS, so we need to replace
+                        the beginCutTime and endCutTime to keep the entire video*/
+                        xVSS_context->pSettings->pClipList[index]->uiBeginCutTime = 0;
+                        xVSS_context->pSettings->pClipList[index]->uiEndCutTime = 0;
+
+
+                        M4OSA_TRACE1_1("M4xVSS_Step: \
+                            M4xVSS_internalStartTranscoding returned \
+                            success; MCS context: 0x%x",
+                            xVSS_context->pMCS_Ctxt);
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateTranscodeMCS;
+
+                        // Retain rotation info of trimmed / transcoded file
+                        xVSS_context->pSettings->pClipList[index]->\
+                            ClipProperties.videoRotationDegrees = rotationDegree;
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateTranscodeMCS )
+                /* MCS: transcoding file */
+                {
+                    err = M4MCS_step(xVSS_context->pMCS_Ctxt, &uiProgress);
+                    /*SS:blrnxpsw# 234 */
+                    if( err == ((M4OSA_UInt32)M4MCS_ERR_NOMORE_SPACE) )
+                    {
+                        err = M4xVSSERR_NO_MORE_SPACE;
+                    }
+
+                    if( ( err != M4NO_ERROR)
+                        && (err != M4MCS_WAR_TRANSCODING_DONE) )
+                    {
+                        /* TO BE CHECKED NO LEAKS !!!!! */
+                        M4OSA_TRACE1_1("M4xVSS_Step: M4MCS_step returned 0x%x\n",
+                            err);
+                        /* TODO ? : Translate error code of MCS to an xVSS error code ? */
+                        return err;
+                    }
+                    else if( err == M4MCS_WAR_TRANSCODING_DONE )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress unless progress would be wrong */
+                        uiProgress = 0;
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalyzeMCS; /* We go back to
+                            analyze parameters to see if there is
+                            a next file to transcode */
+                        /* RC !!!!!!!!!*/
+                        xVSS_context->pMCScurrentParams->isCreated =
+                            M4OSA_TRUE; /* To avoid
+                            reconverting it if another SendCommand is called */
+                        err = M4xVSS_internalStopTranscoding(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step:\
+                                M4xVSS_internalStopTranscoding returned 0x%x", err);
+                            /* TODO ? : Translate error code of MCS to an xVSS error code ? */
+                            return err;
+                        }
+                    }
+                }
+                else
+                {
+                    M4OSA_TRACE1_0("Bad micro state in analyzing state")
+                    return M4ERR_STATE;
+                }
+            }
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "Bad state when calling M4xVSS_Step function! State is %d",
+                xVSS_context->m_state);
+            return M4ERR_STATE;
+    }
+
+end_step:
+    /* Compute progression: each completed macro step contributes
+       100/nbStepTotal, plus the current micro-step progress scaled the
+       same way; the result is clamped to 100. */
+    if( xVSS_context->nbStepTotal != 0 )
+    {
+        *pProgress = (M4OSA_UInt8)(( ( xVSS_context->currentStep * 100) \
+            / (xVSS_context->nbStepTotal))
+            + (uiProgress / (xVSS_context->nbStepTotal)));
+
+        if( *pProgress > 100 )
+        {
+            *pProgress = 100;
+        }
+    }
+    else
+    {
+        *pProgress = 100;
+    }
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_CloseCommand(M4OSA_Context pContext)
+ * @brief This function deletes the current editing profile, deallocates
+ * resources and changes the xVSS internal state.
+ * @note After this function, the user can call a new M4xVSS_SendCommand
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CloseCommand( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check state */
+    /* Depending on the state, different cleanups have to be done */
+    switch( xVSS_context->m_state )
+    {
+        case M4xVSS_kStateOpened:
+            /* Nothing to do here */
+            err = M4xVSS_internalFreeSaving(xVSS_context);
+            break;
+
+        case M4xVSS_kStateSaving:
+            {
+                if( xVSS_context->editingStep == M4xVSS_kMicroStateEditing )
+                {
+                    /* A save was interrupted mid-edit: close the partially
+                       edited output file before freeing the saving structures. */
+                    err = M4xVSS_internalCloseEditedFile(xVSS_context);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        /* Fix for blrnxpsw#234---->*/
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_CloseCommand:\
+                            M4xVSS_internalCloseEditedFile returned an error: 0x%x",
+                            err);
+                        /* we are retaining error here and returning error in the end of the
+                        function as to avoid memory leak*/
+                        //return err;
+                    }
+                }
+                else if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateAudioMixing )
+                {
+                    /* A save was interrupted during audio mixing: close the
+                       partially mixed output file. */
+                    err = M4xVSS_internalCloseAudioMixedFile(xVSS_context);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        /* Fix for blrnxpsw#234---->*/
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_CloseCommand: \
+                            M4xVSS_internalCloseAudioMixedFile returned an error: 0x%x", err);
+                        /* we are retaining error here and returning error in the end of
+                        the function as to avoid memory leak*/
+                        //return err;
+                        /* <----Fix for blrnxpsw#234*/
+                    }
+                }
+                /* NOTE(review): the error retained above is overwritten by the
+                   return value of M4xVSS_internalFreeSaving below; confirm which
+                   error code the caller is meant to receive. */
+                err = M4xVSS_internalFreeSaving(xVSS_context);
+                /* We free this pointer only if a BGM track is present, because in that case,
+                this pointer owns to us */
+                /* NOTE(review): the body of this guard is entirely commented
+                   out, so the BGM-owned paths are currently never freed here;
+                   confirm whether freeing was intentionally moved elsewhere. */
+                if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL ) {
+                    /*if(M4OSA_NULL != xVSS_context->pSettings->pOutputFile)
+                    {
+                    free(xVSS_context->pSettings->pOutputFile);
+                    xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+                    }*/
+                    /*if(M4OSA_NULL != xVSS_context->pSettings->pTemporaryFile)
+                    {
+                    free(xVSS_context->pSettings->pTemporaryFile);
+                    xVSS_context->pSettings->pTemporaryFile = M4OSA_NULL;
+                    }*/
+                }
+            }
+            break;
+
+        case M4xVSS_kStateSaved:
+            break;
+
+        case M4xVSS_kStateAnalyzing:
+            {
+                if( xVSS_context->analyseStep == M4xVSS_kMicroStateConvertPto3GPP )
+                {
+                    /* Free Pto3GPP module */
+                    err = M4xVSS_internalStopConvertPictureTo3gp(xVSS_context);
+                    /* Fix for blrnxpsw#234---->*/
+                    if( err != M4NO_ERROR )
+                    {
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_Step: \
+                            M4xVSS_internalStopConvertPictureTo3gp returned 0x%x", err);
+                        /* we are retaining error here and returning error in the end of the
+                        function as to avoid memory leak*/
+                        //return err;
+                    }
+                    /* <-----Fix for blrnxpsw#234>*/
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateTranscodeMCS )
+                {
+                    /* Free MCS module */
+                    err = M4MCS_abort(xVSS_context->pMCS_Ctxt);
+                    /* Fix for blrnxpsw#234---->*/
+                    if( err != M4NO_ERROR )
+                    {
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_Step: M4MCS_abort returned 0x%x",
+                            err);
+                        /* we are retaining error here and returning error in the end of the
+                        function as to avoid memory leak*/
+                        //return err;
+                    }
+                    /* <---Fix for blrnxpsw#234*/
+                }
+            }
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "Bad state when calling M4xVSS_CloseCommand function! State is %d",
+                xVSS_context->m_state);
+            return M4ERR_STATE;
+    }
+
+    /* Free Send command */
+    M4xVSS_freeCommand(xVSS_context);
+
+    xVSS_context->m_state = M4xVSS_kStateInitialized; /* Change xVSS state */
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_CleanUp(M4OSA_Context pContext)
+ * @brief This function deletes all xVSS ressources
+ * @note This function must be called after M4xVSS_CloseCommand.
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot not be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CleanUp( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_TRACE3_0("M4xVSS_CleanUp:entering");
+
+    /* Guard against a NULL context: the other public xVSS entry points
+       validate their parameters, and dereferencing m_state below would
+       otherwise crash. */
+    if( M4OSA_NULL == xVSS_context )
+    {
+        M4OSA_TRACE1_0("M4xVSS_CleanUp: pContext is M4OSA_NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    /* CleanUp is only legal once M4xVSS_CloseCommand has brought the
+       engine back to the initialized state. */
+    if( xVSS_context->m_state != M4xVSS_kStateInitialized )
+    {
+        M4OSA_TRACE1_1(\
+            "Bad state when calling M4xVSS_CleanUp function! State is %d",\
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /**
+     * UTF conversion: free temporary buffer*/
+    if( xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+        != M4OSA_NULL )
+    {
+        free(xVSS_context->UTFConversionContext.pTempOutConversionBuffer);
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+            M4OSA_NULL;
+    }
+
+    /* free(NULL) is a no-op, so the remaining members need no guards. */
+    free(xVSS_context->pTempPath);
+    xVSS_context->pTempPath = M4OSA_NULL;
+
+    free(xVSS_context->pSettings);
+    xVSS_context->pSettings = M4OSA_NULL;
+
+    /* Release the context itself last. */
+    free(xVSS_context);
+    xVSS_context = M4OSA_NULL;
+    M4OSA_TRACE3_0("M4xVSS_CleanUp:leaving ");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_GetVersion(M4_VersionInfo *pVersion)
+ * @brief This function get the version of the Video Studio 2.1
+ *
+ * @param pVersion (IN) Pointer on the version info struct
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_GetVersion( M4_VersionInfo *pVersion )
+{
+ /* Just used for a grep in code */
+ /* CHANGE_VERSION_HERE */
+ static const M4OSA_Char cVersion[26] = "NXPSW_VideoStudio21_1_3_0";
+ /* NOTE(review): cVersion is intentionally unused at runtime; it only
+ embeds the version string in the binary so it can be found with grep. */
+
+ if( M4OSA_NULL == pVersion )
+ {
+ return M4ERR_PARAMETER;
+ }
+
+ /* Fill the caller-supplied structure from the compile-time constants. */
+ pVersion->m_major = M4_xVSS_MAJOR;
+ pVersion->m_minor = M4_xVSS_MINOR;
+ pVersion->m_revision = M4_xVSS_REVISION;
+ pVersion->m_structSize = sizeof(M4_VersionInfo);
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_CreateClipSettings()
+ * @brief Allows filling a clip settings structure with default values
+ *
+ * @note WARNING: pClipSettings->Effects[ ] will be allocated in this function.
+ * pClipSettings->pFile will be allocated in this function.
+ *
+ * @param pClipSettings (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param pFile (IN) Clip file name
+ * @param filePathSize (IN) Size of the clip path (needed for the UTF16 conversion)
+ * @param nbEffects (IN) Nb of effect settings to allocate
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CreateClipSettings( M4VSS3GPP_ClipSettings *pClipSettings,
+ M4OSA_Void *pFile, M4OSA_UInt32 filePathSize,
+ M4OSA_UInt8 nbEffects )
+{
+ M4OSA_ERR err = M4NO_ERROR;
+
+ M4OSA_TRACE3_1("M4xVSS_CreateClipSettings called with pClipSettings=0x%p",
+ pClipSettings);
+
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+ "M4xVSS_CreateClipSettings: pClipSettings is NULL");
+
+ /* Create inherited VSS3GPP stuff */
+ /* The base layer allocates pFile and the Effects[] array; ownership of
+ those allocations stays with VSS3GPP (freed via FreeClipSettings). */
+ /*err = M4VSS3GPP_editCreateClipSettings(pClipSettings, pFile,nbEffects);*/
+ /*FB: add clip path size (needed for UTF 16 conversion)*/
+ err = M4VSS3GPP_editCreateClipSettings(pClipSettings, pFile, filePathSize,
+ nbEffects);
+
+ if( M4NO_ERROR != err )
+ {
+ M4OSA_TRACE1_1("M4xVSS_CreateClipSettings :\
+ ERROR in M4VSS3GPP_editCreateClipSettings = 0x%x", err);
+ return err;
+ }
+
+ /* Set the clip settings to default */
+ /* Defaults for the xVSS extension: no begin/end cut, zero duration,
+ pan&zoom disabled with its top-left coordinates and ratios reset. */
+ pClipSettings->xVSS.uiBeginCutPercent = 0;
+ pClipSettings->xVSS.uiEndCutPercent = 0;
+ pClipSettings->xVSS.uiDuration = 0;
+ pClipSettings->xVSS.isPanZoom = M4OSA_FALSE;
+ pClipSettings->xVSS.PanZoomTopleftXa = 0;
+ pClipSettings->xVSS.PanZoomTopleftYa = 0;
+ pClipSettings->xVSS.PanZoomTopleftXb = 0;
+ pClipSettings->xVSS.PanZoomTopleftYb = 0;
+ pClipSettings->xVSS.PanZoomXa = 0;
+ pClipSettings->xVSS.PanZoomXb = 0;
+
+ /**
+ * Return with no error */
+ M4OSA_TRACE3_0("M4xVSS_CreateClipSettings(): returning M4NO_ERROR");
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_DuplicateClipSettings()
+ * @brief Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param pClipSettingsDest (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param pClipSettingsOrig (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param bCopyEffects (IN) Flag to know if we have to duplicate effects
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_DuplicateClipSettings( M4VSS3GPP_ClipSettings
+                                       *pClipSettingsDest,
+                                       M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+                                       M4OSA_Bool bCopyEffects )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE3_2(
+        "M4xVSS_DuplicateClipSettings called with dest=0x%p src=0x%p",
+        pClipSettingsDest, pClipSettingsOrig);
+
+    /* Check input parameters (debug builds only). */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsDest), M4ERR_PARAMETER,
+        "M4xVSS_DuplicateClipSettings: pClipSettingsDest is NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsOrig), M4ERR_PARAMETER,
+        "M4xVSS_DuplicateClipSettings: pClipSettingsOrig is NULL");
+
+    /* Delegate the copy (including any allocations) to the inherited
+       VSS3GPP duplication service. */
+    err = M4VSS3GPP_editDuplicateClipSettings(pClipSettingsDest,
+        pClipSettingsOrig, bCopyEffects);
+
+    if( M4NO_ERROR != err )
+    {
+        /* Bug fix: the trace previously named M4xVSS_CreateClipSettings,
+           which made failures of this function hard to locate in logs. */
+        M4OSA_TRACE1_1("M4xVSS_DuplicateClipSettings :\
+            ERROR in M4VSS3GPP_editDuplicateClipSettings = 0x%x", err);
+        return err;
+    }
+
+    /* Return with no error */
+    M4OSA_TRACE3_0("M4xVSS_DuplicateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_FreeClipSettings()
+ * @brief Free the pointers allocated in the ClipSetting structure (pFile, Effects, ...).
+ *
+ * @param pClipSettings (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_FreeClipSettings( M4VSS3GPP_ClipSettings *pClipSettings )
+{
+ /**
+ * Check input parameter */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+ "M4xVSS_FreeClipSettings: pClipSettings is NULL");
+
+ /* Free inherited VSS3GPP stuff */
+ /* Thin wrapper: the allocations made by M4xVSS_CreateClipSettings
+ (pFile, Effects[]) are owned by the VSS3GPP layer and released here. */
+ M4VSS3GPP_editFreeClipSettings(pClipSettings);
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext)
+ * @brief This function returns the MCS context within the xVSS internal context
+ * @note This function must be called only after VSS state has moved to analyzing state or
+ * beyond
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param mcsContext (OUT) Pointer to pointer of mcs context to return
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot not be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_getMCSContext( M4OSA_Context pContext,
+ M4OSA_Context *mcsContext )
+{
+ M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ /**
+ * Check input parameter */
+ /* NOTE(review): M4OSA_DEBUG_IF2 is typically compiled out in release
+ builds, so a NULL pContext would still be dereferenced below --
+ confirm callers never pass NULL. */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4xVSS_getMCSContext: pContext is NULL");
+
+ /* The MCS context only exists once the engine has left the
+ initialized state (analysis started or later). */
+ if( xVSS_context->m_state == M4xVSS_kStateInitialized )
+ {
+ M4OSA_TRACE1_1("M4xVSS_getMCSContext: Bad state! State is %d",\
+ xVSS_context->m_state);
+ return M4ERR_STATE;
+ }
+
+ *mcsContext = xVSS_context->pMCS_Ctxt;
+
+ return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext,
+ * M4OSA_Context* mcsContext)
+ * @brief This function returns the VSS3GPP context within the xVSS internal context
+ * @note This function must be called only after VSS state has moved to Generating preview
+ * or beyond
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @param vss3gppContext (OUT) Pointer to pointer of vss3gpp context to return
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_STATE: This function cannot not be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_getVSS3GPPContext( M4OSA_Context pContext,
+ M4OSA_Context *vss3gppContext )
+{
+ M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ /**
+ * Check input parameter */
+ /* NOTE(review): debug-only check; a NULL pContext would still be
+ dereferenced below in release builds -- confirm callers. */
+ M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+ "M4xVSS_getVSS3GPPContext: pContext is NULL");
+
+ /* The VSS3GPP edit context only exists from the saving state onward. */
+ if( xVSS_context->m_state < M4xVSS_kStateSaving )
+ {
+ M4OSA_TRACE1_1("M4xVSS_getVSS3GPPContext: Bad state! State is %d",\
+ xVSS_context->m_state);
+ return M4ERR_STATE;
+ }
+
+ *vss3gppContext = xVSS_context->pCurrentEditContext;
+
+ return err;
+}
+
+/**
+ * Returns the list of available video decoders and their capabilities.
+ * @param decoders (OUT) receives the decoder capability list
+ * @return the error code of the underlying decoder-layer query
+ */
+M4OSA_ERR M4xVSS_getVideoDecoderCapabilities(M4DECODER_VideoDecoders **decoders) {
+ M4OSA_ERR err = M4NO_ERROR;
+
+ // Call the decoder api directly
+ // to get all the video decoder capabilities.
+ err = VideoEditorVideoDecoder_getVideoDecodersAndCapabilities(decoders);
+ return err;
+}
diff --git a/libvideoeditor/vss/src/M4xVSS_internal.c b/libvideoeditor/vss/src/M4xVSS_internal.c
new file mode 100755
index 0000000..5844115
--- /dev/null
+++ b/libvideoeditor/vss/src/M4xVSS_internal.c
@@ -0,0 +1,4889 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4xVSS_internal.c
+ * @brief Internal functions of extended Video Studio Service (Video Studio 2.1)
+ * @note
+ ******************************************************************************
+ */
+#include "M4OSA_Debug.h"
+#include "M4OSA_CharStar.h"
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+#include "M4xVSS_API.h"
+#include "M4xVSS_Internal.h"
+
+/*for rgb16 color effect*/
+#include "M4VIFI_Defines.h"
+#include "M4VIFI_Clip.h"
+
+/**
+ * component includes */
+#include "M4VFL_transition.h" /**< video effects */
+
+/* Internal header file of VSS is included because of MMS use case */
+#include "M4VSS3GPP_InternalTypes.h"
+
+/*Exif header files to add image rendering support (cropping, black borders)*/
+#include "M4EXIFC_CommonAPI.h"
+// StageFright encoders require %16 resolution
+#include "M4ENCODER_common.h"
+
+#define TRANSPARENT_COLOR 0x7E0
+
+/* Prototype of M4VIFI_xVSS_RGB565toYUV420 function (avoid green effect of transparency color) */
+M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut);
+
+
+/*special MCS function used only in VideoArtist and VideoStudio to open the media in the normal
+ mode. That way the media duration is accurate*/
+extern M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+ M4VIDEOEDITING_FileType InputFileType,
+ M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext)
+ * @brief This function initializes MCS (3GP transcoder) with the given
+ * parameters
+ * @note The transcoding parameters are given by the internal xVSS context.
+ * This context contains a pointer on the current element of the
+ * chained list of MCS parameters.
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext,
+                                          M4OSA_UInt32 *rotationDegree)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4MCS_Context mcs_context;
+    M4MCS_OutputParams Params;
+    M4MCS_EncodingParams Rates;
+    M4VIDEOEDITING_ClipProperties clipProps;
+
+    /* Create an MCS instance using the xVSS file-system accessors. */
+    err = M4MCS_init(&mcs_context, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_init: 0x%x", err);
+        return err;
+    }
+
+    /* Open the input/output/temp files described by the current element of
+       the MCS parameters chained list. On any failure from here on, abort
+       the MCS instance so its resources are released. */
+    err = M4MCS_open(mcs_context, xVSS_context->pMCScurrentParams->pFileIn,
+         xVSS_context->pMCScurrentParams->InputFileType,
+         xVSS_context->pMCScurrentParams->pFileOut,
+         xVSS_context->pMCScurrentParams->pFileTemp);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_open: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    /** Get the clip properties, mainly to report the source video rotation
+        back to the caller. */
+    err = M4MCS_getInputFileProperties(mcs_context, &clipProps);
+    if (err != M4NO_ERROR) {
+        M4OSA_TRACE1_1("Error in M4MCS_getInputFileProperties: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+    *rotationDegree = clipProps.videoRotationDegrees;
+
+    /**
+     * Fill MCS parameters with the parameters contained in the current
+     * element of the MCS parameters chained list. */
+    Params.OutputFileType = xVSS_context->pMCScurrentParams->OutputFileType;
+    Params.OutputVideoFormat = xVSS_context->pMCScurrentParams->OutputVideoFormat;
+    Params.outputVideoProfile= xVSS_context->pMCScurrentParams->outputVideoProfile;
+    Params.outputVideoLevel = xVSS_context->pMCScurrentParams->outputVideoLevel;
+    Params.OutputVideoFrameSize = xVSS_context->pMCScurrentParams->OutputVideoFrameSize;
+    Params.OutputVideoFrameRate = xVSS_context->pMCScurrentParams->OutputVideoFrameRate;
+    Params.OutputAudioFormat = xVSS_context->pMCScurrentParams->OutputAudioFormat;
+    Params.OutputAudioSamplingFrequency =
+        xVSS_context->pMCScurrentParams->OutputAudioSamplingFrequency;
+    Params.bAudioMono = xVSS_context->pMCScurrentParams->bAudioMono;
+    Params.pOutputPCMfile = M4OSA_NULL;
+
+    /*FB 2008/10/20: map the media rendering mode (keeps aspect ratio).*/
+    switch(xVSS_context->pMCScurrentParams->MediaRendering)
+    {
+    case M4xVSS_kResizing:
+        Params.MediaRendering = M4MCS_kResizing;
+        break;
+    case M4xVSS_kCropping:
+        Params.MediaRendering = M4MCS_kCropping;
+        break;
+    case M4xVSS_kBlackBorders:
+        Params.MediaRendering = M4MCS_kBlackBorders;
+        break;
+    default:
+        /* Unknown mode: leave Params.MediaRendering unchanged (same
+           behavior as the original code). */
+        break;
+    }
+
+    /* Parameters introduced with MCS 2.0. */
+    /* No audio effects for now. */
+    Params.nbEffects = 0;
+    Params.pEffects = NULL;
+
+    /* Keep EXIF data and the original orientation. */
+    Params.bDiscardExif = M4OSA_FALSE;
+    Params.bAdjustOrientation = M4OSA_FALSE;
+
+    /**
+     * Set output parameters */
+    err = M4MCS_setOutputParams(mcs_context, &Params);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_setOutputParams: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    /* Encoding rates and cut times (FB: transcoding per parts). */
+    Rates.OutputVideoBitrate = xVSS_context->pMCScurrentParams->OutputVideoBitrate;
+    Rates.OutputAudioBitrate = xVSS_context->pMCScurrentParams->OutputAudioBitrate;
+    Rates.OutputFileSize = 0;
+    Rates.BeginCutTime = xVSS_context->pMCScurrentParams->BeginCutTime;
+    Rates.EndCutTime = xVSS_context->pMCScurrentParams->EndCutTime;
+    Rates.OutputVideoTimescale = xVSS_context->pMCScurrentParams->OutputVideoTimescale;
+
+    err = M4MCS_setEncodingParams(mcs_context, &Rates);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_setEncodingParams: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    err = M4MCS_checkParamsAndStart(mcs_context);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_checkParamsAndStart: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    /**
+     * Save MCS context to be able to call the MCS step function in
+     * M4xVSS_step. */
+    xVSS_context->pMCS_Ctxt = mcs_context;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext)
+ * @brief This function cleans up MCS (3GP transcoder)
+ * @note
+ *
+ * @param pContext (IN) Pointer on the xVSS edit context
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL
+ * @return M4ERR_ALLOC: Memory allocation has failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_ERR err;
+
+ /* Close the MCS session; on failure, abort the instance so its
+ resources are released. */
+ err = M4MCS_close(xVSS_context->pMCS_Ctxt);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalStopTranscoding: Error in M4MCS_close: 0x%x", err);
+ M4MCS_abort(xVSS_context->pMCS_Ctxt);
+ /* NOTE(review): pMCS_Ctxt stays non-NULL on this path even though the
+ instance was aborted -- confirm callers do not reuse it afterwards. */
+ return err;
+ }
+
+ /**
+ * Free this MCS instance */
+ err = M4MCS_cleanUp(xVSS_context->pMCS_Ctxt);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalStopTranscoding: Error in M4MCS_cleanUp: 0x%x", err);
+ return err;
+ }
+
+ xVSS_context->pMCS_Ctxt = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
+ * M4OSA_FileReadPointer* pFileReadPtr,
+ * M4VIFI_ImagePlane* pImagePlanes,
+ * M4OSA_UInt32 width,
+ * M4OSA_UInt32 height);
+ * @brief It Coverts and resizes a ARGB8888 image to YUV420
+ * @note
+ * @param pFileIn (IN) The Image input file
+ * @param pFileReadPtr (IN) Pointer on filesystem functions
+ * @param pImagePlanes (IN/OUT) Pointer on YUV420 output planes allocated by the user
+ * ARGB8888 image will be converted and resized to output
+ * YUV420 plane size
+ *@param width (IN) width of the ARGB8888
+ *@param height (IN) height of the ARGB8888
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_ALLOC: memory error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
+                                                          M4OSA_FileReadPointer* pFileReadPtr,
+                                                          M4VIFI_ImagePlane* pImagePlanes,
+                                                          M4OSA_UInt32 width,M4OSA_UInt32 height)
+{
+    M4OSA_Context pARGBIn;
+    M4VIFI_ImagePlane rgbPlane1 ,rgbPlane2;
+    M4OSA_UInt32 frameSize_argb=(width * height * 4);
+    M4OSA_UInt32 frameSize = (width * height * 3); //Size of RGB888 data.
+    M4OSA_UInt32 i = 0,j= 0;
+    M4OSA_ERR err=M4NO_ERROR;
+
+    /* Temporary buffer holding the raw ARGB8888 file content. */
+    M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_32bitAlignedMalloc(frameSize_argb,
+         M4VS, (M4OSA_Char*)"Image argb data");
+    M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Entering :");
+    if(pTmpData == M4OSA_NULL) {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
+            Failed to allocate memory for Image clip");
+        return M4ERR_ALLOC;
+    }
+
+    M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :width and height %d %d",
+        width ,height);
+    /* Read the whole ARGB8888 picture into pTmpData. */
+    err = pFileReadPtr->openRead(&pARGBIn, pFileIn, M4OSA_kFileRead);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
+            Can't open input ARGB8888 file %s, error: 0x%x\n",pFileIn, err);
+        free(pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    err = pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pTmpData, &frameSize_argb);
+    if(err != M4NO_ERROR)
+    {
+        /* Bug fix: this trace wrongly said "Can't close". */
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Can't read ARGB8888\
+            file %s, error: 0x%x\n",pFileIn, err);
+        pFileReadPtr->closeRead(pARGBIn);
+        free(pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    err = pFileReadPtr->closeRead(pARGBIn);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Can't close ARGB8888 \
+            file %s, error: 0x%x\n",pFileIn, err);
+        free(pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    /* Intermediate RGB888 plane at the source resolution. */
+    rgbPlane1.pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize, M4VS,
+        (M4OSA_Char*)"Image clip RGB888 data");
+    if(rgbPlane1.pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 \
+            Failed to allocate memory for Image clip");
+        free(pTmpData);
+        return M4ERR_ALLOC;
+    }
+
+    rgbPlane1.u_height = height;
+    rgbPlane1.u_width = width;
+    rgbPlane1.u_stride = width*3;
+    rgbPlane1.u_topleft = 0;
+
+    /** Remove the alpha channel: drop every 4th byte of the ARGB data. */
+    for (i=0, j = 0; i < frameSize_argb; i++) {
+        if ((i % 4) == 0) continue;
+        rgbPlane1.pac_data[j] = pTmpData[i];
+        j++;
+    }
+    free(pTmpData);
+    /* Bug fix: clear the pointer so it cannot be freed a second time.
+       The original code double-freed pTmpData when the rgbPlane2
+       allocation below failed. */
+    pTmpData = M4OSA_NULL;
+
+    /* To Check if resizing is required with color conversion */
+    if(width != pImagePlanes->u_width || height != pImagePlanes->u_height)
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Resizing :");
+        frameSize = ( pImagePlanes->u_width * pImagePlanes->u_height * 3);
+        rgbPlane2.pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize, M4VS,
+            (M4OSA_Char*)"Image clip RGB888 data");
+        if(rgbPlane2.pac_data == M4OSA_NULL)
+        {
+            M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+            /* Bug fix: release rgbPlane1, which was previously leaked here
+               while the already-freed pTmpData was freed again. */
+            free(rgbPlane1.pac_data);
+            return M4ERR_ALLOC;
+        }
+        rgbPlane2.u_height = pImagePlanes->u_height;
+        rgbPlane2.u_width = pImagePlanes->u_width;
+        rgbPlane2.u_stride = pImagePlanes->u_width*3;
+        rgbPlane2.u_topleft = 0;
+
+        /* Resizing RGB888 to RGB888 */
+        err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane1, &rgbPlane2);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from Resize RGB888 to RGB888: 0x%x\n", err);
+            free(rgbPlane2.pac_data);
+            free(rgbPlane1.pac_data);
+            return err;
+        }
+        /*Converting Resized RGB888 to YUV420 */
+        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane2, pImagePlanes);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from RGB888 to YUV: 0x%x\n", err);
+            free(rgbPlane2.pac_data);
+            free(rgbPlane1.pac_data);
+            return err;
+        }
+        free(rgbPlane2.pac_data);
+        free(rgbPlane1.pac_data);
+
+        M4OSA_TRACE1_0("RGB to YUV done");
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 NO Resizing :");
+        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane1, pImagePlanes);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from RGB to YUV: 0x%x\n", err);
+        }
+        free(rgbPlane1.pac_data);
+
+        M4OSA_TRACE1_0("RGB to YUV done");
+    }
+cleanup:
+    M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 leaving :");
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
+ * M4OSA_FileReadPointer* pFileReadPtr,
+ * M4VIFI_ImagePlane* pImagePlanes,
+ * M4OSA_UInt32 width,
+ * M4OSA_UInt32 height);
+ * @brief It Coverts a ARGB8888 image to YUV420
+ * @note
+ * @param pFileIn (IN) The Image input file
+ * @param pFileReadPtr (IN) Pointer on filesystem functions
+ * @param pImagePlanes (IN/OUT) Pointer on YUV420 output planes allocated by the user
+ * ARGB8888 image will be converted and resized to output
+ * YUV420 plane size
+ * @param width (IN) width of the ARGB8888
+ * @param height (IN) height of the ARGB8888
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_ALLOC: memory error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
+                                                 M4OSA_FileReadPointer* pFileReadPtr,
+                                                 M4VIFI_ImagePlane** pImagePlanes,
+                                                 M4OSA_UInt32 width,M4OSA_UInt32 height)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VIFI_ImagePlane *yuvPlane = M4OSA_NULL;
+
+    /* Allocate the three YUV plane descriptors in one block; the caller
+       takes ownership of both the descriptors and the pixel buffer on
+       success. */
+    yuvPlane = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane),
+        M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+    if(yuvPlane == M4OSA_NULL) {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
+            Failed to allocate memory for Image clip");
+        return M4ERR_ALLOC;
+    }
+
+    /* Y plane: full resolution. The single pixel buffer holds Y, U and V
+       contiguously (width * height * 1.5 bytes). */
+    yuvPlane[0].u_height = height;
+    yuvPlane[0].u_width = width;
+    yuvPlane[0].u_stride = width;
+    yuvPlane[0].u_topleft = 0;
+    yuvPlane[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(yuvPlane[0].u_height \
+        * yuvPlane[0].u_width * 1.5, M4VS, (M4OSA_Char*)"imageClip YUV data");
+    /* Bug fix: this allocation was previously not checked before use. */
+    if(yuvPlane[0].pac_data == M4OSA_NULL) {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB8888toYUV420 :\
+            Failed to allocate memory for YUV data");
+        free(yuvPlane);
+        return M4ERR_ALLOC;
+    }
+
+    /* U and V planes: half resolution, pointing into the shared buffer. */
+    yuvPlane[1].u_height = yuvPlane[0].u_height >>1;
+    yuvPlane[1].u_width = yuvPlane[0].u_width >> 1;
+    yuvPlane[1].u_stride = yuvPlane[1].u_width;
+    yuvPlane[1].u_topleft = 0;
+    yuvPlane[1].pac_data = (M4VIFI_UInt8*)(yuvPlane[0].pac_data + yuvPlane[0].u_height \
+        * yuvPlane[0].u_width);
+
+    yuvPlane[2].u_height = yuvPlane[0].u_height >>1;
+    yuvPlane[2].u_width = yuvPlane[0].u_width >> 1;
+    yuvPlane[2].u_stride = yuvPlane[2].u_width;
+    yuvPlane[2].u_topleft = 0;
+    yuvPlane[2].pac_data = (M4VIFI_UInt8*)(yuvPlane[1].pac_data + yuvPlane[1].u_height \
+        * yuvPlane[1].u_width);
+
+    err = M4xVSS_internalConvertAndResizeARGB8888toYUV420( pFileIn,pFileReadPtr,
+        yuvPlane, width, height);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalConvertAndResizeARGB8888toYUV420 return error: 0x%x\n", err);
+        /* Bug fix: also release the pixel buffer, which used to leak on
+           this error path (only the descriptors were freed). */
+        free(yuvPlane[0].pac_data);
+        free(yuvPlane);
+        return err;
+    }
+
+    *pImagePlanes = yuvPlane;
+
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB8888toYUV420 :Leaving");
+    return err;
+
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_PictureCallbackFct (M4OSA_Void* pPictureCtxt,
+ * M4VIFI_ImagePlane* pImagePlanes,
+ * M4OSA_UInt32* pPictureDuration);
+ * @brief It feeds the PTO3GPP with YUV420 pictures.
+ * @note This function is given to the PTO3GPP in the M4PTO3GPP_Params structure
+ * @param pContext (IN) The integrator own context
+ * @param pImagePlanes(IN/OUT) Pointer to an array of three valid image planes
+ * @param pPictureDuration(OUT) Duration of the returned picture
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_PictureCallbackFct(M4OSA_Void* pPictureCtxt, M4VIFI_ImagePlane* pImagePlanes,
+ M4OSA_Double* pPictureDuration)
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_UInt8 last_frame_flag = 0;
+ M4xVSS_PictureCallbackCtxt* pC = (M4xVSS_PictureCallbackCtxt*) (pPictureCtxt);
+
+ /*Used for pan&zoom*/
+ M4OSA_UInt8 tempPanzoomXa = 0;
+ M4OSA_UInt8 tempPanzoomXb = 0;
+ M4AIR_Params Params;
+ /**/
+
+ /*Used for cropping and black borders*/
+ M4OSA_Context pPictureContext = M4OSA_NULL;
+ M4OSA_FilePosition pictureSize = 0 ;
+ M4OSA_UInt8* pictureBuffer = M4OSA_NULL;
+ //M4EXIFC_Context pExifContext = M4OSA_NULL;
+ M4EXIFC_BasicTags pBasicTags;
+ M4VIFI_ImagePlane pImagePlanes1 = pImagePlanes[0];
+ M4VIFI_ImagePlane pImagePlanes2 = pImagePlanes[1];
+ M4VIFI_ImagePlane pImagePlanes3 = pImagePlanes[2];
+ /**/
+
+ /**
+ * Check input parameters */
+ M4OSA_DEBUG_IF2((M4OSA_NULL==pPictureCtxt), M4ERR_PARAMETER,
+ "M4xVSS_PictureCallbackFct: pPictureCtxt is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL==pImagePlanes), M4ERR_PARAMETER,
+ "M4xVSS_PictureCallbackFct: pImagePlanes is M4OSA_NULL");
+ M4OSA_DEBUG_IF2((M4OSA_NULL==pPictureDuration), M4ERR_PARAMETER,
+ "M4xVSS_PictureCallbackFct: pPictureDuration is M4OSA_NULL");
+ M4OSA_TRACE1_0("M4xVSS_PictureCallbackFct :Entering");
+ /*PR P4ME00003181 In case the image number is 0, pan&zoom can not be used*/
+ if(M4OSA_TRUE == pC->m_pPto3GPPparams->isPanZoom && pC->m_NbImage == 0)
+ {
+ pC->m_pPto3GPPparams->isPanZoom = M4OSA_FALSE;
+ }
+
+ /*If no cropping/black borders or pan&zoom, just decode and resize the picture*/
+ if(pC->m_mediaRendering == M4xVSS_kResizing && M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+ {
+ /**
+ * Convert and resize input ARGB8888 file to YUV420 */
+ /*To support ARGB8888 : */
+ M4OSA_TRACE1_2("M4xVSS_PictureCallbackFct 1: width and heght %d %d",
+ pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+ err = M4xVSS_internalConvertAndResizeARGB8888toYUV420(pC->m_FileIn,
+ pC->m_pFileReadPtr, pImagePlanes,pC->m_pPto3GPPparams->width,
+ pC->m_pPto3GPPparams->height);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when decoding JPEG: 0x%x\n", err);
+ return err;
+ }
+ }
+ /*In case of cropping, black borders or pan&zoom, call the EXIF reader and the AIR*/
+ else
+ {
+ /**
+ * Computes ratios */
+ if(pC->m_pDecodedPlane == M4OSA_NULL)
+ {
+ /**
+ * Convert input ARGB8888 file to YUV420 */
+ M4OSA_TRACE1_2("M4xVSS_PictureCallbackFct 2: width and heght %d %d",
+ pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+ err = M4xVSS_internalConvertARGB8888toYUV420(pC->m_FileIn, pC->m_pFileReadPtr,
+ &(pC->m_pDecodedPlane),pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when decoding JPEG: 0x%x\n", err);
+ if(pC->m_pDecodedPlane != M4OSA_NULL)
+ {
+ /* YUV420 planar is returned but allocation is made only once
+ (contigous planes in memory) */
+ if(pC->m_pDecodedPlane->pac_data != M4OSA_NULL)
+ {
+ free(pC->m_pDecodedPlane->pac_data);
+ }
+ free(pC->m_pDecodedPlane);
+ pC->m_pDecodedPlane = M4OSA_NULL;
+ }
+ return err;
+ }
+ }
+
+ /*Initialize AIR Params*/
+ Params.m_inputCoord.m_x = 0;
+ Params.m_inputCoord.m_y = 0;
+ Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+ Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+ Params.m_outputSize.m_width = pImagePlanes->u_width;
+ Params.m_outputSize.m_height = pImagePlanes->u_height;
+ Params.m_bOutputStripe = M4OSA_FALSE;
+ Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+
+ /*Initialize Exif params structure*/
+ pBasicTags.orientation = M4COMMON_kOrientationUnknown;
+
+ /**
+ Pan&zoom params*/
+ if(M4OSA_TRUE == pC->m_pPto3GPPparams->isPanZoom)
+ {
+ /*Save ratio values, they can be reused if the new ratios are 0*/
+ tempPanzoomXa = (M4OSA_UInt8)pC->m_pPto3GPPparams->PanZoomXa;
+ tempPanzoomXb = (M4OSA_UInt8)pC->m_pPto3GPPparams->PanZoomXb;
+ /*Check that the ratio is not 0*/
+ /*Check (a) parameters*/
+ if(pC->m_pPto3GPPparams->PanZoomXa == 0)
+ {
+ M4OSA_UInt8 maxRatio = 0;
+ if(pC->m_pPto3GPPparams->PanZoomTopleftXa >=
+ pC->m_pPto3GPPparams->PanZoomTopleftYa)
+ {
+ /*The ratio is 0, that means the area of the picture defined with (a)
+ parameters is bigger than the image size*/
+ if(pC->m_pPto3GPPparams->PanZoomTopleftXa + tempPanzoomXa > 1000)
+ {
+ /*The oversize is maxRatio*/
+ maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftXa + tempPanzoomXa - 1000;
+ }
+ }
+ else
+ {
+ /*The ratio is 0, that means the area of the picture defined with (a)
+ parameters is bigger than the image size*/
+ if(pC->m_pPto3GPPparams->PanZoomTopleftYa + tempPanzoomXa > 1000)
+ {
+ /*The oversize is maxRatio*/
+ maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftYa + tempPanzoomXa - 1000;
+ }
+ }
+ /*Modify the (a) parameters:*/
+ if(pC->m_pPto3GPPparams->PanZoomTopleftXa >= maxRatio)
+ {
+ /*The (a) topleft parameters can be moved to keep the same area size*/
+ pC->m_pPto3GPPparams->PanZoomTopleftXa -= maxRatio;
+ }
+ else
+ {
+ /*Move the (a) topleft parameter to 0 but the ratio will be also further
+ modified to match the image size*/
+ pC->m_pPto3GPPparams->PanZoomTopleftXa = 0;
+ }
+ if(pC->m_pPto3GPPparams->PanZoomTopleftYa >= maxRatio)
+ {
+ /*The (a) topleft parameters can be moved to keep the same area size*/
+ pC->m_pPto3GPPparams->PanZoomTopleftYa -= maxRatio;
+ }
+ else
+ {
+ /*Move the (a) topleft parameter to 0 but the ratio will be also further
+ modified to match the image size*/
+ pC->m_pPto3GPPparams->PanZoomTopleftYa = 0;
+ }
+ /*The new ratio is the original one*/
+ pC->m_pPto3GPPparams->PanZoomXa = tempPanzoomXa;
+ if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftXa > 1000)
+ {
+ /*Change the ratio if the area of the picture defined with (a) parameters is
+ bigger than the image size*/
+ pC->m_pPto3GPPparams->PanZoomXa = 1000 - pC->m_pPto3GPPparams->PanZoomTopleftXa;
+ }
+ if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftYa > 1000)
+ {
+ /*Change the ratio if the area of the picture defined with (a) parameters is
+ bigger than the image size*/
+ pC->m_pPto3GPPparams->PanZoomXa = 1000 - pC->m_pPto3GPPparams->PanZoomTopleftYa;
+ }
+ }
+ /*Check (b) parameters*/
+ if(pC->m_pPto3GPPparams->PanZoomXb == 0)
+ {
+ M4OSA_UInt8 maxRatio = 0;
+ if(pC->m_pPto3GPPparams->PanZoomTopleftXb >=
+ pC->m_pPto3GPPparams->PanZoomTopleftYb)
+ {
+ /*The ratio is 0, that means the area of the picture defined with (b)
+ parameters is bigger than the image size*/
+ if(pC->m_pPto3GPPparams->PanZoomTopleftXb + tempPanzoomXb > 1000)
+ {
+ /*The oversize is maxRatio*/
+ maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftXb + tempPanzoomXb - 1000;
+ }
+ }
+ else
+ {
+ /*The ratio is 0, that means the area of the picture defined with (b)
+ parameters is bigger than the image size*/
+ if(pC->m_pPto3GPPparams->PanZoomTopleftYb + tempPanzoomXb > 1000)
+ {
+ /*The oversize is maxRatio*/
+ maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftYb + tempPanzoomXb - 1000;
+ }
+ }
+ /*Modify the (b) parameters:*/
+ if(pC->m_pPto3GPPparams->PanZoomTopleftXb >= maxRatio)
+ {
+ /*The (b) topleft parameters can be moved to keep the same area size*/
+ pC->m_pPto3GPPparams->PanZoomTopleftXb -= maxRatio;
+ }
+ else
+ {
+ /*Move the (b) topleft parameter to 0 but the ratio will be also further
+ modified to match the image size*/
+ pC->m_pPto3GPPparams->PanZoomTopleftXb = 0;
+ }
+ if(pC->m_pPto3GPPparams->PanZoomTopleftYb >= maxRatio)
+ {
+ /*The (b) topleft parameters can be moved to keep the same area size*/
+ pC->m_pPto3GPPparams->PanZoomTopleftYb -= maxRatio;
+ }
+ else
+ {
+ /*Move the (b) topleft parameter to 0 but the ratio will be also further
+ modified to match the image size*/
+ pC->m_pPto3GPPparams->PanZoomTopleftYb = 0;
+ }
+ /*The new ratio is the original one*/
+ pC->m_pPto3GPPparams->PanZoomXb = tempPanzoomXb;
+ if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftXb > 1000)
+ {
+ /*Change the ratio if the area of the picture defined with (b) parameters is
+ bigger than the image size*/
+ pC->m_pPto3GPPparams->PanZoomXb = 1000 - pC->m_pPto3GPPparams->PanZoomTopleftXb;
+ }
+ if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftYb > 1000)
+ {
+ /*Change the ratio if the area of the picture defined with (b) parameters is
+ bigger than the image size*/
+ pC->m_pPto3GPPparams->PanZoomXb = 1000 - pC->m_pPto3GPPparams->PanZoomTopleftYb;
+ }
+ }
+
+ /**
+ * Computes AIR parameters */
+/* Params.m_inputCoord.m_x = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_width *
+ (pC->m_pPto3GPPparams->PanZoomTopleftXa +
+ (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomTopleftXb \
+ - pC->m_pPto3GPPparams->PanZoomTopleftXa) *
+ pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+ Params.m_inputCoord.m_y = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_height *
+ (pC->m_pPto3GPPparams->PanZoomTopleftYa +
+ (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomTopleftYb\
+ - pC->m_pPto3GPPparams->PanZoomTopleftYa) *
+ pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+
+ Params.m_inputSize.m_width = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_width *
+ (pC->m_pPto3GPPparams->PanZoomXa +
+ (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomXb - pC->m_pPto3GPPparams->PanZoomXa) *
+ pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+
+ Params.m_inputSize.m_height = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_height *
+ (pC->m_pPto3GPPparams->PanZoomXa +
+ (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomXb - pC->m_pPto3GPPparams->PanZoomXa) *
+ pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+ */
+ // Instead of using pC->m_NbImage we have to use (pC->m_NbImage-1), because pC->m_ImageCounter
+ // will be at most x-1 for x frames
+ Params.m_inputCoord.m_x = (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_width *
+ (pC->m_pPto3GPPparams->PanZoomTopleftXa +
+ (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomTopleftXb\
+ - pC->m_pPto3GPPparams->PanZoomTopleftXa) *
+ pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage-1)) / 1000));
+ Params.m_inputCoord.m_y =
+ (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_height *
+ (pC->m_pPto3GPPparams->PanZoomTopleftYa +
+ (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomTopleftYb\
+ - pC->m_pPto3GPPparams->PanZoomTopleftYa) *
+ pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage-1)) / 1000));
+
+ Params.m_inputSize.m_width =
+ (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_width *
+ (pC->m_pPto3GPPparams->PanZoomXa +
+ (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomXb\
+ - pC->m_pPto3GPPparams->PanZoomXa) *
+ pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage-1)) / 1000));
+
+ Params.m_inputSize.m_height =
+ (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_height *
+ (pC->m_pPto3GPPparams->PanZoomXa +
+ (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomXb \
+ - pC->m_pPto3GPPparams->PanZoomXa) *
+ pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage-1)) / 1000));
+
+ if((Params.m_inputSize.m_width + Params.m_inputCoord.m_x)\
+ > pC->m_pDecodedPlane->u_width)
+ {
+ Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width \
+ - Params.m_inputCoord.m_x;
+ }
+
+ if((Params.m_inputSize.m_height + Params.m_inputCoord.m_y)\
+ > pC->m_pDecodedPlane->u_height)
+ {
+ Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height\
+ - Params.m_inputCoord.m_y;
+ }
+
+
+
+ Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+ Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+ }
+
+
+
+ /**
+ Picture rendering: Black borders*/
+
+ if(pC->m_mediaRendering == M4xVSS_kBlackBorders)
+ {
+ memset((void *)pImagePlanes[0].pac_data,Y_PLANE_BORDER_VALUE,
+ (pImagePlanes[0].u_height*pImagePlanes[0].u_stride));
+ memset((void *)pImagePlanes[1].pac_data,U_PLANE_BORDER_VALUE,
+ (pImagePlanes[1].u_height*pImagePlanes[1].u_stride));
+ memset((void *)pImagePlanes[2].pac_data,V_PLANE_BORDER_VALUE,
+ (pImagePlanes[2].u_height*pImagePlanes[2].u_stride));
+
+ /**
+ First without pan&zoom*/
+ if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+ {
+ switch(pBasicTags.orientation)
+ {
+ default:
+ case M4COMMON_kOrientationUnknown:
+ Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+ case M4COMMON_kOrientationTopLeft:
+ case M4COMMON_kOrientationTopRight:
+ case M4COMMON_kOrientationBottomRight:
+ case M4COMMON_kOrientationBottomLeft:
+ if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_width)\
+ /pC->m_pDecodedPlane->u_width) <= pImagePlanes->u_height)
+ //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+ {
+ /*it is height so black borders will be on the top and on the bottom side*/
+ Params.m_outputSize.m_width = pImagePlanes->u_width;
+ Params.m_outputSize.m_height =
+ (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height \
+ * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_width);
+ /*number of lines at the top*/
+ pImagePlanes[0].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+ -Params.m_outputSize.m_height)>>1))*pImagePlanes[0].u_stride;
+ pImagePlanes[0].u_height = Params.m_outputSize.m_height;
+ pImagePlanes[1].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+ -(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanes[1].u_stride;
+ pImagePlanes[1].u_height = Params.m_outputSize.m_height>>1;
+ pImagePlanes[2].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+ -(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanes[2].u_stride;
+ pImagePlanes[2].u_height = Params.m_outputSize.m_height>>1;
+ }
+ else
+ {
+ /*it is width so black borders will be on the left and right side*/
+ Params.m_outputSize.m_height = pImagePlanes->u_height;
+ Params.m_outputSize.m_width =
+ (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+ * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_height);
+
+ pImagePlanes[0].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+ -Params.m_outputSize.m_width)>>1));
+ pImagePlanes[0].u_width = Params.m_outputSize.m_width;
+ pImagePlanes[1].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+ -(Params.m_outputSize.m_width>>1)))>>1);
+ pImagePlanes[1].u_width = Params.m_outputSize.m_width>>1;
+ pImagePlanes[2].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+ -(Params.m_outputSize.m_width>>1)))>>1);
+ pImagePlanes[2].u_width = Params.m_outputSize.m_width>>1;
+ }
+ break;
+ case M4COMMON_kOrientationLeftTop:
+ case M4COMMON_kOrientationLeftBottom:
+ case M4COMMON_kOrientationRightTop:
+ case M4COMMON_kOrientationRightBottom:
+ if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+ /pC->m_pDecodedPlane->u_height) < pImagePlanes->u_height)
+ //Params.m_inputSize.m_height > Params.m_inputSize.m_width)
+ {
+ /*it is height so black borders will be on the top and on
+ the bottom side*/
+ Params.m_outputSize.m_height = pImagePlanes->u_width;
+ Params.m_outputSize.m_width =
+ (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+ * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_height);
+ /*number of lines at the top*/
+ pImagePlanes[0].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+ -Params.m_outputSize.m_width))>>1)*pImagePlanes[0].u_stride)+1;
+ pImagePlanes[0].u_height = Params.m_outputSize.m_width;
+ pImagePlanes[1].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+ -(Params.m_outputSize.m_width>>1)))>>1)\
+ *pImagePlanes[1].u_stride)+1;
+ pImagePlanes[1].u_height = Params.m_outputSize.m_width>>1;
+ pImagePlanes[2].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+ -(Params.m_outputSize.m_width>>1)))>>1)\
+ *pImagePlanes[2].u_stride)+1;
+ pImagePlanes[2].u_height = Params.m_outputSize.m_width>>1;
+ }
+ else
+ {
+ /*it is width so black borders will be on the left and right side*/
+ Params.m_outputSize.m_width = pImagePlanes->u_height;
+ Params.m_outputSize.m_height =
+ (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height\
+ * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_width);
+
+ pImagePlanes[0].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+ -Params.m_outputSize.m_height))>>1))+1;
+ pImagePlanes[0].u_width = Params.m_outputSize.m_height;
+ pImagePlanes[1].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+ -(Params.m_outputSize.m_height>>1)))>>1))+1;
+ pImagePlanes[1].u_width = Params.m_outputSize.m_height>>1;
+ pImagePlanes[2].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+ -(Params.m_outputSize.m_height>>1)))>>1))+1;
+ pImagePlanes[2].u_width = Params.m_outputSize.m_height>>1;
+ }
+ break;
+ }
+ }
+
+ /**
+ Secondly with pan&zoom*/
+ else
+ {
+ switch(pBasicTags.orientation)
+ {
+ default:
+ case M4COMMON_kOrientationUnknown:
+ Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+ case M4COMMON_kOrientationTopLeft:
+ case M4COMMON_kOrientationTopRight:
+ case M4COMMON_kOrientationBottomRight:
+ case M4COMMON_kOrientationBottomLeft:
+ /*NO ROTATION*/
+ if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_width)\
+ /pC->m_pDecodedPlane->u_width) <= pImagePlanes->u_height)
+ //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+ {
+ /*Black borders will be on the top and bottom of the output video*/
+ /*Maximum output height if the input image aspect ratio is kept and if
+ the output width is the screen width*/
+ M4OSA_UInt32 tempOutputSizeHeight =
+ (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height\
+ * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_width);
+ M4OSA_UInt32 tempInputSizeHeightMax = 0;
+ M4OSA_UInt32 tempFinalInputHeight = 0;
+ /*The output width is the screen width*/
+ Params.m_outputSize.m_width = pImagePlanes->u_width;
+ tempOutputSizeHeight = (tempOutputSizeHeight>>1)<<1;
+
+ /*Maximum input height according to the maximum output height
+ (proportional to the maximum output height)*/
+ tempInputSizeHeightMax = (pImagePlanes->u_height\
+ *Params.m_inputSize.m_height)/tempOutputSizeHeight;
+ tempInputSizeHeightMax = (tempInputSizeHeightMax>>1)<<1;
+
+ /*Check if the maximum possible input height is contained into the
+ input image height*/
+ if(tempInputSizeHeightMax <= pC->m_pDecodedPlane->u_height)
+ {
+ /*The maximum possible input height is contained in the input
+ image height,
+ that means no black borders, the input pan zoom area will be extended
+ so that the input AIR height will be the maximum possible*/
+ if(((tempInputSizeHeightMax - Params.m_inputSize.m_height)>>1)\
+ <= Params.m_inputCoord.m_y
+ && ((tempInputSizeHeightMax - Params.m_inputSize.m_height)>>1)\
+ <= pC->m_pDecodedPlane->u_height -(Params.m_inputCoord.m_y\
+ + Params.m_inputSize.m_height))
+ {
+ /*The input pan zoom area can be extended symmetrically on the
+ top and bottom side*/
+ Params.m_inputCoord.m_y -= ((tempInputSizeHeightMax \
+ - Params.m_inputSize.m_height)>>1);
+ }
+ else if(Params.m_inputCoord.m_y < pC->m_pDecodedPlane->u_height\
+ -(Params.m_inputCoord.m_y + Params.m_inputSize.m_height))
+ {
+ /*There is not enough place above the input pan zoom area to
+ extend it symmetrically,
+ so extend it to the maximum on the top*/
+ Params.m_inputCoord.m_y = 0;
+ }
+ else
+ {
+ /*There is not enough place below the input pan zoom area to
+ extend it symmetrically,
+ so extend it to the maximum on the bottom*/
+ Params.m_inputCoord.m_y = pC->m_pDecodedPlane->u_height \
+ - tempInputSizeHeightMax;
+ }
+ /*The input height of the AIR is the maximum possible height*/
+ Params.m_inputSize.m_height = tempInputSizeHeightMax;
+ }
+ else
+ {
+ /*The maximum possible input height is greater than the input
+ image height,
+ that means black borders are necessary to keep aspect ratio
+ The input height of the AIR is all the input image height*/
+ Params.m_outputSize.m_height =
+ (tempOutputSizeHeight*pC->m_pDecodedPlane->u_height)\
+ /Params.m_inputSize.m_height;
+ Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+ Params.m_inputCoord.m_y = 0;
+ Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+ pImagePlanes[0].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+ -Params.m_outputSize.m_height)>>1))*pImagePlanes[0].u_stride;
+ pImagePlanes[0].u_height = Params.m_outputSize.m_height;
+ pImagePlanes[1].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+ -(Params.m_outputSize.m_height>>1)))>>1)\
+ *pImagePlanes[1].u_stride);
+ pImagePlanes[1].u_height = Params.m_outputSize.m_height>>1;
+ pImagePlanes[2].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+ -(Params.m_outputSize.m_height>>1)))>>1)\
+ *pImagePlanes[2].u_stride);
+ pImagePlanes[2].u_height = Params.m_outputSize.m_height>>1;
+ }
+ }
+ else
+ {
+ /*Black borders will be on the left and right side of the output video*/
+ /*Maximum output width if the input image aspect ratio is kept and if the
+ output height is the screen height*/
+ M4OSA_UInt32 tempOutputSizeWidth =
+ (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+ * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_height);
+ M4OSA_UInt32 tempInputSizeWidthMax = 0;
+ M4OSA_UInt32 tempFinalInputWidth = 0;
+ /*The output height is the screen height*/
+ Params.m_outputSize.m_height = pImagePlanes->u_height;
+ tempOutputSizeWidth = (tempOutputSizeWidth>>1)<<1;
+
+ /*Maximum input width according to the maximum output width
+ (proportional to the maximum output width)*/
+ tempInputSizeWidthMax =
+ (pImagePlanes->u_width*Params.m_inputSize.m_width)\
+ /tempOutputSizeWidth;
+ tempInputSizeWidthMax = (tempInputSizeWidthMax>>1)<<1;
+
+ /*Check if the maximum possible input width is contained into the input
+ image width*/
+ if(tempInputSizeWidthMax <= pC->m_pDecodedPlane->u_width)
+ {
+ /*The maximum possible input width is contained in the input
+ image width,
+ that means no black borders, the input pan zoom area will be extended
+ so that the input AIR width will be the maximum possible*/
+ if(((tempInputSizeWidthMax - Params.m_inputSize.m_width)>>1) \
+ <= Params.m_inputCoord.m_x
+ && ((tempInputSizeWidthMax - Params.m_inputSize.m_width)>>1)\
+ <= pC->m_pDecodedPlane->u_width -(Params.m_inputCoord.m_x \
+ + Params.m_inputSize.m_width))
+ {
+ /*The input pan zoom area can be extended symmetrically on the
+ right and left side*/
+ Params.m_inputCoord.m_x -= ((tempInputSizeWidthMax\
+ - Params.m_inputSize.m_width)>>1);
+ }
+ else if(Params.m_inputCoord.m_x < pC->m_pDecodedPlane->u_width\
+ -(Params.m_inputCoord.m_x + Params.m_inputSize.m_width))
+ {
+ /*There is not enough place above the input pan zoom area to
+ extend it symmetrically,
+ so extend it to the maximum on the left*/
+ Params.m_inputCoord.m_x = 0;
+ }
+ else
+ {
+ /*There is not enough place below the input pan zoom area
+ to extend it symmetrically,
+ so extend it to the maximum on the right*/
+ Params.m_inputCoord.m_x = pC->m_pDecodedPlane->u_width \
+ - tempInputSizeWidthMax;
+ }
+ /*The input width of the AIR is the maximum possible width*/
+ Params.m_inputSize.m_width = tempInputSizeWidthMax;
+ }
+ else
+ {
+ /*The maximum possible input width is greater than the input
+ image width,
+ that means black borders are necessary to keep aspect ratio
+ The input width of the AIR is all the input image width*/
+ Params.m_outputSize.m_width =\
+ (tempOutputSizeWidth*pC->m_pDecodedPlane->u_width)\
+ /Params.m_inputSize.m_width;
+ Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+ Params.m_inputCoord.m_x = 0;
+ Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+ pImagePlanes[0].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+ -Params.m_outputSize.m_width)>>1));
+ pImagePlanes[0].u_width = Params.m_outputSize.m_width;
+ pImagePlanes[1].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+ -(Params.m_outputSize.m_width>>1)))>>1);
+ pImagePlanes[1].u_width = Params.m_outputSize.m_width>>1;
+ pImagePlanes[2].u_topleft =
+ (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+ -(Params.m_outputSize.m_width>>1)))>>1);
+ pImagePlanes[2].u_width = Params.m_outputSize.m_width>>1;
+ }
+ }
+ break;
+ case M4COMMON_kOrientationLeftTop:
+ case M4COMMON_kOrientationLeftBottom:
+ case M4COMMON_kOrientationRightTop:
+ case M4COMMON_kOrientationRightBottom:
+ /*ROTATION*/
+ if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+ /pC->m_pDecodedPlane->u_height) < pImagePlanes->u_height)
+ //Params.m_inputSize.m_height > Params.m_inputSize.m_width)
+ {
+ /*Black borders will be on the left and right side of the output video*/
+ /*Maximum output height if the input image aspect ratio is kept and if
+ the output height is the screen width*/
+ M4OSA_UInt32 tempOutputSizeHeight =
+ (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+ /pC->m_pDecodedPlane->u_height);
+ M4OSA_UInt32 tempInputSizeHeightMax = 0;
+ M4OSA_UInt32 tempFinalInputHeight = 0;
+ /*The output width is the screen height*/
+ Params.m_outputSize.m_height = pImagePlanes->u_width;
+ Params.m_outputSize.m_width= pImagePlanes->u_height;
+ tempOutputSizeHeight = (tempOutputSizeHeight>>1)<<1;
+
+ /*Maximum input height according to the maximum output height
+ (proportional to the maximum output height)*/
+ tempInputSizeHeightMax =
+ (pImagePlanes->u_height*Params.m_inputSize.m_width)\
+ /tempOutputSizeHeight;
+ tempInputSizeHeightMax = (tempInputSizeHeightMax>>1)<<1;
+
+ /*Check if the maximum possible input height is contained into the
+ input image width (rotation included)*/
+ if(tempInputSizeHeightMax <= pC->m_pDecodedPlane->u_width)
+ {
+ /*The maximum possible input height is contained in the input
+ image width (rotation included),
+ that means no black borders, the input pan zoom area will be extended
+ so that the input AIR width will be the maximum possible*/
+ if(((tempInputSizeHeightMax - Params.m_inputSize.m_width)>>1) \
+ <= Params.m_inputCoord.m_x
+ && ((tempInputSizeHeightMax - Params.m_inputSize.m_width)>>1)\
+ <= pC->m_pDecodedPlane->u_width -(Params.m_inputCoord.m_x \
+ + Params.m_inputSize.m_width))
+ {
+ /*The input pan zoom area can be extended symmetrically on the
+ right and left side*/
+ Params.m_inputCoord.m_x -= ((tempInputSizeHeightMax \
+ - Params.m_inputSize.m_width)>>1);
+ }
+ else if(Params.m_inputCoord.m_x < pC->m_pDecodedPlane->u_width\
+ -(Params.m_inputCoord.m_x + Params.m_inputSize.m_width))
+ {
+ /*There is not enough place on the left of the input pan
+ zoom area to extend it symmetrically,
+ so extend it to the maximum on the left*/
+ Params.m_inputCoord.m_x = 0;
+ }
+ else
+ {
+ /*There is not enough place on the right of the input pan zoom
+ area to extend it symmetrically,
+ so extend it to the maximum on the right*/
+ Params.m_inputCoord.m_x =
+ pC->m_pDecodedPlane->u_width - tempInputSizeHeightMax;
+ }
+ /*The input width of the AIR is the maximum possible width*/
+ Params.m_inputSize.m_width = tempInputSizeHeightMax;
+ }
+ else
+ {
+ /*The maximum possible input height is greater than the input
+ image width (rotation included),
+ that means black borders are necessary to keep aspect ratio
+ The input width of the AIR is all the input image width*/
+ Params.m_outputSize.m_width =
+ (tempOutputSizeHeight*pC->m_pDecodedPlane->u_width)\
+ /Params.m_inputSize.m_width;
+ Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+ Params.m_inputCoord.m_x = 0;
+ Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+ pImagePlanes[0].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+ -Params.m_outputSize.m_width))>>1)*pImagePlanes[0].u_stride)+1;
+ pImagePlanes[0].u_height = Params.m_outputSize.m_width;
+ pImagePlanes[1].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+ -(Params.m_outputSize.m_width>>1)))>>1)\
+ *pImagePlanes[1].u_stride)+1;
+ pImagePlanes[1].u_height = Params.m_outputSize.m_width>>1;
+ pImagePlanes[2].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+ -(Params.m_outputSize.m_width>>1)))>>1)\
+ *pImagePlanes[2].u_stride)+1;
+ pImagePlanes[2].u_height = Params.m_outputSize.m_width>>1;
+ }
+ }
+ else
+ {
+ /*Black borders will be on the top and bottom of the output video*/
+ /*Maximum output width if the input image aspect ratio is kept and if
+ the output width is the screen height*/
+ M4OSA_UInt32 tempOutputSizeWidth =
+ (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_height)\
+ /pC->m_pDecodedPlane->u_width);
+ M4OSA_UInt32 tempInputSizeWidthMax = 0;
+ M4OSA_UInt32 tempFinalInputWidth = 0, tempFinalOutputWidth = 0;
+ /*The output height is the screen width*/
+ Params.m_outputSize.m_width = pImagePlanes->u_height;
+ Params.m_outputSize.m_height= pImagePlanes->u_width;
+ tempOutputSizeWidth = (tempOutputSizeWidth>>1)<<1;
+
+ /*Maximum input width according to the maximum output width
+ (proportional to the maximum output width)*/
+ tempInputSizeWidthMax =
+ (pImagePlanes->u_width*Params.m_inputSize.m_height)/tempOutputSizeWidth;
+ tempInputSizeWidthMax = (tempInputSizeWidthMax>>1)<<1;
+
+ /*Check if the maximum possible input width is contained into the input
+ image height (rotation included)*/
+ if(tempInputSizeWidthMax <= pC->m_pDecodedPlane->u_height)
+ {
+ /*The maximum possible input width is contained in the input
+ image height (rotation included),
+ that means no black borders, the input pan zoom area will be extended
+ so that the input AIR height will be the maximum possible*/
+ if(((tempInputSizeWidthMax - Params.m_inputSize.m_height)>>1) \
+ <= Params.m_inputCoord.m_y
+ && ((tempInputSizeWidthMax - Params.m_inputSize.m_height)>>1)\
+ <= pC->m_pDecodedPlane->u_height -(Params.m_inputCoord.m_y \
+ + Params.m_inputSize.m_height))
+ {
+ /*The input pan zoom area can be extended symmetrically on
+ the right and left side*/
+ Params.m_inputCoord.m_y -= ((tempInputSizeWidthMax \
+ - Params.m_inputSize.m_height)>>1);
+ }
+ else if(Params.m_inputCoord.m_y < pC->m_pDecodedPlane->u_height\
+ -(Params.m_inputCoord.m_y + Params.m_inputSize.m_height))
+ {
+ /*There is not enough place on the top of the input pan zoom
+ area to extend it symmetrically,
+ so extend it to the maximum on the top*/
+ Params.m_inputCoord.m_y = 0;
+ }
+ else
+ {
+ /*There is not enough place on the bottom of the input pan zoom
+ area to extend it symmetrically,
+ so extend it to the maximum on the bottom*/
+ Params.m_inputCoord.m_y = pC->m_pDecodedPlane->u_height\
+ - tempInputSizeWidthMax;
+ }
+ /*The input height of the AIR is the maximum possible height*/
+ Params.m_inputSize.m_height = tempInputSizeWidthMax;
+ }
+ else
+ {
+ /*The maximum possible input width is greater than the input\
+ image height (rotation included),
+ that means black borders are necessary to keep aspect ratio
+ The input height of the AIR is all the input image height*/
+ Params.m_outputSize.m_height =
+ (tempOutputSizeWidth*pC->m_pDecodedPlane->u_height)\
+ /Params.m_inputSize.m_height;
+ Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+ Params.m_inputCoord.m_y = 0;
+ Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+ pImagePlanes[0].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+ -Params.m_outputSize.m_height))>>1))+1;
+ pImagePlanes[0].u_width = Params.m_outputSize.m_height;
+ pImagePlanes[1].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+ -(Params.m_outputSize.m_height>>1)))>>1))+1;
+ pImagePlanes[1].u_width = Params.m_outputSize.m_height>>1;
+ pImagePlanes[2].u_topleft =
+ ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+ -(Params.m_outputSize.m_height>>1)))>>1))+1;
+ pImagePlanes[2].u_width = Params.m_outputSize.m_height>>1;
+ }
+ }
+ break;
+ }
+ }
+
+ /*Width and height have to be even*/
+ Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+ Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+ Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+ Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+ pImagePlanes[0].u_width = (pImagePlanes[0].u_width>>1)<<1;
+ pImagePlanes[1].u_width = (pImagePlanes[1].u_width>>1)<<1;
+ pImagePlanes[2].u_width = (pImagePlanes[2].u_width>>1)<<1;
+ pImagePlanes[0].u_height = (pImagePlanes[0].u_height>>1)<<1;
+ pImagePlanes[1].u_height = (pImagePlanes[1].u_height>>1)<<1;
+ pImagePlanes[2].u_height = (pImagePlanes[2].u_height>>1)<<1;
+
+ /*Check that values are coherent*/
+ if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
+ {
+ Params.m_inputSize.m_width = Params.m_outputSize.m_width;
+ }
+ else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
+ {
+ Params.m_inputSize.m_height = Params.m_outputSize.m_height;
+ }
+ }
+
+ /**
+ Picture rendering: Resizing and Cropping*/
+ if(pC->m_mediaRendering != M4xVSS_kBlackBorders)
+ {
+ switch(pBasicTags.orientation)
+ {
+ default:
+ case M4COMMON_kOrientationUnknown:
+ Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+ case M4COMMON_kOrientationTopLeft:
+ case M4COMMON_kOrientationTopRight:
+ case M4COMMON_kOrientationBottomRight:
+ case M4COMMON_kOrientationBottomLeft:
+ Params.m_outputSize.m_height = pImagePlanes->u_height;
+ Params.m_outputSize.m_width = pImagePlanes->u_width;
+ break;
+ case M4COMMON_kOrientationLeftTop:
+ case M4COMMON_kOrientationLeftBottom:
+ case M4COMMON_kOrientationRightTop:
+ case M4COMMON_kOrientationRightBottom:
+ Params.m_outputSize.m_height = pImagePlanes->u_width;
+ Params.m_outputSize.m_width = pImagePlanes->u_height;
+ break;
+ }
+ }
+
+ /**
+ Picture rendering: Cropping*/
+ if(pC->m_mediaRendering == M4xVSS_kCropping)
+ {
+ if((Params.m_outputSize.m_height * Params.m_inputSize.m_width)\
+ /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
+ {
+ M4OSA_UInt32 tempHeight = Params.m_inputSize.m_height;
+ /*height will be cropped*/
+ Params.m_inputSize.m_height = (M4OSA_UInt32)((Params.m_outputSize.m_height \
+ * Params.m_inputSize.m_width) /Params.m_outputSize.m_width);
+ Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+ if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+ {
+ Params.m_inputCoord.m_y = (M4OSA_Int32)((M4OSA_Int32)\
+ ((pC->m_pDecodedPlane->u_height - Params.m_inputSize.m_height))>>1);
+ }
+ else
+ {
+ Params.m_inputCoord.m_y += (M4OSA_Int32)((M4OSA_Int32)\
+ ((tempHeight - Params.m_inputSize.m_height))>>1);
+ }
+ }
+ else
+ {
+ M4OSA_UInt32 tempWidth= Params.m_inputSize.m_width;
+ /*width will be cropped*/
+ Params.m_inputSize.m_width = (M4OSA_UInt32)((Params.m_outputSize.m_width \
+ * Params.m_inputSize.m_height) /Params.m_outputSize.m_height);
+ Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+ if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+ {
+ Params.m_inputCoord.m_x = (M4OSA_Int32)((M4OSA_Int32)\
+ ((pC->m_pDecodedPlane->u_width - Params.m_inputSize.m_width))>>1);
+ }
+ else
+ {
+ Params.m_inputCoord.m_x += (M4OSA_Int32)\
+ (((M4OSA_Int32)(tempWidth - Params.m_inputSize.m_width))>>1);
+ }
+ }
+ }
+
+
+
+ /**
+ * Call AIR functions */
+ if(M4OSA_NULL == pC->m_air_context)
+ {
+ err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
+ if(err != M4NO_ERROR)
+ {
+ free(pC->m_pDecodedPlane[0].pac_data);
+ free(pC->m_pDecodedPlane);
+ pC->m_pDecodedPlane = M4OSA_NULL;
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+ Error when initializing AIR: 0x%x", err);
+ return err;
+ }
+ }
+
+ err = M4AIR_configure(pC->m_air_context, &Params);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+ Error when configuring AIR: 0x%x", err);
+ M4AIR_cleanUp(pC->m_air_context);
+ free(pC->m_pDecodedPlane[0].pac_data);
+ free(pC->m_pDecodedPlane);
+ pC->m_pDecodedPlane = M4OSA_NULL;
+ return err;
+ }
+
+ err = M4AIR_get(pC->m_air_context, pC->m_pDecodedPlane, pImagePlanes);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when getting AIR plane: 0x%x", err);
+ M4AIR_cleanUp(pC->m_air_context);
+ free(pC->m_pDecodedPlane[0].pac_data);
+ free(pC->m_pDecodedPlane);
+ pC->m_pDecodedPlane = M4OSA_NULL;
+ return err;
+ }
+ pImagePlanes[0] = pImagePlanes1;
+ pImagePlanes[1] = pImagePlanes2;
+ pImagePlanes[2] = pImagePlanes3;
+ }
+
+
+ /**
+ * Increment the image counter */
+ pC->m_ImageCounter++;
+
+ /**
+ * Check end of sequence */
+ last_frame_flag = (pC->m_ImageCounter >= pC->m_NbImage);
+
+ /**
+ * Keep the picture duration */
+ *pPictureDuration = pC->m_timeDuration;
+
+ if (1 == last_frame_flag)
+ {
+ if(M4OSA_NULL != pC->m_air_context)
+ {
+ err = M4AIR_cleanUp(pC->m_air_context);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x", err);
+ return err;
+ }
+ }
+ if(M4OSA_NULL != pC->m_pDecodedPlane)
+ {
+ free(pC->m_pDecodedPlane[0].pac_data);
+ free(pC->m_pDecodedPlane);
+ pC->m_pDecodedPlane = M4OSA_NULL;
+ }
+ return M4PTO3GPP_WAR_LAST_PICTURE;
+ }
+
+ M4OSA_TRACE1_0("M4xVSS_PictureCallbackFct: Leaving ");
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext)
+ * @brief This function initializes Pto3GPP with the given parameters
+ * @note The "Pictures to 3GPP" parameters are given by the internal xVSS
+ * context. This context contains a pointer on the current element
+ * of the chained list of Pto3GPP parameters.
+ * @param pContext (IN) The integrator own context
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext)
+{
+ /************************************************************************/
+ /* Definitions to generate dummy AMR file used to add AMR silence in files generated
+ by Pto3GPP */
+ #define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE 13
+ /* This constant is defined in M4VSS3GPP_InternalConfig.h */
+ extern const M4OSA_UInt8\
+ M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
+
+ /* AMR silent frame used to compute dummy AMR silence file */
+ #define M4VSS3GPP_AMR_HEADER_SIZE 6
+ const M4OSA_UInt8 M4VSS3GPP_AMR_HEADER[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+ { 0x23, 0x21, 0x41, 0x4d, 0x52, 0x0a };
+ /************************************************************************/
+
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_ERR err;
+ M4PTO3GPP_Context pM4PTO3GPP_Ctxt = M4OSA_NULL;
+ M4PTO3GPP_Params Params;
+ M4xVSS_PictureCallbackCtxt* pCallBackCtxt;
+ M4OSA_Bool cmpResult=M4OSA_FALSE;
+ M4OSA_Context pDummyAMRFile;
+ M4OSA_Char out_amr[M4XVSS_MAX_PATH_LEN];
+ /*UTF conversion support*/
+ M4OSA_Char* pDecodedPath = M4OSA_NULL;
+ M4OSA_UInt32 i;
+
+ /**
+ * Create a M4PTO3GPP instance */
+ err = M4PTO3GPP_Init( &pM4PTO3GPP_Ctxt, xVSS_context->pFileReadPtr,
+ xVSS_context->pFileWritePtr);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalStartConvertPictureTo3gp returned %ld\n",err);
+ return err;
+ }
+
+ pCallBackCtxt = (M4xVSS_PictureCallbackCtxt*)M4OSA_32bitAlignedMalloc(sizeof(M4xVSS_PictureCallbackCtxt),
+ M4VS,(M4OSA_Char *) "Pto3gpp callback struct");
+ if(pCallBackCtxt == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_internalStartConvertPictureTo3gp");
+ return M4ERR_ALLOC;
+ }
+
+ Params.OutputVideoFrameSize = xVSS_context->pSettings->xVSS.outputVideoSize;
+ Params.OutputVideoFormat = xVSS_context->pSettings->xVSS.outputVideoFormat;
+ Params.videoProfile = xVSS_context->pSettings->xVSS.outputVideoProfile;
+ Params.videoLevel = xVSS_context->pSettings->xVSS.outputVideoLevel;
+
+ /**
+ * Generate "dummy" amr file containing silence in temporary folder */
+ M4OSA_chrNCopy(out_amr, xVSS_context->pTempPath, M4XVSS_MAX_PATH_LEN - 1);
+ strncat((char *)out_amr, (const char *)"dummy.amr\0", 10);
+
+ /**
+ * UTF conversion: convert the temporary path into the customer format*/
+ pDecodedPath = out_amr;
+
+ if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+ {
+ M4OSA_UInt32 length = 0;
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) out_amr,
+ (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalStartConvertPictureTo3gp:\
+ M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+ return err;
+ }
+ pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the conversion, now use the converted path*/
+
+ err = xVSS_context->pFileWritePtr->openWrite(&pDummyAMRFile, pDecodedPath, M4OSA_kFileWrite);
+
+ /*Commented because of the use of the UTF conversion see above*/
+/* err = xVSS_context->pFileWritePtr->openWrite(&pDummyAMRFile, out_amr, M4OSA_kFileWrite);
+ */
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: Can't open output dummy amr file %s,\
+ error: 0x%x\n",out_amr, err);
+ return err;
+ }
+
+ err = xVSS_context->pFileWritePtr->writeData(pDummyAMRFile,
+ (M4OSA_Int8*)M4VSS3GPP_AMR_HEADER, M4VSS3GPP_AMR_HEADER_SIZE);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: Can't write output dummy amr file %s,\
+ error: 0x%x\n",out_amr, err);
+ return err;
+ }
+
+ err = xVSS_context->pFileWritePtr->writeData(pDummyAMRFile,
+ (M4OSA_Int8*)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048, M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: \
+ Can't write output dummy amr file %s, error: 0x%x\n",out_amr, err);
+ return err;
+ }
+
+ err = xVSS_context->pFileWritePtr->closeWrite(pDummyAMRFile);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: \
+ Can't close output dummy amr file %s, error: 0x%x\n",out_amr, err);
+ return err;
+ }
+
+ /**
+ * Fill parameters for Pto3GPP with the parameters contained in the current element of the
+ * Pto3GPP parameters chained list and with default parameters */
+/*+ New Encoder bitrates */
+ if(xVSS_context->pSettings->xVSS.outputVideoBitrate == 0) {
+ Params.OutputVideoBitrate = M4VIDEOEDITING_kVARIABLE_KBPS;
+ }
+ else {
+ Params.OutputVideoBitrate = xVSS_context->pSettings->xVSS.outputVideoBitrate;
+ }
+ M4OSA_TRACE1_1("M4xVSS_internalStartConvertPicTo3GP: video bitrate = %d",
+ Params.OutputVideoBitrate);
+/*- New Encoder bitrates */
+ Params.OutputFileMaxSize = M4PTO3GPP_kUNLIMITED;
+ Params.pPictureCallbackFct = M4xVSS_PictureCallbackFct;
+ Params.pPictureCallbackCtxt = pCallBackCtxt;
+ /*FB: change to use the converted path (UTF conversion) see the conversion above*/
+ /*Fix :- Adding Audio Track in Image as input :AudioTarckFile Setting to NULL */
+ Params.pInputAudioTrackFile = M4OSA_NULL;//(M4OSA_Void*)pDecodedPath;//out_amr;
+ Params.AudioPaddingMode = M4PTO3GPP_kAudioPaddingMode_Loop;
+ Params.AudioFileFormat = M4VIDEOEDITING_kFileType_AMR;
+ Params.pOutput3gppFile = xVSS_context->pPTo3GPPcurrentParams->pFileOut;
+ Params.pTemporaryFile = xVSS_context->pPTo3GPPcurrentParams->pFileTemp;
+ /*+PR No: blrnxpsw#223*/
+ /*Increasing frequency of Frame, calculating Nos of Frame = duration /FPS */
+ /*Other changes made is @ M4xVSS_API.c @ line 3841 in M4xVSS_SendCommand*/
+ /*If case check for PanZoom removed */
+ Params.NbVideoFrames = (M4OSA_UInt32)
+ (xVSS_context->pPTo3GPPcurrentParams->duration \
+ / xVSS_context->pPTo3GPPcurrentParams->framerate); /* */
+ pCallBackCtxt->m_timeDuration = xVSS_context->pPTo3GPPcurrentParams->framerate;
+ /*-PR No: blrnxpsw#223*/
+ pCallBackCtxt->m_ImageCounter = 0;
+ pCallBackCtxt->m_FileIn = xVSS_context->pPTo3GPPcurrentParams->pFileIn;
+ pCallBackCtxt->m_NbImage = Params.NbVideoFrames;
+ pCallBackCtxt->m_pFileReadPtr = xVSS_context->pFileReadPtr;
+ pCallBackCtxt->m_pDecodedPlane = M4OSA_NULL;
+ pCallBackCtxt->m_pPto3GPPparams = xVSS_context->pPTo3GPPcurrentParams;
+ pCallBackCtxt->m_air_context = M4OSA_NULL;
+ pCallBackCtxt->m_mediaRendering = xVSS_context->pPTo3GPPcurrentParams->MediaRendering;
+
+ /**
+ * Set the input and output files */
+ err = M4PTO3GPP_Open(pM4PTO3GPP_Ctxt, &Params);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Open returned: 0x%x\n",err);
+ if(pCallBackCtxt != M4OSA_NULL)
+ {
+ free(pCallBackCtxt);
+ pCallBackCtxt = M4OSA_NULL;
+ }
+ M4PTO3GPP_CleanUp(pM4PTO3GPP_Ctxt);
+ return err;
+ }
+
+ /**
+ * Save context to be able to call Pto3GPP step function in M4xVSS_step function */
+ xVSS_context->pM4PTO3GPP_Ctxt = pM4PTO3GPP_Ctxt;
+ xVSS_context->pCallBackCtxt = pCallBackCtxt;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext)
+ * @brief This function cleans up Pto3GPP
+ * @note
+ * @param pContext (IN) The integrator own context
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_ERR err;
+ M4OSA_Char out_amr[M4XVSS_MAX_PATH_LEN];
+ /*UTF conversion support*/
+ M4OSA_Char* pDecodedPath = M4OSA_NULL;
+
+ /**
+ * Free the PTO3GPP callback context */
+ if(M4OSA_NULL != xVSS_context->pCallBackCtxt)
+ {
+ free(xVSS_context->pCallBackCtxt);
+ xVSS_context->pCallBackCtxt = M4OSA_NULL;
+ }
+
+ /**
+ * Finalize the output file */
+ err = M4PTO3GPP_Close(xVSS_context->pM4PTO3GPP_Ctxt);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_Close returned 0x%x\n",err);
+ M4PTO3GPP_CleanUp(xVSS_context->pM4PTO3GPP_Ctxt);
+ return err;
+ }
+
+ /**
+ * Free this M4PTO3GPP instance */
+ err = M4PTO3GPP_CleanUp(xVSS_context->pM4PTO3GPP_Ctxt);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4PTO3GPP_CleanUp returned 0x%x\n",err);
+ return err;
+ }
+
+ /**
+ * Remove dummy.amr file */
+ M4OSA_chrNCopy(out_amr, xVSS_context->pTempPath, M4XVSS_MAX_PATH_LEN - 1);
+ strncat((char *)out_amr, (const char *)"dummy.amr\0", 10);
+
+ /**
+ * UTF conversion: convert the temporary path into the customer format*/
+ pDecodedPath = out_amr;
+
+ if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+ {
+ M4OSA_UInt32 length = 0;
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) out_amr,
+ (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalStopConvertPictureTo3gp:\
+ M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+ return err;
+ }
+ pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+ /**
+ * End of the conversion, now use the decoded path*/
+ remove((const char *)pDecodedPath);
+
+ /*Commented because of the use of the UTF conversion*/
+/* remove(out_amr);
+ */
+
+ xVSS_context->pM4PTO3GPP_Ctxt = M4OSA_NULL;
+ xVSS_context->pCallBackCtxt = M4OSA_NULL;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
+ * @brief This function converts an RGB565 plane to YUV420 planar
+ * @note It is used only for framing effect
+ * It allocates output YUV planes
+ * @param framingCtx (IN) The framing struct containing input RGB565 plane
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
+{
+ M4OSA_ERR err;
+
+ /**
+ * Allocate output YUV planes */
+ framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane),
+ M4VS, (M4OSA_Char *)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+ if(framingCtx->FramingYuv == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+ return M4ERR_ALLOC;
+ }
+ framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
+ framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
+ framingCtx->FramingYuv[0].u_topleft = 0;
+ framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
+ framingCtx->FramingYuv[0].pac_data =
+ (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc((framingCtx->FramingYuv[0].u_width\
+ *framingCtx->FramingYuv[0].u_height*3)>>1, M4VS, (M4OSA_Char *)\
+ "Alloc for the Convertion output YUV");;
+ if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+ return M4ERR_ALLOC;
+ }
+ framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
+ framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
+ framingCtx->FramingYuv[1].u_topleft = 0;
+ framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+ framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data \
+ + framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
+ framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
+ framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
+ framingCtx->FramingYuv[2].u_topleft = 0;
+ framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+ framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data \
+ + framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
+
+ /**
+ * Convert input RGB 565 to YUV 420 to be able to merge it with output video in framing
+ effect */
+ err = M4VIFI_xVSS_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalConvertRGBtoYUV:\
+ error when converting from RGB to YUV: 0x%x\n", err);
+ }
+
+ framingCtx->duration = 0;
+ framingCtx->previousClipTime = -1;
+ framingCtx->previewOffsetClipTime = -1;
+
+ /**
+ * Only one element in the chained list (no animated image with RGB buffer...) */
+ framingCtx->pCurrent = framingCtx;
+ framingCtx->pNext = framingCtx;
+
+ return M4NO_ERROR;
+}
+
+M4OSA_ERR M4xVSS_internalSetPlaneTransparent(M4OSA_UInt8* planeIn, M4OSA_UInt32 size)
+{
+ M4OSA_UInt32 i;
+ M4OSA_UInt8* plane = planeIn;
+ M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+ M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+
+ for(i=0; i<(size>>1); i++)
+ {
+ *plane++ = transparent1;
+ *plane++ = transparent2;
+ }
+
+ return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
+ * M4VSS3GPP_EffectSettings* pEffect,
+ * M4xVSS_FramingStruct* framingCtx,
+ M4VIDEOEDITING_VideoFrameSize OutputVideoResolution)
+ *
+ * @brief This function converts an ARGB8888 input file to YUV420 when used for the framing effect
+ * @note The input ARGB8888 file path is contained in the pEffect structure
+ * If the ARGB8888 must be resized to fit output video size, this function
+ * will do it.
+ * @param pContext (IN) The integrator own context
+ * @param pEffect (IN) The effect structure containing all informations on
+ * the file to decode, resizing ...
+ * @param framingCtx (IN/OUT) Structure in which the output RGB will be stored
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return M4ERR_ALLOC: Allocation error (no more memory)
+ * @return M4ERR_FILE_NOT_FOUND: File not found.
+ ******************************************************************************
+ */
+
+
+M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
+ M4VSS3GPP_EffectSettings* pEffect,
+ M4xVSS_FramingStruct* framingCtx,
+ M4VIDEOEDITING_VideoFrameSize\
+ OutputVideoResolution)
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_Context pARGBIn;
+ M4OSA_UInt32 file_size;
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_UInt32 width, height, width_out, height_out;
+ M4OSA_Void* pFile = pEffect->xVSS.pFramingFilePath;
+ M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+ M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+ /*UTF conversion support*/
+ M4OSA_Char* pDecodedPath = M4OSA_NULL;
+ M4OSA_UInt32 i = 0,j = 0;
+ M4VIFI_ImagePlane rgbPlane;
+ M4OSA_UInt32 frameSize_argb=(framingCtx->width * framingCtx->height * 4);
+ M4OSA_UInt32 frameSize;
+ M4OSA_UInt32 tempAlphaPercent = 0;
+ M4VIFI_UInt8* TempPacData = M4OSA_NULL;
+ M4OSA_UInt16 *ptr = M4OSA_NULL;
+ M4OSA_UInt32 z = 0;
+
+ M4OSA_TRACE3_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: Entering ");
+
+ M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect width and height %d %d ",
+ framingCtx->width,framingCtx->height);
+
+ M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_32bitAlignedMalloc(frameSize_argb, M4VS, (M4OSA_Char*)\
+ "Image argb data");
+ if(pTmpData == M4OSA_NULL) {
+ M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+ return M4ERR_ALLOC;
+ }
+ /**
+ * UTF conversion: convert the file path into the customer format*/
+ pDecodedPath = pFile;
+
+ if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+ {
+ M4OSA_UInt32 length = 0;
+ err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) pFile,
+ (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalDecodePNG:\
+ M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+ free(pTmpData);
+ pTmpData = M4OSA_NULL;
+ return err;
+ }
+ pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ }
+
+ /**
+ * End of the conversion, now use the decoded path*/
+
+ /* Open input ARGB8888 file and store it into memory */
+ err = xVSS_context->pFileReadPtr->openRead(&pARGBIn, pDecodedPath, M4OSA_kFileRead);
+
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_2("Can't open input ARGB8888 file %s, error: 0x%x\n",pFile, err);
+ free(pTmpData);
+ pTmpData = M4OSA_NULL;
+ return err;
+ }
+
+ err = xVSS_context->pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pTmpData, &frameSize_argb);
+ if(err != M4NO_ERROR)
+ {
+ xVSS_context->pFileReadPtr->closeRead(pARGBIn);
+ free(pTmpData);
+ pTmpData = M4OSA_NULL;
+ return err;
+ }
+
+
+ err = xVSS_context->pFileReadPtr->closeRead(pARGBIn);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_2("Can't close input png file %s, error: 0x%x\n",pFile, err);
+ free(pTmpData);
+ pTmpData = M4OSA_NULL;
+ return err;
+ }
+
+
+ rgbPlane.u_height = framingCtx->height;
+ rgbPlane.u_width = framingCtx->width;
+ rgbPlane.u_stride = rgbPlane.u_width*3;
+ rgbPlane.u_topleft = 0;
+
+ frameSize = (rgbPlane.u_width * rgbPlane.u_height * 3); //Size of RGB888 data
+ rgbPlane.pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(((frameSize)+ (2 * framingCtx->width)),
+ M4VS, (M4OSA_Char*)"Image clip RGB888 data");
+ if(rgbPlane.pac_data == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+ free(pTmpData);
+ return M4ERR_ALLOC;
+ }
+
+ M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+ Remove the alpha channel ");
+
+ /* premultiplied alpha % on RGB */
+ for (i=0, j = 0; i < frameSize_argb; i += 4) {
+ /* this is alpha value */
+ if ((i % 4) == 0)
+ {
+ tempAlphaPercent = pTmpData[i];
+ }
+
+ /* R */
+ rgbPlane.pac_data[j] = pTmpData[i+1];
+ j++;
+
+ /* G */
+ if (tempAlphaPercent > 0) {
+ rgbPlane.pac_data[j] = pTmpData[i+2];
+ j++;
+ } else {/* In case of alpha value 0, make GREEN to 255 */
+ rgbPlane.pac_data[j] = 255; //pTmpData[i+2];
+ j++;
+ }
+
+ /* B */
+ rgbPlane.pac_data[j] = pTmpData[i+3];
+ j++;
+ }
+
+ free(pTmpData);
+ pTmpData = M4OSA_NULL;
+
+ /* convert RGB888 to RGB565 */
+
+ /* allocate temp RGB 565 buffer */
+ TempPacData = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(frameSize +
+ (4 * (framingCtx->width + framingCtx->height + 1)),
+ M4VS, (M4OSA_Char*)"Image clip RGB565 data");
+ if (TempPacData == M4OSA_NULL) {
+ M4OSA_TRACE1_0("Failed to allocate memory for Image clip RGB565 data");
+ free(rgbPlane.pac_data);
+ return M4ERR_ALLOC;
+ }
+
+ ptr = (M4OSA_UInt16 *)TempPacData;
+ z = 0;
+
+ for (i = 0; i < j ; i += 3)
+ {
+ ptr[z++] = PACK_RGB565(0, rgbPlane.pac_data[i],
+ rgbPlane.pac_data[i+1],
+ rgbPlane.pac_data[i+2]);
+ }
+
+ /* free the RBG888 and assign RGB565 */
+ free(rgbPlane.pac_data);
+ rgbPlane.pac_data = TempPacData;
+
+ /**
+ * Check if output sizes are odd */
+ if(rgbPlane.u_height % 2 != 0)
+ {
+ M4VIFI_UInt8* output_pac_data = rgbPlane.pac_data;
+ M4OSA_UInt32 i;
+ M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+ output height is odd ");
+ output_pac_data +=rgbPlane.u_width * rgbPlane.u_height*2;
+
+ for(i=0;i<rgbPlane.u_width;i++)
+ {
+ *output_pac_data++ = transparent1;
+ *output_pac_data++ = transparent2;
+ }
+
+ /**
+ * We just add a white line to the PNG that will be transparent */
+ rgbPlane.u_height++;
+ }
+ if(rgbPlane.u_width % 2 != 0)
+ {
+ /**
+ * We add a new column of white (=transparent), but we need to parse all RGB lines ... */
+ M4OSA_UInt32 i;
+ M4VIFI_UInt8* newRGBpac_data;
+ M4VIFI_UInt8* output_pac_data, *input_pac_data;
+
+ rgbPlane.u_width++;
+ M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: \
+ output width is odd ");
+ /**
+ * We need to allocate a new RGB output buffer in which all decoded data
+ + white line will be copied */
+ newRGBpac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(rgbPlane.u_height*rgbPlane.u_width*2\
+ *sizeof(M4VIFI_UInt8), M4VS, (M4OSA_Char *)"New Framing GIF Output pac_data RGB");
+
+ if(newRGBpac_data == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Allocation error in \
+ M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+ free(rgbPlane.pac_data);
+ return M4ERR_ALLOC;
+ }
+
+ output_pac_data= newRGBpac_data;
+ input_pac_data = rgbPlane.pac_data;
+
+ for(i=0;i<rgbPlane.u_height;i++)
+ {
+ memcpy((void *)output_pac_data, (void *)input_pac_data,
+ (rgbPlane.u_width-1)*2);
+
+ output_pac_data += ((rgbPlane.u_width-1)*2);
+ /* Put the pixel to transparency color */
+ *output_pac_data++ = transparent1;
+ *output_pac_data++ = transparent2;
+
+ input_pac_data += ((rgbPlane.u_width-1)*2);
+ }
+ free(rgbPlane.pac_data);
+ rgbPlane.pac_data = newRGBpac_data;
+ }
+
+ /* reset stride */
+ rgbPlane.u_stride = rgbPlane.u_width*2;
+
+ /**
+ * Initialize chained list parameters */
+ framingCtx->duration = 0;
+ framingCtx->previousClipTime = -1;
+ framingCtx->previewOffsetClipTime = -1;
+
+ /**
+ * Only one element in the chained list (no animated image ...) */
+ framingCtx->pCurrent = framingCtx;
+ framingCtx->pNext = framingCtx;
+
+ /**
+ * Get output width/height */
+ switch(OutputVideoResolution)
+ //switch(xVSS_context->pSettings->xVSS.outputVideoSize)
+ {
+ case M4VIDEOEDITING_kSQCIF:
+ width_out = 128;
+ height_out = 96;
+ break;
+ case M4VIDEOEDITING_kQQVGA:
+ width_out = 160;
+ height_out = 120;
+ break;
+ case M4VIDEOEDITING_kQCIF:
+ width_out = 176;
+ height_out = 144;
+ break;
+ case M4VIDEOEDITING_kQVGA:
+ width_out = 320;
+ height_out = 240;
+ break;
+ case M4VIDEOEDITING_kCIF:
+ width_out = 352;
+ height_out = 288;
+ break;
+ case M4VIDEOEDITING_kVGA:
+ width_out = 640;
+ height_out = 480;
+ break;
+ case M4VIDEOEDITING_kWVGA:
+ width_out = 800;
+ height_out = 480;
+ break;
+ case M4VIDEOEDITING_kNTSC:
+ width_out = 720;
+ height_out = 480;
+ break;
+ case M4VIDEOEDITING_k640_360:
+ width_out = 640;
+ height_out = 360;
+ break;
+ case M4VIDEOEDITING_k854_480:
+ // StageFright encoders require %16 resolution
+ width_out = M4ENCODER_854_480_Width;
+ height_out = 480;
+ break;
+ case M4VIDEOEDITING_k1280_720:
+ width_out = 1280;
+ height_out = 720;
+ break;
+ case M4VIDEOEDITING_k1080_720:
+ // StageFright encoders require %16 resolution
+ width_out = M4ENCODER_1080_720_Width;
+ height_out = 720;
+ break;
+ case M4VIDEOEDITING_k960_720:
+ width_out = 960;
+ height_out = 720;
+ break;
+ case M4VIDEOEDITING_k1920_1080:
+ width_out = 1920;
+ height_out = M4ENCODER_1920_1080_Height;
+ break;
+ /**
+ * If output video size is not given, we take QCIF size,
+ * should not happen, because already done in M4xVSS_sendCommand */
+ default:
+ width_out = 176;
+ height_out = 144;
+ break;
+ }
+
+ /**
+ * Allocate output planes structures */
+ framingCtx->FramingRgb = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(sizeof(M4VIFI_ImagePlane), M4VS,
+ (M4OSA_Char *)"Framing Output plane RGB");
+ if(framingCtx->FramingRgb == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+ return M4ERR_ALLOC;
+ }
+ /**
+ * Resize RGB if needed */
+ if((pEffect->xVSS.bResize) &&
+ (rgbPlane.u_width != width_out || rgbPlane.u_height != height_out))
+ {
+ width = width_out;
+ height = height_out;
+
+ M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: \
+ New Width and height %d %d ",width,height);
+
+ framingCtx->FramingRgb->u_height = height_out;
+ framingCtx->FramingRgb->u_width = width_out;
+ framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*2;
+ framingCtx->FramingRgb->u_topleft = 0;
+
+ framingCtx->FramingRgb->pac_data =
+ (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(framingCtx->FramingRgb->u_height*framingCtx->\
+ FramingRgb->u_width*2*sizeof(M4VIFI_UInt8), M4VS,
+ (M4OSA_Char *)"Framing Output pac_data RGB");
+
+ if(framingCtx->FramingRgb->pac_data == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Allocation error in \
+ M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+ free(framingCtx->FramingRgb);
+ free(rgbPlane.pac_data);
+ return M4ERR_ALLOC;
+ }
+
+ M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: Resizing Needed ");
+ M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+ rgbPlane.u_height & rgbPlane.u_width %d %d",rgbPlane.u_height,rgbPlane.u_width);
+
+ //err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
+ err = M4VIFI_ResizeBilinearRGB565toRGB565(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
+
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect :\
+ when resizing RGB plane: 0x%x\n", err);
+ return err;
+ }
+
+ if(rgbPlane.pac_data != M4OSA_NULL)
+ {
+ free(rgbPlane.pac_data);
+ rgbPlane.pac_data = M4OSA_NULL;
+ }
+ }
+ else
+ {
+
+ M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+ Resizing Not Needed ");
+
+ width = rgbPlane.u_width;
+ height = rgbPlane.u_height;
+ framingCtx->FramingRgb->u_height = height;
+ framingCtx->FramingRgb->u_width = width;
+ framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*2;
+ framingCtx->FramingRgb->u_topleft = 0;
+ framingCtx->FramingRgb->pac_data = rgbPlane.pac_data;
+ }
+
+
+ if(pEffect->xVSS.bResize)
+ {
+ /**
+ * Force topleft to 0 for pure framing effect */
+ framingCtx->topleft_x = 0;
+ framingCtx->topleft_y = 0;
+ }
+
+
+ /**
+ * Convert RGB output to YUV 420 to be able to merge it with output video in framing
+ effect */
+ framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_32bitAlignedMalloc(3*sizeof(M4VIFI_ImagePlane), M4VS,
+ (M4OSA_Char *)"Framing Output plane YUV");
+ if(framingCtx->FramingYuv == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+ free(framingCtx->FramingRgb->pac_data);
+ return M4ERR_ALLOC;
+ }
+
+ // Alloc for Y, U and V planes
+ framingCtx->FramingYuv[0].u_width = ((width+1)>>1)<<1;
+ framingCtx->FramingYuv[0].u_height = ((height+1)>>1)<<1;
+ framingCtx->FramingYuv[0].u_topleft = 0;
+ framingCtx->FramingYuv[0].u_stride = ((width+1)>>1)<<1;
+ framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc
+ ((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height), M4VS,
+ (M4OSA_Char *)"Alloc for the output Y");
+ if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+ free(framingCtx->FramingYuv);
+ free(framingCtx->FramingRgb->pac_data);
+ return M4ERR_ALLOC;
+ }
+ framingCtx->FramingYuv[1].u_width = (((width+1)>>1)<<1)>>1;
+ framingCtx->FramingYuv[1].u_height = (((height+1)>>1)<<1)>>1;
+ framingCtx->FramingYuv[1].u_topleft = 0;
+ framingCtx->FramingYuv[1].u_stride = (((width+1)>>1)<<1)>>1;
+
+
+ framingCtx->FramingYuv[1].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
+ framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height, M4VS,
+ (M4OSA_Char *)"Alloc for the output U");
+ if (framingCtx->FramingYuv[1].pac_data == M4OSA_NULL) {
+ free(framingCtx->FramingYuv[0].pac_data);
+ free(framingCtx->FramingYuv);
+ free(framingCtx->FramingRgb->pac_data);
+ return M4ERR_ALLOC;
+ }
+
+ framingCtx->FramingYuv[2].u_width = (((width+1)>>1)<<1)>>1;
+ framingCtx->FramingYuv[2].u_height = (((height+1)>>1)<<1)>>1;
+ framingCtx->FramingYuv[2].u_topleft = 0;
+ framingCtx->FramingYuv[2].u_stride = (((width+1)>>1)<<1)>>1;
+
+
+ framingCtx->FramingYuv[2].pac_data = (M4VIFI_UInt8*)M4OSA_32bitAlignedMalloc(
+ framingCtx->FramingYuv[2].u_width * framingCtx->FramingYuv[0].u_height, M4VS,
+ (M4OSA_Char *)"Alloc for the output V");
+ if (framingCtx->FramingYuv[2].pac_data == M4OSA_NULL) {
+ free(framingCtx->FramingYuv[1].pac_data);
+ free(framingCtx->FramingYuv[0].pac_data);
+ free(framingCtx->FramingYuv);
+ free(framingCtx->FramingRgb->pac_data);
+ return M4ERR_ALLOC;
+ }
+
+ M4OSA_TRACE3_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+ convert RGB to YUV ");
+
+ //err = M4VIFI_RGB888toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+ err = M4VIFI_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("SPS png: error when converting from RGB to YUV: 0x%x\n", err);
+ }
+ M4OSA_TRACE3_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: Leaving ");
+ return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext)
+ *
+ * @brief This function prepares VSS for editing
+ * @note It also set special xVSS effect as external effects for the VSS
+ * @param pContext (IN) The integrator own context
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4VSS3GPP_EditContext pVssCtxt;
+ M4OSA_UInt32 i,j;
+ M4OSA_ERR err;
+
+ /**
+ * Create a VSS 3GPP edition instance */
+ err = M4VSS3GPP_editInit( &pVssCtxt, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile: M4VSS3GPP_editInit returned 0x%x\n",
+ err);
+ M4VSS3GPP_editCleanUp(pVssCtxt);
+ /**
+ * Set the VSS context to NULL */
+ xVSS_context->pCurrentEditContext = M4OSA_NULL;
+ return err;
+ }
+
+ M4VSS3GPP_InternalEditContext* pVSSContext =
+ (M4VSS3GPP_InternalEditContext*)pVssCtxt;
+ pVSSContext->xVSS.outputVideoFormat =
+ xVSS_context->pSettings->xVSS.outputVideoFormat;
+ pVSSContext->xVSS.outputVideoSize =
+ xVSS_context->pSettings->xVSS.outputVideoSize ;
+ pVSSContext->xVSS.outputAudioFormat =
+ xVSS_context->pSettings->xVSS.outputAudioFormat;
+ pVSSContext->xVSS.outputAudioSamplFreq =
+ xVSS_context->pSettings->xVSS.outputAudioSamplFreq;
+ pVSSContext->xVSS.outputVideoBitrate =
+ xVSS_context->pSettings->xVSS.outputVideoBitrate ;
+ pVSSContext->xVSS.outputAudioBitrate =
+ xVSS_context->pSettings->xVSS.outputAudioBitrate ;
+ pVSSContext->xVSS.bAudioMono =
+ xVSS_context->pSettings->xVSS.bAudioMono;
+ pVSSContext->xVSS.outputVideoProfile =
+ xVSS_context->pSettings->xVSS.outputVideoProfile;
+ pVSSContext->xVSS.outputVideoLevel =
+ xVSS_context->pSettings->xVSS.outputVideoLevel;
+ /* In case of MMS use case, we fill directly into the VSS context the targeted bitrate */
+ if(xVSS_context->targetedBitrate != 0)
+ {
+ M4VSS3GPP_InternalEditContext* pVSSContext = (M4VSS3GPP_InternalEditContext*)pVssCtxt;
+
+ pVSSContext->bIsMMS = M4OSA_TRUE;
+ pVSSContext->uiMMSVideoBitrate = xVSS_context->targetedBitrate;
+ pVSSContext->MMSvideoFramerate = xVSS_context->pSettings->videoFrameRate;
+ }
+
+ /*Warning: since the adding of the UTF conversion, pSettings has been changed in the next
+ part in pCurrentEditSettings (there is a specific current editing structure for the saving,
+ as for the preview)*/
+
+ /**
+ * Set the external video effect functions, for saving mode (to be moved to
+ M4xVSS_saveStart() ?)*/
+ for (i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
+ {
+ for (j=0; j<xVSS_context->pCurrentEditSettings->nbEffects; j++)
+ {
+ if (M4xVSS_kVideoEffectType_BlackAndWhite ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectColor;
+ //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ // (M4OSA_Void*)M4xVSS_kVideoEffectType_BlackAndWhite;
+ /*commented FB*/
+ /**
+ * We do not need to set the color context, it is already set
+ during sendCommand function */
+ }
+ if (M4xVSS_kVideoEffectType_Pink ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectColor;
+ //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ // (M4OSA_Void*)M4xVSS_kVideoEffectType_Pink; /**< we don't
+ // use any function context */
+ /*commented FB*/
+ /**
+ * We do not need to set the color context,
+ it is already set during sendCommand function */
+ }
+ if (M4xVSS_kVideoEffectType_Green ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectColor;
+ //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ // (M4OSA_Void*)M4xVSS_kVideoEffectType_Green;
+ /**< we don't use any function context */
+ /*commented FB*/
+ /**
+ * We do not need to set the color context, it is already set during
+ sendCommand function */
+ }
+ if (M4xVSS_kVideoEffectType_Sepia ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectColor;
+ //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ // (M4OSA_Void*)M4xVSS_kVideoEffectType_Sepia;
+ /**< we don't use any function context */
+ /*commented FB*/
+ /**
+ * We do not need to set the color context, it is already set during
+ sendCommand function */
+ }
+ if (M4xVSS_kVideoEffectType_Fifties ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectFifties;
+ /**
+ * We do not need to set the framing context, it is already set during
+ sendCommand function */
+ }
+ if (M4xVSS_kVideoEffectType_Negative ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectColor;
+ //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ // (M4OSA_Void*)M4xVSS_kVideoEffectType_Negative;
+ /**< we don't use any function context */
+ /*commented FB*/
+ /**
+ * We do not need to set the color context, it is already set during
+ sendCommand function */
+ }
+ if (M4xVSS_kVideoEffectType_Framing ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectFraming;
+ /**
+ * We do not need to set the framing context, it is already set during
+ sendCommand function */
+ }
+ if (M4xVSS_kVideoEffectType_ZoomIn ==
+ xVSS_context->pSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectZoom;
+ xVSS_context->pCurrentEditSettings->Effects[j].pExtVideoEffectFctCtxt =
+ (M4OSA_Void*)M4xVSS_kVideoEffectType_ZoomIn; /**< we don't use any
+ function context */
+ }
+ if (M4xVSS_kVideoEffectType_ZoomOut ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectZoom;
+ xVSS_context->pCurrentEditSettings->Effects[j].pExtVideoEffectFctCtxt =
+ (M4OSA_Void*)M4xVSS_kVideoEffectType_ZoomOut; /**< we don't use any
+ function context */
+ }
+ if (M4xVSS_kVideoEffectType_ColorRGB16 ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectColor;
+ //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ // (M4OSA_Void*)M4xVSS_kVideoEffectType_ColorRGB16;
+ /**< we don't use any function context */
+ /**
+ * We do not need to set the color context, it is already set during
+ sendCommand function */
+ }
+ if (M4xVSS_kVideoEffectType_Gradient ==
+ xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+ {
+ xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+ M4VSS3GPP_externalVideoEffectColor;
+ //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+ // (M4OSA_Void*)M4xVSS_kVideoEffectType_ColorRGB16;
+ /**< we don't use any function context */
+ /**
+ * We do not need to set the color context, it is already set during
+ sendCommand function */
+ }
+
+ }
+ }
+
+ /**
+ * Open the VSS 3GPP */
+ err = M4VSS3GPP_editOpen(pVssCtxt, xVSS_context->pCurrentEditSettings);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile:\
+ M4VSS3GPP_editOpen returned 0x%x\n",err);
+ M4VSS3GPP_editCleanUp(pVssCtxt);
+ /**
+ * Set the VSS context to NULL */
+ xVSS_context->pCurrentEditContext = M4OSA_NULL;
+ return err;
+ }
+
+ /**
+ * Save VSS context to be able to close / free VSS later */
+ xVSS_context->pCurrentEditContext = pVssCtxt;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext)
+ *
+ * @brief This function cleans up VSS
+ * @note
+ * @param pContext (IN) The integrator own context
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4VSS3GPP_EditContext pVssCtxt = xVSS_context->pCurrentEditContext;
+ M4OSA_ERR err;
+
+ if(xVSS_context->pCurrentEditContext != M4OSA_NULL)
+ {
+ /**
+ * Close the VSS 3GPP */
+ err = M4VSS3GPP_editClose(pVssCtxt);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalCloseEditedFile:\
+ M4VSS3GPP_editClose returned 0x%x\n",err);
+ M4VSS3GPP_editCleanUp(pVssCtxt);
+ /**
+ * Set the VSS context to NULL */
+ xVSS_context->pCurrentEditContext = M4OSA_NULL;
+ return err;
+ }
+
+ /**
+ * Free this VSS3GPP edition instance */
+ err = M4VSS3GPP_editCleanUp(pVssCtxt);
+ /**
+ * Set the VSS context to NULL */
+ xVSS_context->pCurrentEditContext = M4OSA_NULL;
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalCloseEditedFile: \
+ M4VSS3GPP_editCleanUp returned 0x%x\n",err);
+ return err;
+ }
+ }
+
+ return M4NO_ERROR;
+}
+
/**
 ******************************************************************************
 * prototype    M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext)
 *
 * @brief    This function prepares VSS for audio mixing
 * @note     It takes its parameters from the BGM settings in the xVSS internal context
 * @param    pContext    (IN) The integrator own context
 *
 * @return   M4NO_ERROR:      No error
 * @return   M4ERR_PARAMETER: At least one of the function parameters is null
 * @return   M4ERR_ALLOC:     Allocation error (no more memory)
 ******************************************************************************
 */
/***
 * FB: the function has been modified since the structure used for the saving is now the
 * pCurrentEditSettings and not the pSettings
 * This change has been added for the UTF support
 * All the "xVSS_context->pSettings" has been replaced by "xVSS_context->pCurrentEditSettings"
 ***/
M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext)
{
    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
    M4VSS3GPP_AudioMixingSettings* pAudioMixSettings;
    M4VSS3GPP_AudioMixingContext pAudioMixingCtxt;
    M4OSA_ERR err;
    M4VIDEOEDITING_ClipProperties fileProperties;

    /**
     * Allocate audio mixing settings structure and fill it with BGM parameters.
     * Ownership: the structure is kept in xVSS_context->pAudioMixSettings and
     * released by M4xVSS_internalCloseAudioMixedFile. */
    pAudioMixSettings = (M4VSS3GPP_AudioMixingSettings*)M4OSA_32bitAlignedMalloc
        (sizeof(M4VSS3GPP_AudioMixingSettings), M4VS, (M4OSA_Char *)"pAudioMixSettings");
    if(pAudioMixSettings == M4OSA_NULL)
    {
        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalGenerateAudioMixFile");
        return M4ERR_ALLOC;
    }

    /* For a 3GPP BGM file, probe its properties; if probing fails, force a stream
     * type different from the output format so the BGM will be re-encoded */
    if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->FileType ==
        M4VIDEOEDITING_kFileType_3GPP)
    {
        err = M4xVSS_internalGetProperties((M4OSA_Context)xVSS_context,
            (M4OSA_Char*)xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile,
            &fileProperties);
        if(err != M4NO_ERROR)
        {
            M4OSA_TRACE1_1("M4xVSS_internalGenerateAudioMixFile:\
                impossible to retrieve audio BGM properties ->\
                reencoding audio background music", err);
            fileProperties.AudioStreamType =
                xVSS_context->pCurrentEditSettings->xVSS.outputAudioFormat+1;
            /* To force BGM encoding */
        }
    }

    /* Added (BGM) track parameters */
    pAudioMixSettings->bRemoveOriginal = M4OSA_FALSE;
    pAudioMixSettings->AddedAudioFileType =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->FileType;
    pAudioMixSettings->pAddedAudioTrackFile =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile;
    pAudioMixSettings->uiAddVolume =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddVolume;

    /* NOTE(review): the block below still reads xVSS_context->pSettings although the
     * header comment above states every access was migrated to pCurrentEditSettings —
     * confirm whether this is intentional (both structures may carry the same values
     * at this point). */
    pAudioMixSettings->outputAudioFormat = xVSS_context->pSettings->xVSS.outputAudioFormat;
    pAudioMixSettings->outputASF = xVSS_context->pSettings->xVSS.outputAudioSamplFreq;
    pAudioMixSettings->outputAudioBitrate = xVSS_context->pSettings->xVSS.outputAudioBitrate;
    pAudioMixSettings->uiSamplingFrequency =
        xVSS_context->pSettings->xVSS.pBGMtrack->uiSamplingFrequency;
    pAudioMixSettings->uiNumChannels = xVSS_context->pSettings->xVSS.pBGMtrack->uiNumChannels;

    /* Audio ducking parameters; volume levels are converted from percent to a
     * 0..1 factor */
    pAudioMixSettings->b_DuckingNeedeed =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->b_DuckingNeedeed;
    pAudioMixSettings->fBTVolLevel =
        (M4OSA_Float )xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddVolume/100;
    pAudioMixSettings->InDucking_threshold =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->InDucking_threshold;
    pAudioMixSettings->InDucking_lowVolume =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->lowVolume/100;
    pAudioMixSettings->fPTVolLevel =
        (M4OSA_Float)xVSS_context->pSettings->PTVolLevel/100;
    pAudioMixSettings->bLoop = xVSS_context->pSettings->xVSS.pBGMtrack->bLoop;

    /* Output channel count follows the mono/stereo output setting */
    if(xVSS_context->pSettings->xVSS.bAudioMono)
    {
        pAudioMixSettings->outputNBChannels = 1;
    }
    else
    {
        pAudioMixSettings->outputNBChannels = 2;
    }

    /**
     * Fill audio mix settings with BGM parameters */
    pAudioMixSettings->uiBeginLoop =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiBeginLoop;
    pAudioMixSettings->uiEndLoop =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiEndLoop;
    pAudioMixSettings->uiAddCts =
        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddCts;

    /**
     * Output file of the audio mixer will be final file (audio mixing is the last step) */
    pAudioMixSettings->pOutputClipFile = xVSS_context->pOutputFile;
    pAudioMixSettings->pTemporaryFile = xVSS_context->pTemporaryFile;

    /**
     * Input file of the audio mixer is a temporary file containing all audio/video editions */
    pAudioMixSettings->pOriginalClipFile = xVSS_context->pCurrentEditSettings->pOutputFile;

    /**
     * Save audio mixing settings pointer to be able to free it in
     * M4xVSS_internalCloseAudioMixedFile function */
    xVSS_context->pAudioMixSettings = pAudioMixSettings;

    /**
     * Create a VSS 3GPP audio mixing instance */
    err = M4VSS3GPP_audioMixingInit(&pAudioMixingCtxt, pAudioMixSettings,
        xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);

    /**
     * Save audio mixing context to be able to call audio mixing step function in
     * M4xVSS_step function (stored even on failure, before the error check below) */
    xVSS_context->pAudioMixContext = pAudioMixingCtxt;

    if (err != M4NO_ERROR)
    {
        M4OSA_TRACE1_1("M4xVSS_internalGenerateAudioMixFile:\
            M4VSS3GPP_audioMixingInit returned 0x%x\n",err);
        //M4VSS3GPP_audioMixingCleanUp(pAudioMixingCtxt);
        return err;
    }

    return M4NO_ERROR;
}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext)
+ *
+ * @brief This function cleans up VSS for audio mixing
+ * @note
+ * @param pContext (IN) The integrator own context
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_ERR err;
+
+ /**
+ * Free this VSS3GPP audio mixing instance */
+ if(xVSS_context->pAudioMixContext != M4OSA_NULL)
+ {
+ err = M4VSS3GPP_audioMixingCleanUp(xVSS_context->pAudioMixContext);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalCloseAudioMixedFile:\
+ M4VSS3GPP_audioMixingCleanUp returned 0x%x\n",err);
+ return err;
+ }
+ }
+
+ /**
+ * Free VSS audio mixing settings */
+ if(xVSS_context->pAudioMixSettings != M4OSA_NULL)
+ {
+ free(xVSS_context->pAudioMixSettings);
+ xVSS_context->pAudioMixSettings = M4OSA_NULL;
+ }
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext)
+ *
+ * @brief This function cleans up preview edition structure used to generate
+ * preview.3gp file given to the VPS
+ * @note It also free the preview structure given to the VPS
+ * @param pContext (IN) The integrator own context
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_UInt8 i;
+
+ /**
+ * Free clip/transition settings */
+ for(i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
+ {
+ M4xVSS_FreeClipSettings(xVSS_context->pCurrentEditSettings->pClipList[i]);
+
+ free((xVSS_context->pCurrentEditSettings->pClipList[i]));
+ xVSS_context->pCurrentEditSettings->pClipList[i] = M4OSA_NULL;
+
+ /**
+ * Because there is 1 less transition than clip number */
+ if(i != xVSS_context->pCurrentEditSettings->uiClipNumber-1)
+ {
+ free((xVSS_context->pCurrentEditSettings->pTransitionList[i]));
+ xVSS_context->pCurrentEditSettings->pTransitionList[i] = M4OSA_NULL;
+ }
+ }
+
+ /**
+ * Free clip/transition list */
+ if(xVSS_context->pCurrentEditSettings->pClipList != M4OSA_NULL)
+ {
+ free((xVSS_context->pCurrentEditSettings->pClipList));
+ xVSS_context->pCurrentEditSettings->pClipList = M4OSA_NULL;
+ }
+ if(xVSS_context->pCurrentEditSettings->pTransitionList != M4OSA_NULL)
+ {
+ free((xVSS_context->pCurrentEditSettings->pTransitionList));
+ xVSS_context->pCurrentEditSettings->pTransitionList = M4OSA_NULL;
+ }
+
+ /**
+ * Free output preview file path */
+ if(xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL)
+ {
+ free(xVSS_context->pCurrentEditSettings->pOutputFile);
+ xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
+ }
+
+ /**
+ * Free temporary preview file path */
+ if(xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL)
+ {
+ remove((const char *)xVSS_context->pCurrentEditSettings->pTemporaryFile);
+ free(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+ xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+ }
+
+ /**
+ * Free "local" BGM settings */
+ if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
+ {
+ if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
+ {
+ free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile);
+ xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+ }
+ free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack);
+ xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack = M4OSA_NULL;
+ }
+
+ /**
+ * Free current edit settings structure */
+ if(xVSS_context->pCurrentEditSettings != M4OSA_NULL)
+ {
+ free(xVSS_context->pCurrentEditSettings);
+ xVSS_context->pCurrentEditSettings = M4OSA_NULL;
+ }
+
+ /**
+ * Free preview effects given to application */
+ if(M4OSA_NULL != xVSS_context->pPreviewSettings->Effects)
+ {
+ free(xVSS_context->pPreviewSettings->Effects);
+ xVSS_context->pPreviewSettings->Effects = M4OSA_NULL;
+ xVSS_context->pPreviewSettings->nbEffects = 0;
+ }
+
+ return M4NO_ERROR;
+}
+
+
/**
 ******************************************************************************
 * prototype    M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext)
 *
 * @brief    This function cleans up saving edition structure used to generate
 *           output.3gp file given to the VPS
 * @note
 * @param    pContext    (IN) The integrator own context
 *
 * @return   M4NO_ERROR:      No error
 * @return   M4ERR_PARAMETER: At least one of the function parameters is null
 ******************************************************************************
 */
M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext)
{
    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
    M4OSA_UInt8 i;

    /* Everything below belongs to the current saving session; nothing to do when
     * no session is open */
    if(xVSS_context->pCurrentEditSettings != M4OSA_NULL)
    {
        /**
         * Free clip/transition settings */
        for(i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
        {
            M4xVSS_FreeClipSettings(xVSS_context->pCurrentEditSettings->pClipList[i]);

            free((xVSS_context->pCurrentEditSettings->pClipList[i]));
            xVSS_context->pCurrentEditSettings->pClipList[i] = M4OSA_NULL;

            /**
             * Because there is 1 less transition than clip number */
            if(i != xVSS_context->pCurrentEditSettings->uiClipNumber-1)
            {
                free(
                    (xVSS_context->pCurrentEditSettings->pTransitionList[i]));
                xVSS_context->pCurrentEditSettings->pTransitionList[i] = M4OSA_NULL;
            }
        }

        /**
         * Free clip/transition list */
        if(xVSS_context->pCurrentEditSettings->pClipList != M4OSA_NULL)
        {
            free((xVSS_context->pCurrentEditSettings->pClipList));
            xVSS_context->pCurrentEditSettings->pClipList = M4OSA_NULL;
        }
        if(xVSS_context->pCurrentEditSettings->pTransitionList != M4OSA_NULL)
        {
            free((xVSS_context->pCurrentEditSettings->pTransitionList));
            xVSS_context->pCurrentEditSettings->pTransitionList = M4OSA_NULL;
        }

        /* Free the per-session effects array */
        if(xVSS_context->pCurrentEditSettings->Effects != M4OSA_NULL)
        {
            free((xVSS_context->pCurrentEditSettings->Effects));
            xVSS_context->pCurrentEditSettings->Effects = M4OSA_NULL;
            xVSS_context->pCurrentEditSettings->nbEffects = 0;
        }

        /**
         * Free output saving file path.
         * NOTE(review): when a BGM track exists, pCurrentEditSettings->pOutputFile is
         * a distinct intermediate file that is deleted and freed here; when there is
         * no BGM track, only the pointers are cleared — presumably because it then
         * aliases xVSS_context->pOutputFile (freed just below). TODO confirm the
         * aliasing against the allocation site. */
        if(xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL)
        {
            if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
            {
                remove((const char *)xVSS_context->pCurrentEditSettings->pOutputFile);
                free(xVSS_context->pCurrentEditSettings->pOutputFile);
            }
            if(xVSS_context->pOutputFile != M4OSA_NULL)
            {
                free(xVSS_context->pOutputFile);
                xVSS_context->pOutputFile = M4OSA_NULL;
            }
            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
            xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
        }

        /**
         * Free temporary saving file path (and delete the file on disk) */
        if(xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL)
        {
            remove((const char *)xVSS_context->pCurrentEditSettings->pTemporaryFile);
            free(xVSS_context->pCurrentEditSettings->pTemporaryFile);
            xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
        }

        /**
         * Free "local" BGM settings */
        if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
        {
            if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
            {
                free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile);
                xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
            }
            free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack);
            xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack = M4OSA_NULL;
        }

        /**
         * Free current edit settings structure */
        free(xVSS_context->pCurrentEditSettings);
        xVSS_context->pCurrentEditSettings = M4OSA_NULL;
    }

    return M4NO_ERROR;
}
+
+
/**
 ******************************************************************************
 * prototype    M4OSA_ERR M4xVSS_freeSettings(M4OSA_Context pContext)
 *
 * @brief    This function cleans up an M4VSS3GPP_EditSettings structure
 * @note
 * @param    pSettings    (IN) Pointer on M4VSS3GPP_EditSettings structure to free
 *
 * @return   M4NO_ERROR:      No error
 * @return   M4ERR_PARAMETER: At least one of the function parameters is null
 ******************************************************************************
 */
M4OSA_ERR M4xVSS_freeSettings(M4VSS3GPP_EditSettings* pSettings)
{
    M4OSA_UInt8 i,j;

    /**
     * For each clip ... */
    for(i=0; i<pSettings->uiClipNumber; i++)
    {
        /**
         * ... free clip settings */
        if(pSettings->pClipList[i] != M4OSA_NULL)
        {
            M4xVSS_FreeClipSettings(pSettings->pClipList[i]);

            free((pSettings->pClipList[i]));
            pSettings->pClipList[i] = M4OSA_NULL;
        }

        /**
         * ... free transition settings */
        if(i < pSettings->uiClipNumber-1) /* Because there is 1 less transition than clip number */
        {
            if(pSettings->pTransitionList[i] != M4OSA_NULL)
            {
                switch (pSettings->pTransitionList[i]->VideoTransitionType)
                {
                    case M4xVSS_kVideoTransitionType_AlphaMagic:

                        /**
                         * In case of Alpha Magic transition,
                         * some extra parameters need to be freed */
                        if(pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt
                            != M4OSA_NULL)
                        {
                            /* Free the alpha plane pixel data, then the plane
                             * descriptor, then the transition context itself */
                            free((((M4xVSS_internal_AlphaMagicSettings*)
                                pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt)->
                                    pPlane->pac_data));
                            ((M4xVSS_internal_AlphaMagicSettings*)pSettings->pTransitionList[i
                                ]->pExtVideoTransitionFctCtxt)->pPlane->pac_data = M4OSA_NULL;

                            free((((M4xVSS_internal_AlphaMagicSettings*)
                                pSettings->pTransitionList[i]->
                                    pExtVideoTransitionFctCtxt)->pPlane));
                            ((M4xVSS_internal_AlphaMagicSettings*)pSettings->pTransitionList[i]
                                ->pExtVideoTransitionFctCtxt)->pPlane = M4OSA_NULL;

                            free((pSettings->pTransitionList[i]->
                                pExtVideoTransitionFctCtxt));
                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt = M4OSA_NULL;

                            /* Later Alpha Magic transitions that use the same alpha
                             * file share this context: clear their pointer so it is
                             * not freed a second time */
                            for(j=i+1;j<pSettings->uiClipNumber-1;j++)
                            {
                                if(pSettings->pTransitionList[j] != M4OSA_NULL)
                                {
                                    if(pSettings->pTransitionList[j]->VideoTransitionType ==
                                        M4xVSS_kVideoTransitionType_AlphaMagic)
                                    {
                                        M4OSA_UInt32 pCmpResult=0;
                                        pCmpResult = strcmp((const char *)pSettings->pTransitionList[i]->
                                            xVSS.transitionSpecific.pAlphaMagicSettings->
                                                pAlphaFilePath,
                                                    (const char *)pSettings->pTransitionList[j]->
                                                        xVSS.transitionSpecific.pAlphaMagicSettings->
                                                            pAlphaFilePath);
                                        if(pCmpResult == 0)
                                        {
                                            /* Free extra internal alpha magic structure and put
                                            it to NULL to avoid refreeing it */
                                            free((pSettings->
                                                pTransitionList[j]->pExtVideoTransitionFctCtxt));
                                            pSettings->pTransitionList[j]->
                                                pExtVideoTransitionFctCtxt = M4OSA_NULL;
                                        }
                                    }
                                }
                            }
                        }

                        /* Free the user-visible alpha magic settings (file path
                         * string, then the settings structure) */
                        if(pSettings->pTransitionList[i]->
                            xVSS.transitionSpecific.pAlphaMagicSettings != M4OSA_NULL)
                        {
                            if(pSettings->pTransitionList[i]->
                                xVSS.transitionSpecific.pAlphaMagicSettings->
                                    pAlphaFilePath != M4OSA_NULL)
                            {
                                free(pSettings->
                                    pTransitionList[i]->
                                        xVSS.transitionSpecific.pAlphaMagicSettings->
                                            pAlphaFilePath);
                                pSettings->pTransitionList[i]->
                                    xVSS.transitionSpecific.pAlphaMagicSettings->
                                        pAlphaFilePath = M4OSA_NULL;
                            }
                            free(pSettings->pTransitionList[i]->
                                xVSS.transitionSpecific.pAlphaMagicSettings);
                            pSettings->pTransitionList[i]->
                                xVSS.transitionSpecific.pAlphaMagicSettings = M4OSA_NULL;

                        }

                        break;


                    case M4xVSS_kVideoTransitionType_SlideTransition:
                        /* Slide transitions own a settings structure and a
                         * transition context; free both if present */
                        if (M4OSA_NULL != pSettings->pTransitionList[i]->
                            xVSS.transitionSpecific.pSlideTransitionSettings)
                        {
                            free(pSettings->pTransitionList[i]->
                                xVSS.transitionSpecific.pSlideTransitionSettings);
                            pSettings->pTransitionList[i]->
                                xVSS.transitionSpecific.pSlideTransitionSettings = M4OSA_NULL;
                        }
                        if(pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt != M4OSA_NULL)
                        {
                            free((pSettings->pTransitionList[i]->
                                pExtVideoTransitionFctCtxt));
                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt = M4OSA_NULL;
                        }
                        break;
                    default:
                        /* Other transition types carry no extra allocations */
                        break;

                }
                /**
                 * Free transition settings structure */
                free((pSettings->pTransitionList[i]));
                pSettings->pTransitionList[i] = M4OSA_NULL;
            }
        }
    }

    /**
     * Free clip list */
    if(pSettings->pClipList != M4OSA_NULL)
    {
        free((pSettings->pClipList));
        pSettings->pClipList = M4OSA_NULL;
    }

    /**
     * Free transition list */
    if(pSettings->pTransitionList != M4OSA_NULL)
    {
        free((pSettings->pTransitionList));
        pSettings->pTransitionList = M4OSA_NULL;
    }

    /**
     * RC: Free effects list */
    if(pSettings->Effects != M4OSA_NULL)
    {
        for(i=0; i<pSettings->nbEffects; i++)
        {
            /**
             * For each clip, free framing structure if needed */
            if(pSettings->Effects[i].VideoEffectType == M4xVSS_kVideoEffectType_Framing
                || pSettings->Effects[i].VideoEffectType == M4xVSS_kVideoEffectType_Text)
            {
#ifdef DECODE_GIF_ON_SAVING
                M4xVSS_FramingContext* framingCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
#else
                M4xVSS_FramingStruct* framingCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
                M4xVSS_FramingStruct* framingCtx_save;
                /* NOTE(review): framingCtx_first is declared with the 3102 struct type
                 * while framingCtx is a M4xVSS_FramingStruct* — it is only used for
                 * the pointer comparison terminating the loop below, but confirm the
                 * type mismatch is intentional. */
                M4xVSS_Framing3102Struct* framingCtx_first = framingCtx;
#endif

#ifdef DECODE_GIF_ON_SAVING
                if(framingCtx != M4OSA_NULL) /* Bugfix 1.2.0: crash, trying to free non existant
                    pointer */
                {
                    /* Free the current framing frame: RGB plane, then the three
                     * YUV planes */
                    if(framingCtx->aFramingCtx != M4OSA_NULL)
                    {
                        {
                            if(framingCtx->aFramingCtx->FramingRgb != M4OSA_NULL)
                            {
                                free(framingCtx->aFramingCtx->
                                    FramingRgb->pac_data);
                                framingCtx->aFramingCtx->FramingRgb->pac_data = M4OSA_NULL;
                                free(framingCtx->aFramingCtx->FramingRgb);
                                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
                            }
                        }
                        if(framingCtx->aFramingCtx->FramingYuv != M4OSA_NULL)
                        {
                            free(framingCtx->aFramingCtx->
                                FramingYuv[0].pac_data);
                            framingCtx->aFramingCtx->FramingYuv[0].pac_data = M4OSA_NULL;
                            free(framingCtx->aFramingCtx->
                                FramingYuv[1].pac_data);
                            framingCtx->aFramingCtx->FramingYuv[1].pac_data = M4OSA_NULL;
                            free(framingCtx->aFramingCtx->
                                FramingYuv[2].pac_data);
                            framingCtx->aFramingCtx->FramingYuv[2].pac_data = M4OSA_NULL;
                            free(framingCtx->aFramingCtx->FramingYuv);
                            framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
                        }
                        free(framingCtx->aFramingCtx);
                        framingCtx->aFramingCtx = M4OSA_NULL;
                    }
                    /* Free the "last" framing frame.
                     * NOTE(review): only FramingYuv[0].pac_data is freed here while
                     * the block above frees all three planes — presumably the last
                     * frame's U/V planes point into the same buffer; confirm against
                     * the allocation site. */
                    if(framingCtx->aFramingCtx_last != M4OSA_NULL)
                    {
                        if(framingCtx->aFramingCtx_last->FramingRgb != M4OSA_NULL)
                        {
                            free(framingCtx->aFramingCtx_last->
                                FramingRgb->pac_data);
                            framingCtx->aFramingCtx_last->FramingRgb->pac_data = M4OSA_NULL;
                            free(framingCtx->aFramingCtx_last->
                                FramingRgb);
                            framingCtx->aFramingCtx_last->FramingRgb = M4OSA_NULL;
                        }
                        if(framingCtx->aFramingCtx_last->FramingYuv != M4OSA_NULL)
                        {
                            free(framingCtx->aFramingCtx_last->
                                FramingYuv[0].pac_data);
                            framingCtx->aFramingCtx_last->FramingYuv[0].pac_data = M4OSA_NULL;
                            free(framingCtx->aFramingCtx_last->FramingYuv);
                            framingCtx->aFramingCtx_last->FramingYuv = M4OSA_NULL;
                        }
                        free(framingCtx->aFramingCtx_last);
                        framingCtx->aFramingCtx_last = M4OSA_NULL;
                    }
                    if(framingCtx->pEffectFilePath != M4OSA_NULL)
                    {
                        free(framingCtx->pEffectFilePath);
                        framingCtx->pEffectFilePath = M4OSA_NULL;
                    }
                    /*In case there are still allocated*/
                    if(framingCtx->pSPSContext != M4OSA_NULL)
                    {
                    //    M4SPS_destroy(framingCtx->pSPSContext);
                        framingCtx->pSPSContext = M4OSA_NULL;
                    }
                    /*Alpha blending structure*/
                    if(framingCtx->alphaBlendingStruct  != M4OSA_NULL)
                    {
                        free(framingCtx->alphaBlendingStruct);
                        framingCtx->alphaBlendingStruct = M4OSA_NULL;
                    }

                    free(framingCtx);
                    framingCtx = M4OSA_NULL;
                }
#else
                /* Non-GIF build: the framing contexts form a circular linked list;
                 * walk it once, freeing each node, and stop when the walk returns
                 * to the first node */
                do
                {
                    if(framingCtx != M4OSA_NULL) /* Bugfix 1.2.0: crash, trying to free non
                        existant pointer */
                    {
                        if(framingCtx->FramingRgb != M4OSA_NULL)
                        {
                            free(framingCtx->FramingRgb->pac_data);
                            framingCtx->FramingRgb->pac_data = M4OSA_NULL;
                            free(framingCtx->FramingRgb);
                            framingCtx->FramingRgb = M4OSA_NULL;
                        }
                        if(framingCtx->FramingYuv != M4OSA_NULL)
                        {
                            free(framingCtx->FramingYuv[0].pac_data);
                            framingCtx->FramingYuv[0].pac_data = M4OSA_NULL;
                            free(framingCtx->FramingYuv);
                            framingCtx->FramingYuv = M4OSA_NULL;
                        }
                        /* Save the next pointer before freeing the node */
                        framingCtx_save = framingCtx->pNext;
                        free(framingCtx);
                        framingCtx = M4OSA_NULL;
                        framingCtx = framingCtx_save;
                    }
                    else
                    {
                        /*FB: bug fix P4ME00003002*/
                        break;
                    }
                } while(framingCtx_first != framingCtx);
#endif
            }
            else if( M4xVSS_kVideoEffectType_Fifties == pSettings->Effects[i].VideoEffectType)
            {
                /* Free Fifties context */
                M4xVSS_FiftiesStruct* FiftiesCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;

                if(FiftiesCtx != M4OSA_NULL)
                {
                    free(FiftiesCtx);
                    FiftiesCtx = M4OSA_NULL;
                }

            }
            else if( M4xVSS_kVideoEffectType_ColorRGB16 == pSettings->Effects[i].VideoEffectType
                || M4xVSS_kVideoEffectType_BlackAndWhite == pSettings->Effects[i].VideoEffectType
                || M4xVSS_kVideoEffectType_Pink == pSettings->Effects[i].VideoEffectType
                || M4xVSS_kVideoEffectType_Green == pSettings->Effects[i].VideoEffectType
                || M4xVSS_kVideoEffectType_Sepia == pSettings->Effects[i].VideoEffectType
                || M4xVSS_kVideoEffectType_Negative== pSettings->Effects[i].VideoEffectType
                || M4xVSS_kVideoEffectType_Gradient== pSettings->Effects[i].VideoEffectType)
            {
                /* Free Color context */
                M4xVSS_ColorStruct* ColorCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;

                if(ColorCtx != M4OSA_NULL)
                {
                    free(ColorCtx);
                    ColorCtx = M4OSA_NULL;
                }
            }

            /* Free simple fields */
            if(pSettings->Effects[i].xVSS.pFramingFilePath != M4OSA_NULL)
            {
                free(pSettings->Effects[i].xVSS.pFramingFilePath);
                pSettings->Effects[i].xVSS.pFramingFilePath = M4OSA_NULL;
            }
            if(pSettings->Effects[i].xVSS.pFramingBuffer != M4OSA_NULL)
            {
                free(pSettings->Effects[i].xVSS.pFramingBuffer);
                pSettings->Effects[i].xVSS.pFramingBuffer = M4OSA_NULL;
            }
            if(pSettings->Effects[i].xVSS.pTextBuffer != M4OSA_NULL)
            {
                free(pSettings->Effects[i].xVSS.pTextBuffer);
                pSettings->Effects[i].xVSS.pTextBuffer = M4OSA_NULL;
            }
        }
        free(pSettings->Effects);
        pSettings->Effects = M4OSA_NULL;
    }

    return M4NO_ERROR;
}
+
+M4OSA_ERR M4xVSS_freeCommand(M4OSA_Context pContext)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+// M4OSA_UInt8 i,j;
+
+ /* Free "local" BGM settings */
+ if(xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL)
+ {
+ if(xVSS_context->pSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
+ {
+ free(xVSS_context->pSettings->xVSS.pBGMtrack->pFile);
+ xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+ }
+ free(xVSS_context->pSettings->xVSS.pBGMtrack);
+ xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+ }
+
+ M4xVSS_freeSettings(xVSS_context->pSettings);
+
+ if(xVSS_context->pPTo3GPPparamsList != M4OSA_NULL)
+ {
+ M4xVSS_Pto3GPP_params* pParams = xVSS_context->pPTo3GPPparamsList;
+ M4xVSS_Pto3GPP_params* pParams_sauv;
+
+ while(pParams != M4OSA_NULL)
+ {
+ if(pParams->pFileIn != M4OSA_NULL)
+ {
+ free(pParams->pFileIn);
+ pParams->pFileIn = M4OSA_NULL;
+ }
+ if(pParams->pFileOut != M4OSA_NULL)
+ {
+ /* Delete temporary file */
+ remove((const char *)pParams->pFileOut);
+ free(pParams->pFileOut);
+ pParams->pFileOut = M4OSA_NULL;
+ }
+ if(pParams->pFileTemp != M4OSA_NULL)
+ {
+ /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+ remove((const char *)pParams->pFileTemp);
+ free(pParams->pFileTemp);
+#endif/*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+ pParams->pFileTemp = M4OSA_NULL;
+ }
+ pParams_sauv = pParams;
+ pParams = pParams->pNext;
+ free(pParams_sauv);
+ pParams_sauv = M4OSA_NULL;
+ }
+ }
+
+ if(xVSS_context->pMCSparamsList != M4OSA_NULL)
+ {
+ M4xVSS_MCS_params* pParams = xVSS_context->pMCSparamsList;
+ M4xVSS_MCS_params* pParams_sauv;
+
+ while(pParams != M4OSA_NULL)
+ {
+ if(pParams->pFileIn != M4OSA_NULL)
+ {
+ free(pParams->pFileIn);
+ pParams->pFileIn = M4OSA_NULL;
+ }
+ if(pParams->pFileOut != M4OSA_NULL)
+ {
+ /* Delete temporary file */
+ remove((const char *)pParams->pFileOut);
+ free(pParams->pFileOut);
+ pParams->pFileOut = M4OSA_NULL;
+ }
+ if(pParams->pFileTemp != M4OSA_NULL)
+ {
+ /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+ remove((const char *)pParams->pFileTemp);
+ free(pParams->pFileTemp);
+#endif/*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+ pParams->pFileTemp = M4OSA_NULL;
+ }
+ pParams_sauv = pParams;
+ pParams = pParams->pNext;
+ free(pParams_sauv);
+ pParams_sauv = M4OSA_NULL;
+ }
+ }
+
+ if(xVSS_context->pcmPreviewFile != M4OSA_NULL)
+ {
+ free(xVSS_context->pcmPreviewFile);
+ xVSS_context->pcmPreviewFile = M4OSA_NULL;
+ }
+ if(xVSS_context->pSettings->pOutputFile != M4OSA_NULL
+ && xVSS_context->pOutputFile != M4OSA_NULL)
+ {
+ free(xVSS_context->pSettings->pOutputFile);
+ xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+ xVSS_context->pOutputFile = M4OSA_NULL;
+ }
+
+ /* Reinit all context variables */
+ xVSS_context->previousClipNumber = 0;
+ xVSS_context->editingStep = M4xVSS_kMicroStateEditing;
+ xVSS_context->analyseStep = M4xVSS_kMicroStateAnalysePto3GPP;
+ xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+ xVSS_context->pPTo3GPPcurrentParams = M4OSA_NULL;
+ xVSS_context->pMCSparamsList = M4OSA_NULL;
+ xVSS_context->pMCScurrentParams = M4OSA_NULL;
+ xVSS_context->tempFileIndex = 0;
+ xVSS_context->targetedTimescale = 0;
+
+ return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext,
+ * M4OSA_Char* pFile,
+ * M4VIDEOEDITING_ClipProperties *pFileProperties)
+ *
+ * @brief This function retrieve properties of an input 3GP file using MCS
+ * @note
+ * @param pContext (IN) The integrator own context
+ * @param pFile (IN) 3GP file to analyse
+ * @param pFileProperties (IN/OUT) Pointer on a structure that will contain
+ * the 3GP file properties
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext, M4OSA_Char* pFile,
+ M4VIDEOEDITING_ClipProperties *pFileProperties)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_ERR err;
+ M4MCS_Context mcs_context;
+
+ err = M4MCS_init(&mcs_context, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_init: 0x%x", err);
+ return err;
+ }
+
+ /*open the MCS in the "normal opening" mode to retrieve the exact duration*/
+ err = M4MCS_open_normalMode(mcs_context, pFile, M4VIDEOEDITING_kFileType_3GPP,
+ M4OSA_NULL, M4OSA_NULL);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_open: 0x%x", err);
+ M4MCS_abort(mcs_context);
+ return err;
+ }
+
+ err = M4MCS_getInputFileProperties(mcs_context, pFileProperties);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("Error in M4MCS_getInputFileProperties: 0x%x", err);
+ M4MCS_abort(mcs_context);
+ return err;
+ }
+
+ err = M4MCS_abort(mcs_context);
+ if (err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_abort: 0x%x", err);
+ return err;
+ }
+
+ return M4NO_ERROR;
+}
+
+
/**
 ******************************************************************************
 * prototype    M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
 *                                              M4VSS3GPP_EditSettings* pSettings,
 *                                              M4OSA_UInt32* pTargetedTimeScale)
 *
 * @brief    This function retrieves the targeted time scale: it scans the clip
 *           list and reuses the video time scale of the MPEG-4 clip with the
 *           longest effective (cut) duration. A floor of 30 is enforced.
 * @note     The original header comment omitted pSettings; the actual signature
 *           takes three parameters.
 * @param    pContext            (IN)  The integrator own context
 * @param    pSettings           (IN)  Edit settings whose clip list is scanned
 * @param    pTargetedTimeScale  (OUT) Targeted time scale
 *
 * @return    M4NO_ERROR: No error
 * @return    M4ERR_PARAMETER: At least one of the function parameters is null
 ******************************************************************************
 */
M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
                                                M4VSS3GPP_EditSettings* pSettings,
                                                 M4OSA_UInt32* pTargetedTimeScale)
{
    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
    M4OSA_ERR err;
    M4OSA_UInt32 totalDuration = 0;   /* NOTE(review): never read — kept as-is */
    M4OSA_UInt8 i = 0;
    /* Time scale and duration of the longest MPEG-4 clip found so far. */
    M4OSA_UInt32 tempTimeScale = 0, tempDuration = 0;

    for(i=0;i<pSettings->uiClipNumber;i++)
    {
        /*search timescale only in mpeg4 case*/
        if(pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_3GPP
            || pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_MP4
            || pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_M4V)
        {
            M4VIDEOEDITING_ClipProperties fileProperties;

            /*UTF conversion support*/
            M4OSA_Char* pDecodedPath = M4OSA_NULL;

            /**
             * UTF conversion: convert into the customer format, before being used*/
            pDecodedPath = pSettings->pClipList[i]->pFile;

            /* When a conversion callback and scratch buffer are installed,
             * convert the clip path before handing it to the MCS. */
            if(xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
                && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
            {
                M4OSA_UInt32 length = 0;
                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
                     (M4OSA_Void*) pSettings->pClipList[i]->pFile,
                     (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
                     &length);
                if(err != M4NO_ERROR)
                {
                    M4OSA_TRACE1_1("M4xVSS_Init:\
                         M4xVSS_internalConvertToUTF8 returns err: 0x%x",err);
                    return err;
                }
                pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
            }

            /*End of the conversion: use the decoded path*/
            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath, &fileProperties);

            /*get input file properties*/
            /*err = M4xVSS_internalGetProperties(xVSS_context, pSettings->\
                pClipList[i]->pFile, &fileProperties);*/
            if(M4NO_ERROR != err)
            {
                M4OSA_TRACE1_1("M4xVSS_internalGetTargetedTimeScale:\
                     M4xVSS_internalGetProperties returned: 0x%x", err);
                return err;
            }
            if(fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
            {
                /* Effective duration: end-cut minus begin-cut when an end cut
                 * is set, otherwise full clip duration minus begin-cut.
                 * Keep the time scale of the longest clip. */
                if(pSettings->pClipList[i]->uiEndCutTime > 0)
                {
                    if(tempDuration < (pSettings->pClipList[i]->uiEndCutTime \
                        - pSettings->pClipList[i]->uiBeginCutTime))
                    {
                        tempTimeScale = fileProperties.uiVideoTimeScale;
                        tempDuration = (pSettings->pClipList[i]->uiEndCutTime\
                            - pSettings->pClipList[i]->uiBeginCutTime);
                    }
                }
                else
                {
                    if(tempDuration < (fileProperties.uiClipDuration\
                        - pSettings->pClipList[i]->uiBeginCutTime))
                    {
                        tempTimeScale = fileProperties.uiVideoTimeScale;
                        tempDuration = (fileProperties.uiClipDuration\
                            - pSettings->pClipList[i]->uiBeginCutTime);
                    }
                }
            }
        }
        /* Still-picture input: the Pto3GPP converter always encodes at 30. */
        if(pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_ARGB8888)
        {
            /*the timescale is 30 for PTO3GP*/
            *pTargetedTimeScale = 30;
            return M4NO_ERROR;

        }
    }

    if(tempTimeScale >= 30)/*Define a minimum time scale, otherwise if the timescale is not
                           enough, there will be an infinite loop in the shell encoder*/
    {
        *pTargetedTimeScale = tempTimeScale;
    }
    else
    {
        *pTargetedTimeScale = 30;
    }

    return M4NO_ERROR;
}
+
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function apply a color effect on an input YUV420 planar frame
+ * @note
+ * @param pFunctionContext(IN) Contains which color to apply (not very clean ...)
+ * @param PlaneIn (IN) Input YUV420 planar
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+ M4VIFI_ImagePlane *PlaneIn,
+ M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiEffectKind)
+{
+ M4VIFI_Int32 plane_number;
+ M4VIFI_UInt32 i,j;
+ M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
+ M4xVSS_ColorStruct* ColorContext = (M4xVSS_ColorStruct*)pFunctionContext;
+
+ for (plane_number = 0; plane_number < 3; plane_number++)
+ {
+ p_buf_src = &(PlaneIn[plane_number].pac_data[PlaneIn[plane_number].u_topleft]);
+ p_buf_dest = &(PlaneOut[plane_number].pac_data[PlaneOut[plane_number].u_topleft]);
+ for (i = 0; i < PlaneOut[plane_number].u_height; i++)
+ {
+ /**
+ * Chrominance */
+ if(plane_number==1 || plane_number==2)
+ {
+ //switch ((M4OSA_UInt32)pFunctionContext)
+ // commented because a structure for the effects context exist
+ switch (ColorContext->colorEffectType)
+ {
+ case M4xVSS_kVideoEffectType_BlackAndWhite:
+ memset((void *)p_buf_dest,128,
+ PlaneIn[plane_number].u_width);
+ break;
+ case M4xVSS_kVideoEffectType_Pink:
+ memset((void *)p_buf_dest,255,
+ PlaneIn[plane_number].u_width);
+ break;
+ case M4xVSS_kVideoEffectType_Green:
+ memset((void *)p_buf_dest,0,
+ PlaneIn[plane_number].u_width);
+ break;
+ case M4xVSS_kVideoEffectType_Sepia:
+ if(plane_number==1)
+ {
+ memset((void *)p_buf_dest,117,
+ PlaneIn[plane_number].u_width);
+ }
+ else
+ {
+ memset((void *)p_buf_dest,139,
+ PlaneIn[plane_number].u_width);
+ }
+ break;
+ case M4xVSS_kVideoEffectType_Negative:
+ memcpy((void *)p_buf_dest,
+ (void *)p_buf_src ,PlaneOut[plane_number].u_width);
+ break;
+
+ case M4xVSS_kVideoEffectType_ColorRGB16:
+ {
+ M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
+
+ /*first get the r, g, b*/
+ b = (ColorContext->rgb16ColorData & 0x001f);
+ g = (ColorContext->rgb16ColorData & 0x07e0)>>5;
+ r = (ColorContext->rgb16ColorData & 0xf800)>>11;
+
+ /*keep y, but replace u and v*/
+ if(plane_number==1)
+ {
+ /*then convert to u*/
+ u = U16(r, g, b);
+ memset((void *)p_buf_dest,(M4OSA_UInt8)u,
+ PlaneIn[plane_number].u_width);
+ }
+ if(plane_number==2)
+ {
+ /*then convert to v*/
+ v = V16(r, g, b);
+ memset((void *)p_buf_dest, (M4OSA_UInt8)v,
+ PlaneIn[plane_number].u_width);
+ }
+ }
+ break;
+ case M4xVSS_kVideoEffectType_Gradient:
+ {
+ M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
+
+ /*first get the r, g, b*/
+ b = (ColorContext->rgb16ColorData & 0x001f);
+ g = (ColorContext->rgb16ColorData & 0x07e0)>>5;
+ r = (ColorContext->rgb16ColorData & 0xf800)>>11;
+
+ /*for color gradation*/
+ b = (M4OSA_UInt16)( b - ((b*i)/PlaneIn[plane_number].u_height));
+ g = (M4OSA_UInt16)(g - ((g*i)/PlaneIn[plane_number].u_height));
+ r = (M4OSA_UInt16)(r - ((r*i)/PlaneIn[plane_number].u_height));
+
+ /*keep y, but replace u and v*/
+ if(plane_number==1)
+ {
+ /*then convert to u*/
+ u = U16(r, g, b);
+ memset((void *)p_buf_dest,(M4OSA_UInt8)u,
+ PlaneIn[plane_number].u_width);
+ }
+ if(plane_number==2)
+ {
+ /*then convert to v*/
+ v = V16(r, g, b);
+ memset((void *)p_buf_dest,(M4OSA_UInt8)v,
+ PlaneIn[plane_number].u_width);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ /**
+ * Luminance */
+ else
+ {
+ //switch ((M4OSA_UInt32)pFunctionContext)
+ // commented because a structure for the effects context exist
+ switch (ColorContext->colorEffectType)
+ {
+ case M4xVSS_kVideoEffectType_Negative:
+ for(j=0;j<PlaneOut[plane_number].u_width;j++)
+ {
+ p_buf_dest[j] = 255 - p_buf_src[j];
+ }
+ break;
+ default:
+ memcpy((void *)p_buf_dest,
+ (void *)p_buf_src ,PlaneOut[plane_number].u_width);
+ break;
+ }
+ }
+ p_buf_src += PlaneIn[plane_number].u_stride;
+ p_buf_dest += PlaneOut[plane_number].u_stride;
+ }
+ }
+
+ return M4VIFI_OK;
+}
+
/**
 ******************************************************************************
 * prototype    M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *pFunctionContext,
 *                                  M4VIFI_ImagePlane *PlaneIn,
 *                                  M4VIFI_ImagePlane *PlaneOut,
 *                                  M4VSS3GPP_ExternalProgress *pProgress,
 *                                  M4OSA_UInt32 uiEffectKind)
 *
 * @brief    This function overlays a fixed or animated image (the "framing")
 *           on an input YUV420 planar frame, with optional alpha blending
 *           (fade-in / steady / fade-out driven by the effect progress).
 * @note     Pixels of the framing whose RGB565 value equals TRANSPARENT_COLOR
 *           are treated as fully transparent.
 * @param    userData        (IN) Framing descriptor/context (see NOTE in body)
 * @param    PlaneIn         (IN) Input YUV420 planar
 * @param    PlaneOut        (IN/OUT) Output YUV420 planar
 * @param    pProgress       (IN/OUT) Progress indication (0-100)
 * @param    uiEffectKind    (IN) Unused
 *
 * @return    M4VIFI_OK: No error
 ******************************************************************************
 */
M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming( M4OSA_Void *userData,
                                                M4VIFI_ImagePlane PlaneIn[3],
                                                M4VIFI_ImagePlane *PlaneOut,
                                                M4VSS3GPP_ExternalProgress *pProgress,
                                                M4OSA_UInt32 uiEffectKind )
{
    M4VIFI_UInt32 x,y;

    M4VIFI_UInt8 *p_in_Y = PlaneIn[0].pac_data;
    M4VIFI_UInt8 *p_in_U = PlaneIn[1].pac_data;
    M4VIFI_UInt8 *p_in_V = PlaneIn[2].pac_data;

    M4xVSS_FramingStruct* Framing = M4OSA_NULL;
    M4xVSS_FramingStruct* currentFraming = M4OSA_NULL;
    M4VIFI_UInt8 *FramingRGB = M4OSA_NULL;

    M4VIFI_UInt8 *p_out0;
    M4VIFI_UInt8 *p_out1;
    M4VIFI_UInt8 *p_out2;

    /* Top-left position of the framing inside the input frame:
     * topleft[0] = x (column), topleft[1] = y (row). */
    M4VIFI_UInt32 topleft[2];

    /* High and low byte of the RGB565 value treated as fully transparent. */
    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;

#ifndef DECODE_GIF_ON_SAVING
    /* userData is the framing chain head; pCurrent selects the active frame. */
    Framing = (M4xVSS_FramingStruct *)userData;
    currentFraming = (M4xVSS_FramingStruct *)Framing->pCurrent;
    FramingRGB = Framing->FramingRgb->pac_data;
#endif /*DECODE_GIF_ON_SAVING*/

    /*FB*/
#ifdef DECODE_GIF_ON_SAVING
    /* userData is a framing context wrapping a single decoded frame. */
    M4OSA_ERR err;
    Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
    currentFraming = (M4xVSS_FramingStruct *)Framing;
    FramingRGB = Framing->FramingRgb->pac_data;
#endif /*DECODE_GIF_ON_SAVING*/
    /*end FB*/

    /**
     * Initialize input / output plane pointers */
    p_in_Y += PlaneIn[0].u_topleft;
    p_in_U += PlaneIn[1].u_topleft;
    p_in_V += PlaneIn[2].u_topleft;

    p_out0 = PlaneOut[0].pac_data;
    p_out1 = PlaneOut[1].pac_data;
    p_out2 = PlaneOut[2].pac_data;

    /**
     * Depending on time, initialize Framing frame to use */
    if(Framing->previousClipTime == -1)
    {
        Framing->previousClipTime = pProgress->uiOutputTime;
    }

    /**
     * If the current clip time has reach the duration of one frame of the framing picture
     * we need to step to next framing picture */

    Framing->previousClipTime = pProgress->uiOutputTime;
    FramingRGB = currentFraming->FramingRgb->pac_data;
    topleft[0] = currentFraming->topleft_x;
    topleft[1] = currentFraming->topleft_y;

    /* x walks rows (height), y walks columns (width). */
    for( x=0 ;x < PlaneIn[0].u_height ; x++)
    {
        for( y=0 ;y < PlaneIn[0].u_width ; y++)
        {
            /**
             * To handle framing with input size != output size
             * Framing is applyed if coordinates matches between framing/topleft and input plane */
            if( y < (topleft[0] + currentFraming->FramingYuv[0].u_width)  &&
                y >= topleft[0] &&
                x < (topleft[1] + currentFraming->FramingYuv[0].u_height) &&
                x >= topleft[1])
            {
                /*Alpha blending support*/
                /* 1 = fully opaque framing; lowered during fade-in/fade-out. */
                M4OSA_Float alphaBlending = 1;
                /* NOTE(review): this cast assumes userData is an
                 * M4xVSS_FramingContext; in the #ifndef DECODE_GIF_ON_SAVING
                 * build above, userData is an M4xVSS_FramingStruct — confirm. */
                M4xVSS_internalEffectsAlphaBlending*  alphaBlendingStruct =\
                 (M4xVSS_internalEffectsAlphaBlending*)\
                 ((M4xVSS_FramingContext*)userData)->alphaBlendingStruct;

                if(alphaBlendingStruct != M4OSA_NULL)
                {
                    /* Progress is in [0..1000]; fade times are in tenths of
                     * the effect duration, hence the *10 scaling. */
                    if(pProgress->uiProgress \
                     < (M4OSA_UInt32)(alphaBlendingStruct->m_fadeInTime*10))
                    {
                        /* Fade-in: ramp from m_start toward m_middle. */
                        if(alphaBlendingStruct->m_fadeInTime == 0) {
                            alphaBlending = alphaBlendingStruct->m_start / 100;
                        } else {
                            alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle\
                             - alphaBlendingStruct->m_start)\
                            *pProgress->uiProgress/(alphaBlendingStruct->m_fadeInTime*10));
                            alphaBlending += alphaBlendingStruct->m_start;
                            alphaBlending /= 100;
                        }
                    }
                    else if(pProgress->uiProgress >= (M4OSA_UInt32)(alphaBlendingStruct->\
                    m_fadeInTime*10) && pProgress->uiProgress < 1000\
                     - (M4OSA_UInt32)(alphaBlendingStruct->m_fadeOutTime*10))
                    {
                        /* Steady state: hold m_middle. */
                        alphaBlending = (M4OSA_Float)\
                        ((M4OSA_Float)alphaBlendingStruct->m_middle/100);
                    }
                    else if(pProgress->uiProgress >= 1000 - (M4OSA_UInt32)\
                    (alphaBlendingStruct->m_fadeOutTime*10))
                    {
                        /* Fade-out: ramp from m_middle toward m_end. */
                        if(alphaBlendingStruct->m_fadeOutTime == 0) {
                            alphaBlending = alphaBlendingStruct->m_end / 100;
                        } else {
                            alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle \
                            - alphaBlendingStruct->m_end))*(1000 - pProgress->uiProgress)\
                            /(alphaBlendingStruct->m_fadeOutTime*10);
                            alphaBlending += alphaBlendingStruct->m_end;
                            alphaBlending /= 100;
                        }
                    }
                }
                /**/

                /* Transparent framing pixel: pass the input pixel through. */
                if((*(FramingRGB)==transparent1) && (*(FramingRGB+1)==transparent2))
                {
                    *( p_out0+y+x*PlaneOut[0].u_stride)=(*(p_in_Y+y+x*PlaneIn[0].u_stride));
                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
                        (*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride));
                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
                        (*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride));
                }
                else
                {
                    /* Blend framing over input: out = frame*alpha + in*(1-alpha)
                     * for Y, and for the subsampled U/V planes. */
                    *( p_out0+y+x*PlaneOut[0].u_stride)=
                        (*(currentFraming->FramingYuv[0].pac_data+(y-topleft[0])\
                            +(x-topleft[1])*currentFraming->FramingYuv[0].u_stride))*alphaBlending;
                    *( p_out0+y+x*PlaneOut[0].u_stride)+=
                        (*(p_in_Y+y+x*PlaneIn[0].u_stride))*(1-alphaBlending);
                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
                        (*(currentFraming->FramingYuv[1].pac_data+((y-topleft[0])>>1)\
                            +((x-topleft[1])>>1)*currentFraming->FramingYuv[1].u_stride))\
                                *alphaBlending;
                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)+=
                        (*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride))*(1-alphaBlending);
                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
                        (*(currentFraming->FramingYuv[2].pac_data+((y-topleft[0])>>1)\
                            +((x-topleft[1])>>1)*currentFraming->FramingYuv[2].u_stride))\
                                *alphaBlending;
                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)+=
                        (*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride))*(1-alphaBlending);
                }
                /* Advance the RGB565 cursor (2 bytes per pixel); when the
                 * framing is wider than the input, skip the clipped tail. */
                if( PlaneIn[0].u_width < (topleft[0] + currentFraming->FramingYuv[0].u_width) &&
                     y == PlaneIn[0].u_width-1)
                {
                    FramingRGB = FramingRGB + 2 \
                        * (topleft[0] + currentFraming->FramingYuv[0].u_width \
                            - PlaneIn[0].u_width + 1);
                }
                else
                {
                    FramingRGB = FramingRGB + 2;
                }
            }
            /**
             * Just copy input plane to output plane */
            else
            {
                *( p_out0+y+x*PlaneOut[0].u_stride)=*(p_in_Y+y+x*PlaneIn[0].u_stride);
                *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
                    *(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride);
                *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
                    *(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride);
            }
        }
    }


    return M4VIFI_OK;
}
+
+
+/**
+ ******************************************************************************
+ * prototype M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pFunctionContext,
+ * M4VIFI_ImagePlane *PlaneIn,
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief This function make a video look as if it was taken in the fifties
+ * @note
+ * @param pUserData (IN) Context
+ * @param pPlaneIn (IN) Input YUV420 planar
+ * @param pPlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiEffectKind (IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ * @return M4ERR_PARAMETER: pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties( M4OSA_Void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiEffectKind )
+{
+ M4VIFI_UInt32 x, y, xShift;
+ M4VIFI_UInt8 *pInY = pPlaneIn[0].pac_data;
+ M4VIFI_UInt8 *pOutY, *pInYbegin;
+ M4VIFI_UInt8 *pInCr,* pOutCr;
+ M4VIFI_Int32 plane_number;
+
+ /* Internal context*/
+ M4xVSS_FiftiesStruct* p_FiftiesData = (M4xVSS_FiftiesStruct *)pUserData;
+
+ /* Check the inputs (debug only) */
+ M4OSA_DEBUG_IF2((pFiftiesData == M4OSA_NULL),M4ERR_PARAMETER,
+ "xVSS: p_FiftiesData is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+ M4OSA_DEBUG_IF2((pPlaneOut == M4OSA_NULL),M4ERR_PARAMETER,
+ "xVSS: p_PlaneOut is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+ M4OSA_DEBUG_IF2((pProgress == M4OSA_NULL),M4ERR_PARAMETER,
+ "xVSS: p_Progress is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+
+ /* Initialize input / output plane pointers */
+ pInY += pPlaneIn[0].u_topleft;
+ pOutY = pPlaneOut[0].pac_data;
+ pInYbegin = pInY;
+
+ /* Initialize the random */
+ if(p_FiftiesData->previousClipTime < 0)
+ {
+ M4OSA_randInit();
+ M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
+ M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
+ p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
+ }
+
+ /* Choose random values if we have reached the duration of a partial effect */
+ else if( (pProgress->uiOutputTime - p_FiftiesData->previousClipTime)\
+ > p_FiftiesData->fiftiesEffectDuration)
+ {
+ M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
+ M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
+ p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
+ }
+
+ /* Put in Sepia the chrominance */
+ for (plane_number = 1; plane_number < 3; plane_number++)
+ {
+ pInCr = pPlaneIn[plane_number].pac_data + pPlaneIn[plane_number].u_topleft;
+ pOutCr = pPlaneOut[plane_number].pac_data + pPlaneOut[plane_number].u_topleft;
+
+ for (x = 0; x < pPlaneOut[plane_number].u_height; x++)
+ {
+ if (1 == plane_number)
+ memset((void *)pOutCr, 117,pPlaneIn[plane_number].u_width); /* U value */
+ else
+ memset((void *)pOutCr, 139,pPlaneIn[plane_number].u_width); /* V value */
+
+ pInCr += pPlaneIn[plane_number].u_stride;
+ pOutCr += pPlaneOut[plane_number].u_stride;
+ }
+ }
+
+ /* Compute the new pixels values */
+ for( x = 0 ; x < pPlaneIn[0].u_height ; x++)
+ {
+ M4VIFI_UInt8 *p_outYtmp, *p_inYtmp;
+
+ /* Compute the xShift (random value) */
+ if (0 == (p_FiftiesData->shiftRandomValue % 5 ))
+ xShift = (x + p_FiftiesData->shiftRandomValue ) % (pPlaneIn[0].u_height - 1);
+ else
+ xShift = (x + (pPlaneIn[0].u_height - p_FiftiesData->shiftRandomValue) ) \
+ % (pPlaneIn[0].u_height - 1);
+
+ /* Initialize the pointers */
+ p_outYtmp = pOutY + 1; /* yShift of 1 pixel */
+ p_inYtmp = pInYbegin + (xShift * pPlaneIn[0].u_stride); /* Apply the xShift */
+
+ for( y = 0 ; y < pPlaneIn[0].u_width ; y++)
+ {
+ /* Set Y value */
+ if (xShift > (pPlaneIn[0].u_height - 4))
+ *p_outYtmp = 40; /* Add some horizontal black lines between the
+ two parts of the image */
+ else if ( y == p_FiftiesData->stripeRandomValue)
+ *p_outYtmp = 90; /* Add a random vertical line for the bulk */
+ else
+ *p_outYtmp = *p_inYtmp;
+
+
+ /* Go to the next pixel */
+ p_outYtmp++;
+ p_inYtmp++;
+
+ /* Restart at the beginning of the line for the last pixel*/
+ if (y == (pPlaneIn[0].u_width - 2))
+ p_outYtmp = pOutY;
+ }
+
+ /* Go to the next line */
+ pOutY += pPlaneOut[0].u_stride;
+ }
+
+ return M4VIFI_OK;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom( )
+ * @brief Zoom in/out video effect functions.
+ * @note The external video function is used only if VideoEffectType is set to
+ * M4VSS3GPP_kVideoEffectType_ZoomIn or M4VSS3GPP_kVideoEffectType_ZoomOut.
+ *
+ * @param pFunctionContext (IN) The function context, previously set by the integrator
+ * @param pInputPlanes (IN) Input YUV420 image: pointer to an array of three valid
+ * image planes (Y, U and V)
+ * @param pOutputPlanes (IN/OUT) Output (filtered) YUV420 image: pointer to an array of
+ * three valid image planes (Y, U and V)
+ * @param pProgress (IN) Set of information about the video transition progress.
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom(
+ M4OSA_Void *pFunctionContext,
+ M4VIFI_ImagePlane *pInputPlanes,
+ M4VIFI_ImagePlane *pOutputPlanes,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiEffectKind
+)
+{
+ M4OSA_UInt32 boxWidth;
+ M4OSA_UInt32 boxHeight;
+ M4OSA_UInt32 boxPosX;
+ M4OSA_UInt32 boxPosY;
+ M4OSA_UInt32 ratio = 0;
+ /* * 1.189207 between ratio */
+ /* zoom between x1 and x16 */
+ M4OSA_UInt32 ratiotab[17] ={1024,1218,1448,1722,2048,2435,2896,3444,4096,4871,5793,\
+ 6889,8192,9742,11585,13777,16384};
+ M4OSA_UInt32 ik;
+
+ M4VIFI_ImagePlane boxPlane[3];
+
+ if(M4xVSS_kVideoEffectType_ZoomOut == (M4OSA_UInt32)pFunctionContext)
+ {
+ //ratio = 16 - (15 * pProgress->uiProgress)/1000;
+ ratio = 16 - pProgress->uiProgress / 66 ;
+ }
+ else if(M4xVSS_kVideoEffectType_ZoomIn == (M4OSA_UInt32)pFunctionContext)
+ {
+ //ratio = 1 + (15 * pProgress->uiProgress)/1000;
+ ratio = 1 + pProgress->uiProgress / 66 ;
+ }
+
+ for(ik=0;ik<3;ik++){
+
+ boxPlane[ik].u_stride = pInputPlanes[ik].u_stride;
+ boxPlane[ik].pac_data = pInputPlanes[ik].pac_data;
+
+ boxHeight = ( pInputPlanes[ik].u_height << 10 ) / ratiotab[ratio];
+ boxWidth = ( pInputPlanes[ik].u_width << 10 ) / ratiotab[ratio];
+ boxPlane[ik].u_height = (boxHeight)&(~1);
+ boxPlane[ik].u_width = (boxWidth)&(~1);
+
+ boxPosY = (pInputPlanes[ik].u_height >> 1) - (boxPlane[ik].u_height >> 1);
+ boxPosX = (pInputPlanes[ik].u_width >> 1) - (boxPlane[ik].u_width >> 1);
+ boxPlane[ik].u_topleft = boxPosY * boxPlane[ik].u_stride + boxPosX;
+ }
+
+ M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL, (M4VIFI_ImagePlane*)&boxPlane, pOutputPlanes);
+
+ /**
+ * Return */
+ return(M4NO_ERROR);
+}
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_AlphaMagic( M4OSA_Void *userData,
+ * M4VIFI_ImagePlane PlaneIn1[3],
+ * M4VIFI_ImagePlane PlaneIn2[3],
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief This function apply a color effect on an input YUV420 planar frame
+ * @note
+ * @param userData (IN) Contains a pointer on a settings structure
+ * @param PlaneIn1 (IN) Input YUV420 planar from video 1
+ * @param PlaneIn2 (IN) Input YUV420 planar from video 2
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiTransitionKind(IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_AlphaMagic( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+ M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiTransitionKind)
+{
+
+ M4OSA_ERR err;
+
+ M4xVSS_internal_AlphaMagicSettings* alphaContext;
+ M4VIFI_Int32 alphaProgressLevel;
+
+ M4VIFI_ImagePlane* planeswap;
+ M4VIFI_UInt32 x,y;
+
+ M4VIFI_UInt8 *p_out0;
+ M4VIFI_UInt8 *p_out1;
+ M4VIFI_UInt8 *p_out2;
+ M4VIFI_UInt8 *alphaMask;
+ /* "Old image" */
+ M4VIFI_UInt8 *p_in1_Y;
+ M4VIFI_UInt8 *p_in1_U;
+ M4VIFI_UInt8 *p_in1_V;
+ /* "New image" */
+ M4VIFI_UInt8 *p_in2_Y;
+ M4VIFI_UInt8 *p_in2_U;
+ M4VIFI_UInt8 *p_in2_V;
+
+ err = M4NO_ERROR;
+
+ alphaContext = (M4xVSS_internal_AlphaMagicSettings*)userData;
+
+ alphaProgressLevel = (pProgress->uiProgress * 128)/1000;
+
+ if( alphaContext->isreverse != M4OSA_FALSE)
+ {
+ alphaProgressLevel = 128 - alphaProgressLevel;
+ planeswap = PlaneIn1;
+ PlaneIn1 = PlaneIn2;
+ PlaneIn2 = planeswap;
+ }
+
+ p_out0 = PlaneOut[0].pac_data;
+ p_out1 = PlaneOut[1].pac_data;
+ p_out2 = PlaneOut[2].pac_data;
+
+ alphaMask = alphaContext->pPlane->pac_data;
+
+ /* "Old image" */
+ p_in1_Y = PlaneIn1[0].pac_data;
+ p_in1_U = PlaneIn1[1].pac_data;
+ p_in1_V = PlaneIn1[2].pac_data;
+ /* "New image" */
+ p_in2_Y = PlaneIn2[0].pac_data;
+ p_in2_U = PlaneIn2[1].pac_data;
+ p_in2_V = PlaneIn2[2].pac_data;
+
+ /**
+ * For each column ... */
+ for( y=0; y<PlaneOut->u_height; y++ )
+ {
+ /**
+ * ... and each row of the alpha mask */
+ for( x=0; x<PlaneOut->u_width; x++ )
+ {
+ /**
+ * If the value of the current pixel of the alpha mask is > to the current time
+ * ( current time is normalized on [0-255] ) */
+ if( alphaProgressLevel < alphaMask[x+y*PlaneOut->u_width] )
+ {
+ /* We keep "old image" in output plane */
+ *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in1_Y+x+y*PlaneIn1[0].u_stride);
+ *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+ *(p_in1_U+(x>>1)+(y>>1)*PlaneIn1[1].u_stride);
+ *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+ *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride);
+ }
+ else
+ {
+ /* We take "new image" in output plane */
+ *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in2_Y+x+y*PlaneIn2[0].u_stride);
+ *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+ *(p_in2_U+(x>>1)+(y>>1)*PlaneIn2[1].u_stride);
+ *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+ *(p_in2_V+(x>>1)+(y>>1)*PlaneIn2[2].u_stride);
+ }
+ }
+ }
+
+ return(err);
+}
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_AlphaMagicBlending( M4OSA_Void *userData,
+ * M4VIFI_ImagePlane PlaneIn1[3],
+ * M4VIFI_ImagePlane PlaneIn2[3],
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief This function apply a color effect on an input YUV420 planar frame
+ * @note
+ * @param userData (IN) Contains a pointer on a settings structure
+ * @param PlaneIn1 (IN) Input YUV420 planar from video 1
+ * @param PlaneIn2 (IN) Input YUV420 planar from video 2
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-100)
+ * @param uiTransitionKind(IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_AlphaMagicBlending( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+ M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiTransitionKind)
+{
+ M4OSA_ERR err;
+
+ M4xVSS_internal_AlphaMagicSettings* alphaContext;
+ M4VIFI_Int32 alphaProgressLevel;
+ M4VIFI_Int32 alphaBlendLevelMin;
+ M4VIFI_Int32 alphaBlendLevelMax;
+ M4VIFI_Int32 alphaBlendRange;
+
+ M4VIFI_ImagePlane* planeswap;
+ M4VIFI_UInt32 x,y;
+ M4VIFI_Int32 alphaMaskValue;
+
+ M4VIFI_UInt8 *p_out0;
+ M4VIFI_UInt8 *p_out1;
+ M4VIFI_UInt8 *p_out2;
+ M4VIFI_UInt8 *alphaMask;
+ /* "Old image" */
+ M4VIFI_UInt8 *p_in1_Y;
+ M4VIFI_UInt8 *p_in1_U;
+ M4VIFI_UInt8 *p_in1_V;
+ /* "New image" */
+ M4VIFI_UInt8 *p_in2_Y;
+ M4VIFI_UInt8 *p_in2_U;
+ M4VIFI_UInt8 *p_in2_V;
+
+
+ err = M4NO_ERROR;
+
+ alphaContext = (M4xVSS_internal_AlphaMagicSettings*)userData;
+
+ /* Map the transition progress (0..1000) onto the mask grey-level scale (0..128). */
+ alphaProgressLevel = (pProgress->uiProgress * 128)/1000;
+
+ /* In reverse mode, mirror the progress and swap the two inputs so the same
+ mask drives the transition backwards. */
+ if( alphaContext->isreverse != M4OSA_FALSE)
+ {
+ alphaProgressLevel = 128 - alphaProgressLevel;
+ planeswap = PlaneIn1;
+ PlaneIn1 = PlaneIn2;
+ PlaneIn2 = planeswap;
+ }
+
+ /* Blending window centred on the current progress level: mask values inside
+ [min, max] are cross-faded; values outside select one input only. */
+ alphaBlendLevelMin = alphaProgressLevel-alphaContext->blendingthreshold;
+
+ alphaBlendLevelMax = alphaProgressLevel+alphaContext->blendingthreshold;
+
+ alphaBlendRange = (alphaContext->blendingthreshold)*2;
+
+ p_out0 = PlaneOut[0].pac_data;
+ p_out1 = PlaneOut[1].pac_data;
+ p_out2 = PlaneOut[2].pac_data;
+
+ /* Grey-level mask driving the transition; assumed to be at least
+ u_width x u_height (same size as the output Y plane) -- TODO confirm. */
+ alphaMask = alphaContext->pPlane->pac_data;
+
+ /* "Old image" */
+ p_in1_Y = PlaneIn1[0].pac_data;
+ p_in1_U = PlaneIn1[1].pac_data;
+ p_in1_V = PlaneIn1[2].pac_data;
+ /* "New image" */
+ p_in2_Y = PlaneIn2[0].pac_data;
+ p_in2_U = PlaneIn2[1].pac_data;
+ p_in2_V = PlaneIn2[2].pac_data;
+
+ /* apply Alpha Magic on each pixel */
+ /* Note: (x>>1),(y>>1) index the U/V planes, which are subsampled 2x2 in
+ YUV420; each chroma sample is therefore rewritten for every pixel of its
+ 2x2 luma block. */
+ for( y=0; y<PlaneOut->u_height; y++ )
+ {
+ for( x=0; x<PlaneOut->u_width; x++ )
+ {
+ alphaMaskValue = alphaMask[x+y*PlaneOut->u_width];
+ if( alphaBlendLevelMax < alphaMaskValue )
+ {
+ /* We keep "old image" in output plane */
+ *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in1_Y+x+y*PlaneIn1[0].u_stride);
+ *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+ *(p_in1_U+(x>>1)+(y>>1)*PlaneIn1[1].u_stride);
+ *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+ *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride);
+ }
+ else if( (alphaBlendLevelMin < alphaMaskValue)&&
+ (alphaMaskValue <= alphaBlendLevelMax ) )
+ {
+ /* We blend "old and new image" in output plane */
+ /* Linear cross-fade weighted by the mask's position inside the
+ blending window. */
+ *( p_out0+x+y*PlaneOut[0].u_stride)=(M4VIFI_UInt8)
+ (( (alphaMaskValue-alphaBlendLevelMin)*( *(p_in1_Y+x+y*PlaneIn1[0].u_stride))
+ +(alphaBlendLevelMax-alphaMaskValue)\
+ *( *(p_in2_Y+x+y*PlaneIn2[0].u_stride)) )/alphaBlendRange );
+
+ *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=(M4VIFI_UInt8)\
+ (( (alphaMaskValue-alphaBlendLevelMin)*( *(p_in1_U+(x>>1)+(y>>1)\
+ *PlaneIn1[1].u_stride))
+ +(alphaBlendLevelMax-alphaMaskValue)*( *(p_in2_U+(x>>1)+(y>>1)\
+ *PlaneIn2[1].u_stride)) )/alphaBlendRange );
+
+ *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+ (M4VIFI_UInt8)(( (alphaMaskValue-alphaBlendLevelMin)\
+ *( *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride))
+ +(alphaBlendLevelMax-alphaMaskValue)*( *(p_in2_V+(x>>1)+(y>>1)\
+ *PlaneIn2[2].u_stride)) )/alphaBlendRange );
+
+ }
+ else
+ {
+ /* We take "new image" in output plane */
+ *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in2_Y+x+y*PlaneIn2[0].u_stride);
+ *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+ *(p_in2_U+(x>>1)+(y>>1)*PlaneIn2[1].u_stride);
+ *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+ *(p_in2_V+(x>>1)+(y>>1)*PlaneIn2[2].u_stride);
+ }
+ }
+ }
+
+ return(err);
+}
+
+/* Address of sample (x, y) in 'plane', honouring u_topleft and u_stride. */
+#define M4XXX_SampleAddress(plane, x, y) ( (plane).pac_data + (plane).u_topleft + (y)\
+ * (plane).u_stride + (x) )
+
+/* Copy one image plane row by row; source and destination strides may differ,
+ the copied width/height are taken from the destination plane. */
+static void M4XXX_CopyPlane(M4VIFI_ImagePlane* dest, M4VIFI_ImagePlane* source)
+{
+ M4OSA_UInt32 height, width, sourceStride, destStride, y;
+ M4OSA_MemAddr8 sourceWalk, destWalk;
+
+ /* cache the vars used in the loop so as to avoid them being repeatedly fetched and
+ recomputed from memory. */
+ height = dest->u_height;
+ width = dest->u_width;
+
+ sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(*source, 0, 0);
+ sourceStride = source->u_stride;
+
+ destWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(*dest, 0, 0);
+ destStride = dest->u_stride;
+
+ for (y=0; y<height; y++)
+ {
+ memcpy((void *)destWalk, (void *)sourceWalk, width);
+ destWalk += destStride;
+ sourceWalk += sourceStride;
+ }
+}
+
+/* Compose one frame of a vertical slide: the output shows topPlane shifted up
+ by the slide amount, with the gap at the bottom filled from the top of
+ bottomPlane. 'shiftUV' is expressed in U/V-plane rows (the Y plane uses
+ 2*shiftUV rows, per YUV420 subsampling). */
+static M4OSA_ERR M4xVSS_VerticalSlideTransition(M4VIFI_ImagePlane* topPlane,
+ M4VIFI_ImagePlane* bottomPlane,
+ M4VIFI_ImagePlane *PlaneOut,
+ M4OSA_UInt32 shiftUV)
+{
+ M4OSA_UInt32 i;
+
+ /* Do three loops, one for each plane type, in order to avoid having too many buffers
+ "hot" at the same time (better for cache). */
+ for (i=0; i<3; i++)
+ {
+ M4OSA_UInt32 topPartHeight, bottomPartHeight, width, sourceStride, destStride, y;
+ M4OSA_MemAddr8 sourceWalk, destWalk;
+
+ /* cache the vars used in the loop so as to avoid them being repeatedly fetched and
+ recomputed from memory. */
+ if (0 == i) /* Y plane */
+ {
+ bottomPartHeight = 2*shiftUV;
+ }
+ else /* U and V planes */
+ {
+ bottomPartHeight = shiftUV;
+ }
+ topPartHeight = PlaneOut[i].u_height - bottomPartHeight;
+ width = PlaneOut[i].u_width;
+
+ /* The top part of the output comes from topPlane starting at row
+ bottomPartHeight (i.e. topPlane scrolled up). */
+ sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(topPlane[i], 0, bottomPartHeight);
+ sourceStride = topPlane[i].u_stride;
+
+ destWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], 0, 0);
+ destStride = PlaneOut[i].u_stride;
+
+ /* First the part from the top source clip frame. */
+ for (y=0; y<topPartHeight; y++)
+ {
+ memcpy((void *)destWalk, (void *)sourceWalk, width);
+ destWalk += destStride;
+ sourceWalk += sourceStride;
+ }
+
+ /* and now change the vars to copy the part from the bottom source clip frame. */
+ sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(bottomPlane[i], 0, 0);
+ sourceStride = bottomPlane[i].u_stride;
+
+ /* destWalk is already at M4XXX_SampleAddress(PlaneOut[i], 0, topPartHeight) */
+
+ for (y=0; y<bottomPartHeight; y++)
+ {
+ memcpy((void *)destWalk, (void *)sourceWalk, width);
+ destWalk += destStride;
+ sourceWalk += sourceStride;
+ }
+ }
+ return M4NO_ERROR;
+}
+
+/* Compose one frame of a horizontal slide: the output shows leftPlane shifted
+ left by the slide amount, with the gap on the right filled from the left edge
+ of rightPlane. 'shiftUV' is expressed in U/V-plane columns (the Y plane uses
+ 2*shiftUV columns, per YUV420 subsampling). */
+static M4OSA_ERR M4xVSS_HorizontalSlideTransition(M4VIFI_ImagePlane* leftPlane,
+ M4VIFI_ImagePlane* rightPlane,
+ M4VIFI_ImagePlane *PlaneOut,
+ M4OSA_UInt32 shiftUV)
+{
+ M4OSA_UInt32 i, y;
+ /* If we shifted by exactly 0, or by the width of the target image, then we would get the left
+ frame or the right frame, respectively. These cases aren't handled too well by the general
+ handling, since they result in 0-size memcopies, so might as well particularize them. */
+
+ if (0 == shiftUV) /* output left frame */
+ {
+ for (i = 0; i<3; i++) /* for each YUV plane */
+ {
+ M4XXX_CopyPlane(&(PlaneOut[i]), &(leftPlane[i]));
+ }
+
+ return M4NO_ERROR;
+ }
+
+ if (PlaneOut[1].u_width == shiftUV) /* output right frame */
+ {
+ for (i = 0; i<3; i++) /* for each YUV plane */
+ {
+ M4XXX_CopyPlane(&(PlaneOut[i]), &(rightPlane[i]));
+ }
+
+ return M4NO_ERROR;
+ }
+
+
+ /* Do three loops, one for each plane type, in order to avoid having too many buffers
+ "hot" at the same time (better for cache). */
+ for (i=0; i<3; i++)
+ {
+ M4OSA_UInt32 height, leftPartWidth, rightPartWidth;
+ M4OSA_UInt32 leftStride, rightStride, destStride;
+ M4OSA_MemAddr8 leftWalk, rightWalk, destWalkLeft, destWalkRight;
+
+ /* cache the vars used in the loop so as to avoid them being repeatedly fetched
+ and recomputed from memory. */
+ height = PlaneOut[i].u_height;
+
+ if (0 == i) /* Y plane */
+ {
+ rightPartWidth = 2*shiftUV;
+ }
+ else /* U and V planes */
+ {
+ rightPartWidth = shiftUV;
+ }
+ leftPartWidth = PlaneOut[i].u_width - rightPartWidth;
+
+ /* Left part of the output comes from leftPlane starting at column
+ rightPartWidth (i.e. leftPlane scrolled left). */
+ leftWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(leftPlane[i], rightPartWidth, 0);
+ leftStride = leftPlane[i].u_stride;
+
+ rightWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(rightPlane[i], 0, 0);
+ rightStride = rightPlane[i].u_stride;
+
+ destWalkLeft = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], 0, 0);
+ destWalkRight = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], leftPartWidth, 0);
+ destStride = PlaneOut[i].u_stride;
+
+ /* Each output row is stitched from a left-plane segment and a
+ right-plane segment. */
+ for (y=0; y<height; y++)
+ {
+ memcpy((void *)destWalkLeft, (void *)leftWalk, leftPartWidth);
+ leftWalk += leftStride;
+
+ memcpy((void *)destWalkRight, (void *)rightWalk, rightPartWidth);
+ rightWalk += rightStride;
+
+ destWalkLeft += destStride;
+ destWalkRight += destStride;
+ }
+ }
+
+ return M4NO_ERROR;
+}
+
+
+/* Slide-transition entry point: dispatches to the horizontal or vertical
+ helper according to the direction stored in userData, converting the
+ progress (0..1000) into a shift expressed in U/V-plane units. For the
+ "out" directions the inputs are swapped and the shift reversed so the
+ previous clip is the one that leaves the frame. 'uiTransitionKind' is
+ unused. */
+M4OSA_ERR M4xVSS_SlideTransition( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+ M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiTransitionKind)
+{
+ M4xVSS_internal_SlideTransitionSettings* settings =
+ (M4xVSS_internal_SlideTransitionSettings*)userData;
+ M4OSA_UInt32 shiftUV;
+
+ M4OSA_TRACE1_0("inside M4xVSS_SlideTransition");
+ if ((M4xVSS_SlideTransition_RightOutLeftIn == settings->direction)
+ || (M4xVSS_SlideTransition_LeftOutRightIn == settings->direction) )
+ {
+ /* horizontal slide */
+ shiftUV = ((PlaneOut[1]).u_width * pProgress->uiProgress)/1000;
+ M4OSA_TRACE1_2("M4xVSS_SlideTransition upper: shiftUV = %d,progress = %d",
+ shiftUV,pProgress->uiProgress );
+ if (M4xVSS_SlideTransition_RightOutLeftIn == settings->direction)
+ {
+ /* Put the previous clip frame right, the next clip frame left, and reverse shiftUV
+ (since it's a shift from the left frame) so that we start out on the right
+ (i.e. not left) frame, it
+ being from the previous clip. */
+ return M4xVSS_HorizontalSlideTransition(PlaneIn2, PlaneIn1, PlaneOut,
+ (PlaneOut[1]).u_width - shiftUV);
+ }
+ else /* Left out, right in*/
+ {
+ return M4xVSS_HorizontalSlideTransition(PlaneIn1, PlaneIn2, PlaneOut, shiftUV);
+ }
+ }
+ else
+ {
+ /* vertical slide */
+ shiftUV = ((PlaneOut[1]).u_height * pProgress->uiProgress)/1000;
+ M4OSA_TRACE1_2("M4xVSS_SlideTransition bottom: shiftUV = %d,progress = %d",shiftUV,
+ pProgress->uiProgress );
+ if (M4xVSS_SlideTransition_TopOutBottomIn == settings->direction)
+ {
+ /* Put the previous clip frame top, the next clip frame bottom. */
+ return M4xVSS_VerticalSlideTransition(PlaneIn1, PlaneIn2, PlaneOut, shiftUV);
+ }
+ else /* Bottom out, top in */
+ {
+ return M4xVSS_VerticalSlideTransition(PlaneIn2, PlaneIn1, PlaneOut,
+ (PlaneOut[1]).u_height - shiftUV);
+ }
+ }
+
+ /* Note: it might be worthwhile to do some parameter checking, see if dimensions match, etc.,
+ at least in debug mode. */
+}
+
+
+/**
+ ******************************************************************************
+ * prototype M4xVSS_FadeBlackTransition(M4OSA_Void *userData,
+ * M4VIFI_ImagePlane PlaneIn1[3],
+ * M4VIFI_ImagePlane PlaneIn2[3],
+ * M4VIFI_ImagePlane *PlaneOut,
+ * M4VSS3GPP_ExternalProgress *pProgress,
+ * M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief This function applies a fade to black and then a fade from black
+ * @note
+ * @param userData (IN) Transition context (unused by this function)
+ * @param PlaneIn1 (IN) Input YUV420 planar from video 1 (faded to black)
+ * @param PlaneIn2 (IN) Input YUV420 planar from video 2 (faded from black)
+ * @param PlaneOut (IN/OUT) Output YUV420 planar
+ * @param pProgress (IN/OUT) Progress indication (0-1000)
+ * @param uiTransitionKind(IN) Unused
+ *
+ * @return M4VIFI_OK: No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_FadeBlackTransition(M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+ M4VIFI_ImagePlane PlaneIn2[3],
+ M4VIFI_ImagePlane *PlaneOut,
+ M4VSS3GPP_ExternalProgress *pProgress,
+ M4OSA_UInt32 uiTransitionKind)
+{
+ M4OSA_Int32 tmp = 0;
+ M4OSA_ERR err = M4NO_ERROR;
+
+
+ /* First half of the transition: fade video 1 out to black
+ (luma scale goes 1024 at progress 0 -> 0 at progress 500). */
+ if((pProgress->uiProgress) < 500)
+ {
+ /**
+ * Compute where we are in the effect (scale is 0->1024) */
+ tmp = (M4OSA_Int32)((1.0 - ((M4OSA_Float)(pProgress->uiProgress*2)/1000)) * 1024 );
+
+ /**
+ * Apply the darkening effect */
+ err = M4VFL_modifyLumaWithScale( (M4ViComImagePlane*)PlaneIn1,
+ (M4ViComImagePlane*)PlaneOut, tmp, M4OSA_NULL);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4xVSS_FadeBlackTransition: M4VFL_modifyLumaWithScale returns\
+ error 0x%x, returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", err);
+ return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+ }
+ }
+ /* Second half: fade video 2 in from black
+ (luma scale goes 0 at progress 500 -> 1024 at progress 1000). */
+ else
+ {
+ /**
+ * Compute where we are in the effect (scale is 0->1024). */
+ tmp = (M4OSA_Int32)( (((M4OSA_Float)(((pProgress->uiProgress-500)*2))/1000)) * 1024 );
+
+ /**
+ * Apply the darkening effect */
+ err = M4VFL_modifyLumaWithScale((M4ViComImagePlane*)PlaneIn2,
+ (M4ViComImagePlane*)PlaneOut, tmp, M4OSA_NULL);
+ if (M4NO_ERROR != err)
+ {
+ M4OSA_TRACE1_1("M4xVSS_FadeBlackTransition:\
+ M4VFL_modifyLumaWithScale returns error 0x%x,\
+ returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", err);
+ return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+ }
+ }
+
+
+ return M4VIFI_OK;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext,
+ * M4OSA_Void* pBufferIn,
+ * M4OSA_Void* pBufferOut,
+ * M4OSA_UInt32* convertedSize)
+ *
+ * @brief This function convert from the customer format to UTF8
+ * @note
+ * @param pContext (IN) The integrator own context
+ * @param pBufferIn (IN) Buffer to convert
+ * @param pBufferOut (OUT) Converted buffer
+ * @param convertedSize (OUT) Size of the converted buffer
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+ M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_ERR err;
+
+ /* NOTE(review): pBufferOut is a by-value pointer parameter, so this
+ assignment (and the one near the end of the function) only changes the
+ local copy and never reaches the caller. Callers apparently must read
+ UTFConversionContext.pTempOutConversionBuffer themselves -- confirm at
+ call sites. Also note *convertedSize is only written when a conversion
+ actually happens. */
+ pBufferOut = pBufferIn;
+ if(xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+ {
+ M4OSA_UInt32 ConvertedSize = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
+
+ /* Clear the shared temporary buffer before converting into it. */
+ memset((void *)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,0
+ ,(M4OSA_UInt32)xVSS_context->UTFConversionContext.m_TempOutConversionSize);
+
+ err = xVSS_context->UTFConversionContext.pConvToUTF8Fct((M4OSA_Void*)pBufferIn,
+ (M4OSA_UInt8*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+ (M4OSA_UInt32*)&ConvertedSize);
+ if(err == M4xVSSWAR_BUFFER_OUT_TOO_SMALL)
+ {
+ M4OSA_TRACE2_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+
+ /*free too small buffer*/
+ free(xVSS_context->\
+ UTFConversionContext.pTempOutConversionBuffer);
+
+ /*re-allocate the buffer*/
+ /* ConvertedSize was updated by the converter to the required size. */
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+ (M4OSA_Void*)M4OSA_32bitAlignedMalloc(ConvertedSize*sizeof(M4OSA_UInt8), M4VA,
+ (M4OSA_Char *)"M4xVSS_internalConvertToUTF8: UTF conversion buffer");
+ if(M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertToUTF8");
+ return M4ERR_ALLOC;
+ }
+ xVSS_context->UTFConversionContext.m_TempOutConversionSize = ConvertedSize;
+
+ memset((void *)xVSS_context->\
+ UTFConversionContext.pTempOutConversionBuffer,0,(M4OSA_UInt32)xVSS_context->\
+ UTFConversionContext.m_TempOutConversionSize);
+
+ /* Retry the conversion with the bigger buffer. */
+ err = xVSS_context->UTFConversionContext.pConvToUTF8Fct((M4OSA_Void*)pBufferIn,
+ (M4OSA_Void*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+ (M4OSA_UInt32*)&ConvertedSize);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+ return err;
+ }
+ }
+ else if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+ return err;
+ }
+ /*decoded path*/
+ pBufferOut = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ (*convertedSize) = ConvertedSize;
+ }
+ return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext)
+ *
+ * @brief This function convert from UTF8 to the customer format
+ * @note
+ * @param pContext (IN) The integrator own context
+ * @param pBufferIn (IN) Buffer to convert
+ * @param pBufferOut (OUT) Converted buffer
+ * @param convertedSize (OUT) Size of the converted buffer
+ *
+ * @return M4NO_ERROR: No error
+ * @return M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+ M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize)
+{
+ M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+ M4OSA_ERR err;
+
+ /* NOTE(review): same latent defect as M4xVSS_internalConvertToUTF8 --
+ pBufferOut is passed by value, so neither this assignment nor the one at
+ the end of the function is visible to the caller; and *convertedSize is
+ only written when a conversion actually happens. Confirm at call sites. */
+ pBufferOut = pBufferIn;
+ if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+ && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+ {
+ M4OSA_UInt32 ConvertedSize = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
+
+ /* Clear the shared temporary buffer before converting into it. */
+ memset((void *)xVSS_context->\
+ UTFConversionContext.pTempOutConversionBuffer,0,(M4OSA_UInt32)xVSS_context->\
+ UTFConversionContext.m_TempOutConversionSize);
+
+ err = xVSS_context->UTFConversionContext.pConvFromUTF8Fct\
+ ((M4OSA_Void*)pBufferIn,(M4OSA_UInt8*)xVSS_context->\
+ UTFConversionContext.pTempOutConversionBuffer, (M4OSA_UInt32*)&ConvertedSize);
+ if(err == M4xVSSWAR_BUFFER_OUT_TOO_SMALL)
+ {
+ M4OSA_TRACE2_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+
+ /*free too small buffer*/
+ free(xVSS_context->\
+ UTFConversionContext.pTempOutConversionBuffer);
+
+ /*re-allocate the buffer*/
+ /* ConvertedSize was updated by the converter to the required size. */
+ xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+ (M4OSA_Void*)M4OSA_32bitAlignedMalloc(ConvertedSize*sizeof(M4OSA_UInt8), M4VA,
+ (M4OSA_Char *)"M4xVSS_internalConvertFromUTF8: UTF conversion buffer");
+ if(M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
+ {
+ M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertFromUTF8");
+ return M4ERR_ALLOC;
+ }
+ xVSS_context->UTFConversionContext.m_TempOutConversionSize = ConvertedSize;
+
+ memset((void *)xVSS_context->\
+ UTFConversionContext.pTempOutConversionBuffer,0,(M4OSA_UInt32)xVSS_context->\
+ UTFConversionContext.m_TempOutConversionSize);
+
+ /* Retry the conversion with the bigger buffer. */
+ err = xVSS_context->UTFConversionContext.pConvFromUTF8Fct((M4OSA_Void*)pBufferIn,
+ (M4OSA_Void*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+ (M4OSA_UInt32*)&ConvertedSize);
+ if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+ return err;
+ }
+ }
+ else if(err != M4NO_ERROR)
+ {
+ M4OSA_TRACE1_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+ return err;
+ }
+ /*decoded path*/
+ pBufferOut = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+ (*convertedSize) = ConvertedSize;
+ }
+
+
+ return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/VideoEditorResampler.cpp b/libvideoeditor/vss/src/VideoEditorResampler.cpp
new file mode 100755
index 0000000..38dffb7
--- /dev/null
+++ b/libvideoeditor/vss/src/VideoEditorResampler.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 1
+#include <audio_utils/primitives.h>
+#include <utils/Log.h>
+#include "AudioMixer.h"
+#include "VideoEditorResampler.h"
+
+namespace android {
+
+// Thin AudioBufferProvider wrapper around the AudioFlinger AudioResampler,
+// exposed to the video-editor C code through the LVAudio* C functions below.
+struct VideoEditorResampler : public AudioBufferProvider {
+
+ public:
+
+ virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
+ virtual void releaseBuffer(Buffer* buffer);
+
+ enum { //Sampling freq
+ kFreq8000Hz = 8000,
+ kFreq11025Hz = 11025,
+ kFreq12000Hz = 12000,
+ kFreq16000Hz = 16000,
+ kFreq22050Hz = 22050,
+ kFreq24000Hz = 24000,
+ kFreq32000Hz = 32000,
+ kFreq44100 = 44100,
+ kFreq48000 = 48000,
+ };
+
+ AudioResampler *mResampler; // underlying sample-rate converter
+ int16_t* mInput; // staging buffer for input samples (allocated in LVAudiosetSampleRate)
+ int nbChannels; // input channel count
+ int nbSamples; // input frames staged for the current conversion
+ M4OSA_Int32 outSamplingRate; // output rate in Hz
+ M4OSA_Int32 inSamplingRate; // input rate in Hz
+
+ int16_t *mTmpInBuffer; // heap copy handed out via getNextBuffer()
+};
+
+#define MAX_SAMPLEDURATION_FOR_CONVERTION 40 //ms
+
+// AudioBufferProvider callback: hands the resampler a heap copy of the input
+// samples previously staged in mInput. 'pts' is unused.
+status_t VideoEditorResampler::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, int64_t pts) {
+
+ uint32_t dataSize = pBuffer->frameCount * this->nbChannels * sizeof(int16_t);
+ // NOTE(review): malloc result is not checked; the memcpy below would
+ // crash on allocation failure.
+ mTmpInBuffer = (int16_t*)malloc(dataSize);
+ // Assumes mInput holds at least frameCount*nbChannels samples -- it is
+ // filled by LVAudioresample_LowQuality(); TODO confirm frameCount can
+ // never exceed what was staged there.
+ memcpy(mTmpInBuffer, this->mInput, dataSize);
+ pBuffer->raw = (void*)mTmpInBuffer;
+
+ return OK;
+}
+
+// AudioBufferProvider callback: release the temporary buffer handed out by
+// getNextBuffer() and mark the provider buffer as drained.
+void VideoEditorResampler::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) {
+
+ void *data = pBuffer->raw;
+ if (data != NULL) {
+ free(data);
+ pBuffer->raw = NULL;
+ mTmpInBuffer = NULL;
+ }
+ pBuffer->frameCount = 0;
+}
+
+extern "C" {
+
+// Create a resampler wrapper for the given input format and output rate.
+// Returns NULL on failure. 'quality' is unused (AudioResampler::DEFAULT is
+// always requested). The input rate defaults to 32 kHz until
+// LVAudiosetSampleRate() is called.
+M4OSA_Context LVAudioResamplerCreate(M4OSA_Int32 bitDepth, M4OSA_Int32 inChannelCount,
+ M4OSA_Int32 sampleRate, M4OSA_Int32 quality) {
+
+ VideoEditorResampler *context = new VideoEditorResampler();
+ context->mResampler = AudioResampler::create(
+ bitDepth, inChannelCount, sampleRate, AudioResampler::DEFAULT);
+ if (context->mResampler == NULL) {
+ // Fix: the wrapper object was previously leaked when the underlying
+ // AudioResampler could not be created.
+ delete context;
+ return NULL;
+ }
+ context->mResampler->setSampleRate(android::VideoEditorResampler::kFreq32000Hz);
+ context->mResampler->setVolume(0x1000, 0x1000);
+ context->nbChannels = inChannelCount;
+ context->outSamplingRate = sampleRate;
+ context->mInput = NULL;
+ context->mTmpInBuffer = NULL;
+
+ return ((M4OSA_Context )context);
+}
+
+
+// Set the input sample rate and (re)allocate the input staging buffer sized
+// for MAX_SAMPLEDURATION_FOR_CONVERTION ms of audio at that rate.
+void LVAudiosetSampleRate(M4OSA_Context resamplerContext, M4OSA_Int32 inSampleRate) {
+
+ VideoEditorResampler *context =
+ (VideoEditorResampler *)resamplerContext;
+ context->mResampler->setSampleRate(inSampleRate);
+ /*
+ * nbSamples is calculated for 40ms worth of data;hence sample rate
+ * is used to calculate the nbSamples
+ */
+ context->inSamplingRate = inSampleRate;
+ // Fix: release any previously allocated staging buffer so repeated calls
+ // (e.g. a sample-rate change) do not leak it. free(NULL) is a no-op.
+ free(context->mInput);
+ // Allocate buffer for maximum allowed number of samples.
+ // NOTE(review): allocation failure is not reported to the caller; a later
+ // memcpy into mInput would then crash.
+ context->mInput = (int16_t*)malloc( (inSampleRate * MAX_SAMPLEDURATION_FOR_CONVERTION *
+ context->nbChannels * sizeof(int16_t)) / 1000);
+}
+
+// Forward the left/right gain settings to the underlying resampler.
+void LVAudiosetVolume(M4OSA_Context resamplerContext, M4OSA_Int16 left, M4OSA_Int16 right) {
+
+ VideoEditorResampler *context = (VideoEditorResampler *)resamplerContext;
+ context->mResampler->setVolume(left, right);
+}
+
+// Destroy a resampler wrapper created by LVAudioResamplerCreate(), releasing
+// the staging buffers and the underlying AudioResampler.
+void LVDestroy(M4OSA_Context resamplerContext) {
+
+ VideoEditorResampler *context =
+ (VideoEditorResampler *)resamplerContext;
+
+ // Fix: the original only tested 'context != NULL' after its members had
+ // already been dereferenced; guard up front instead.
+ if (context == NULL) {
+ return;
+ }
+
+ // free(NULL) and delete on NULL are no-ops, so per-member guards are
+ // unnecessary.
+ free(context->mTmpInBuffer);
+ context->mTmpInBuffer = NULL;
+
+ free(context->mInput);
+ context->mInput = NULL;
+
+ delete context->mResampler;
+ context->mResampler = NULL;
+
+ delete context;
+}
+
+// Resample 'input' to 'outFrameCount' output frames into 'out'. Requires
+// LVAudiosetSampleRate() to have been called first (mInput allocated and
+// rates configured) -- TODO confirm all callers guarantee this. The input is
+// staged in mInput, pulled back by getNextBuffer(), resampled into a 32-bit
+// intermediate buffer, then converted back to 16-bit.
+void LVAudioresample_LowQuality(M4OSA_Int16* out, M4OSA_Int16* input,
+ M4OSA_Int32 outFrameCount, M4OSA_Context resamplerContext) {
+
+ VideoEditorResampler *context =
+ (VideoEditorResampler *)resamplerContext;
+ int32_t *pTmpBuffer = NULL;
+
+ // Number of input frames needed to produce outFrameCount output frames.
+ context->nbSamples = (context->inSamplingRate * outFrameCount) / context->outSamplingRate;
+ memcpy(context->mInput,input,(context->nbSamples * context->nbChannels * sizeof(int16_t)));
+
+ /*
+ SRC module always gives stereo output, hence 2 for stereo audio
+ */
+ // NOTE(review): malloc result is not checked before the memset below.
+ pTmpBuffer = (int32_t*)malloc(outFrameCount * 2 * sizeof(int32_t));
+ memset(pTmpBuffer, 0x00, outFrameCount * 2 * sizeof(int32_t));
+
+ context->mResampler->resample((int32_t *)pTmpBuffer,
+ (size_t)outFrameCount, (VideoEditorResampler *)resamplerContext);
+ // Convert back to 16 bits
+ ditherAndClamp((int32_t*)out, pTmpBuffer, outFrameCount);
+ free(pTmpBuffer);
+ pTmpBuffer = NULL;
+}
+
+}
+
+} //namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/Android.mk b/libvideoeditor/vss/stagefrightshells/Android.mk
new file mode 100755
index 0000000..5053e7d
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h
new file mode 100755
index 0000000..7a9a012
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditor3gpReader.h
+* @brief StageFright shell 3GP Reader
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_3GPREADER_H
+#define VIDEOEDITOR_3GPREADER_H
+
+#include "M4READER_Common.h"
+
+M4OSA_ERR VideoEditor3gpReader_getInterface(
+ M4READER_MediaType *pMediaType,
+ M4READER_GlobalInterface **pRdrGlobalInterface,
+ M4READER_DataInterface **pRdrDataInterface);
+
+#endif /* VIDEOEDITOR_3GPREADER_H */
+
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h
new file mode 100755
index 0000000..0d3b801
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorAudioDecoder.h
+* @brief StageFright shell Audio Decoder
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_AUDIODECODER_H
+#define VIDEOEDITOR_AUDIODECODER_H
+
+#include "M4AD_Common.h"
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AAC(M4AD_Type* pDecoderType,
+ M4AD_Interface** pDecoderInterface);
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRNB(M4AD_Type* pDecoderType,
+ M4AD_Interface** pDecoderInterface);
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRWB(M4AD_Type* pDecoderType,
+ M4AD_Interface** pDecoderInterface);
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_MP3(M4AD_Type* pDecoderType,
+ M4AD_Interface** pDecoderInterface);
+
+#endif /* VIDEOEDITOR_AUDIODECODER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h
new file mode 100755
index 0000000..f4f6b04
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorAudioEncoder.h
+* @brief StageFright shell Audio Encoder
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_AUDIOENCODER_H
+#define VIDEOEDITOR_AUDIOENCODER_H
+
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Memory.h"
+#include "M4ENCODER_AudioCommon.h"
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_AAC(
+ M4ENCODER_AudioFormat* pFormat,
+ M4ENCODER_AudioGlobalInterface** pEncoderInterface);
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_AMRNB(
+ M4ENCODER_AudioFormat* pFormat,
+ M4ENCODER_AudioGlobalInterface** pEncoderInterface);
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_MP3(
+ M4ENCODER_AudioFormat* pFormat,
+ M4ENCODER_AudioGlobalInterface** pEncoderInterface);
+
+#endif /* VIDEOEDITOR_AUDIOENCODER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h
new file mode 100755
index 0000000..3aff6a7
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorBuffer.h
+* @brief StageFright shell Buffer
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_BUFFER_H
+#define VIDEOEDITOR_BUFFER_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_CharStar.h"
+#include "M4_Utils.h"
+
+#include "LV_Macros.h"
+
+/*--- Core id for VIDEOEDITOR Buffer allocations ---*/
+#define VIDEOEDITOR_BUFFER_EXTERNAL 0x012F
+
+/* ----- errors -----*/
+#define M4ERR_NO_BUFFER_AVAILABLE \
+ M4OSA_ERR_CREATE(M4_ERR,VIDEOEDITOR_BUFFER_EXTERNAL,0x000001)
+#define M4ERR_NO_BUFFER_MATCH \
+ M4OSA_ERR_CREATE(M4_ERR,VIDEOEDITOR_BUFFER_EXTERNAL,0x000002)
+
+/* Fill state of a VIDEOEDITOR_BUFFER_Buffer */
+typedef enum {
+ VIDEOEDITOR_BUFFER_kEmpty = 0,
+ VIDEOEDITOR_BUFFER_kFilled,
+} VIDEOEDITOR_BUFFER_State;
+
+/**
+ ************************************************************************
+ * Structure LVOMX_BUFFER_Buffer
+ * @brief One OMX Buffer and data related to it
+ ************************************************************************
+*/
+typedef struct {
+ M4OSA_Void* pData; /**< Pointer to the data*/
+ M4OSA_UInt32 size; /**< Size of the data pointed to by pData (presumably bytes) */
+ VIDEOEDITOR_BUFFER_State state; /**< Buffer state */
+ M4OSA_UInt32 idx; /**< Index of the buffer inside the pool */
+ M4_MediaTime buffCTS; /**< Time stamp of the buffer */
+} VIDEOEDITOR_BUFFER_Buffer;
+
+/**
+ ************************************************************************
+ * Structure LVOMX_BUFFER_Pool
+ * @brief Structure to manage buffers
+ ************************************************************************
+*/
+typedef struct {
+ VIDEOEDITOR_BUFFER_Buffer* pNXPBuffer; /**< Array of NB buffers */
+ M4OSA_UInt32 NB; /**< Number of buffers in the pool */
+ M4OSA_Char* poolName; /**< Name given to the pool (presumably for traces) */
+} VIDEOEDITOR_BUFFER_Pool;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif //__cplusplus
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
+ * M4OSA_UInt32 nbBuffers)
+ * @brief Allocate a pool of nbBuffers buffers
+ *
+ * @param ppool : IN The buffer pool to create
+ * @param nbBuffers : IN The number of buffers in the pool
+ * @param poolName : IN a name given to the pool
+ * @return Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
+ M4OSA_UInt32 nbBuffers, M4OSA_Char* poolName);
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(LVOMX_BUFFER_Pool* ppool)
+ * @brief Deallocate a buffer pool
+ *
+ * @param ppool : IN The buffer pool to free
+ * @return Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool);
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
+ * VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
+ * @brief Returns a buffer in a given state
+ *
+ * @param ppool : IN The buffer pool
+ * @param desiredState : IN The buffer state
+ * @param pNXPBuffer : IN The selected buffer
+ * @return Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
+ VIDEOEDITOR_BUFFER_State desiredState,
+ VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer);
+
+
+M4OSA_ERR VIDEOEDITOR_BUFFER_initPoolBuffers(VIDEOEDITOR_BUFFER_Pool* ppool,
+ M4OSA_UInt32 lSize);
+
+M4OSA_ERR VIDEOEDITOR_BUFFER_getOldestBuffer(VIDEOEDITOR_BUFFER_Pool *pool,
+ VIDEOEDITOR_BUFFER_State desiredState,
+ VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer);
+
+#ifdef __cplusplus
+}
+#endif //__cplusplus
+#endif /*VIDEOEDITOR_BUFFER_H*/
+
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMain.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMain.h
new file mode 100755
index 0000000..4c3b517
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMain.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VIDEO_EDITOR_API_H__
+#define __VIDEO_EDITOR_API_H__
+
+#include "M4OSA_Types.h"
+
+typedef enum
+{
+ MSG_TYPE_PROGRESS_INDICATION, // Playback progress indication event
+ MSG_TYPE_PLAYER_ERROR, // Playback error
+ MSG_TYPE_PREVIEW_END, // Preview of clips is complete
+ MSG_TYPE_OVERLAY_UPDATE, // update overlay during preview
+ MSG_TYPE_OVERLAY_CLEAR, // clear the overlay
+} progress_callback_msg_type;
+
+typedef struct {
+ int overlaySettingsIndex;
+ int clipIndex;
+} VideoEditorCurretEditInfo;
+
+typedef struct
+{
+ M4OSA_Void *pFile; /** PCM file path */
+ M4OSA_Bool bRemoveOriginal; /** If true, the original audio track
+ is not taken into account */
+ M4OSA_UInt32 uiNbChannels; /** Number of channels (1=mono, 2=stereo) of BGM clip*/
+ M4OSA_UInt32 uiSamplingFrequency; /** Sampling audio frequency (8000 for amr, 16000 or
+ more for aac) of BGM clip*/
+ M4OSA_UInt32 uiExtendedSamplingFrequency; /** Extended frequency for AAC+,
+ eAAC+ streams of BGM clip*/
+ M4OSA_UInt32 uiAddCts; /** Time, in milliseconds, at which the added
+ audio track is inserted */
+ M4OSA_UInt32 uiAddVolume; /** Volume, in percentage, of the added audio track */
+ M4OSA_UInt32 beginCutMs;
+ M4OSA_UInt32 endCutMs;
+ M4OSA_Int32 fileType;
+ M4OSA_Bool bLoop; /** Looping on/off **/
+ /* Audio ducking */
+ M4OSA_UInt32 uiInDucking_threshold; /** Threshold value at which
+ background music shall duck */
+ M4OSA_UInt32 uiInDucking_lowVolume; /** lower the background track to
+ this factor of current level */
+ M4OSA_Bool bInDucking_enable; /** enable ducking */
+ M4OSA_UInt32 uiBTChannelCount; /** channel count for BT */
+ M4OSA_Void *pPCMFilePath;
+} M4xVSS_AudioMixingSettings;
+
+typedef struct
+{
+ M4OSA_Void *pBuffer; /* YUV420 buffer of frame to be rendered*/
+ M4OSA_UInt32 timeMs; /* time stamp of the frame to be rendered*/
+ M4OSA_UInt32 uiSurfaceWidth; /* Surface display width*/
+ M4OSA_UInt32 uiSurfaceHeight; /* Surface display height*/
+ M4OSA_UInt32 uiFrameWidth; /* Frame width*/
+ M4OSA_UInt32 uiFrameHeight; /* Frame height*/
+ M4OSA_Bool bApplyEffect; /* Apply video effects before render*/
+ M4OSA_UInt32 clipBeginCutTime; /* Clip begin cut time relative to storyboard */
+ M4OSA_UInt32 clipEndCutTime; /* Clip end cut time relative to storyboard */
+ M4OSA_UInt32 videoRotationDegree; /* Video rotation degree */
+
+} VideoEditor_renderPreviewFrameStr;
+#endif /*__VIDEO_EDITOR_API_H__*/
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h
new file mode 100755
index 0000000..5b3be40
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorMp3Reader.h
+* @brief StageFright shell MP3 Reader
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_MP3READER_H
+#define VIDEOEDITOR_MP3READER_H
+
+#include "M4READER_Common.h"
+
+M4OSA_ERR VideoEditorMp3Reader_getInterface(
+ M4READER_MediaType *pMediaType,
+ M4READER_GlobalInterface **pRdrGlobalInterface,
+ M4READER_DataInterface **pRdrDataInterface);
+
+#endif /* VIDEOEDITOR_MP3READER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h
new file mode 100755
index 0000000..a21b21d
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorUtils.h
+* @brief StageFright shell Utilities
+*************************************************************************
+*/
+#ifndef ANDROID_UTILS_H_
+#define ANDROID_UTILS_H_
+
+/*******************
+ * HEADERS *
+ *******************/
+
+#include "M4OSA_Debug.h"
+
+#include "utils/Log.h"
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+/**
+ *************************************************************************
+ * VIDEOEDITOR_CHECK(test, errCode)
+ * @note This macro displays an error message and goes to function cleanUp label
+ * if the test fails.
+ *************************************************************************
+ */
+#define VIDEOEDITOR_CHECK(test, errCode) \
+{ \
+ if( !(test) ) { \
+ ALOGV("!!! %s (L%d) check failed : " #test ", yields error 0x%.8x", \
+ __FILE__, __LINE__, errCode); \
+ err = (errCode); \
+ goto cleanUp; \
+ } \
+}
+
+/**
+ *************************************************************************
+ * SAFE_FREE(p)
+ * @note This macro calls free and makes sure the pointer is set to NULL.
+ *************************************************************************
+ */
+#define SAFE_FREE(p) \
+{ \
+ if(M4OSA_NULL != (p)) { \
+ free((p)) ; \
+ (p) = M4OSA_NULL ; \
+ } \
+}
+
+/**
+ *************************************************************************
+ * SAFE_MALLOC(p, type, count, comment)
+ * @note This macro allocates a buffer, checks for success and fills the buffer
+ * with 0.
+ *************************************************************************
+ */
+#define SAFE_MALLOC(p, type, count, comment) \
+{ \
+ (p) = (type*)M4OSA_32bitAlignedMalloc(sizeof(type)*(count), 0xFF,(M4OSA_Char*)comment);\
+ VIDEOEDITOR_CHECK(M4OSA_NULL != (p), M4ERR_ALLOC); \
+ memset((void *)(p), 0,sizeof(type)*(count)); \
+}
+
+
+ /********************
+ * UTILITIES *
+ ********************/
+
+
+namespace android {
+
+/*--------------------------*/
+/* DISPLAY METADATA CONTENT */
+/*--------------------------*/
+void displayMetaData(const sp<MetaData> meta);
+
+// Build the AVC codec specific info from the StageFright encoders output
+status_t buildAVCCodecSpecificData(uint8_t **outputData, size_t *outputSize,
+ const uint8_t *data, size_t size, MetaData *param);
+
+}//namespace android
+
+
+#endif //ANDROID_UTILS_H_
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h
new file mode 100755
index 0000000..1eea3a6
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorVideoDecoder.h
+* @brief StageFright shell video decoder
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_VIDEODECODER_H
+#define VIDEOEDITOR_VIDEODECODER_H
+
+#include "M4DECODER_Common.h"
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface_MPEG4(
+ M4DECODER_VideoType *pDecoderType,
+ M4OSA_Context *pDecoderInterface);
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface_H264(
+ M4DECODER_VideoType *pDecoderType,
+ M4OSA_Context *pDecoderInterface);
+
+M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface_MPEG4(
+ M4DECODER_VideoType *pDecoderType,
+ M4OSA_Context *pDecInterface);
+
+M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface_H264(
+ M4DECODER_VideoType *pDecoderType,
+ M4OSA_Context *pDecInterface);
+
+M4OSA_ERR VideoEditorVideoDecoder_getVideoDecodersAndCapabilities(
+ M4DECODER_VideoDecoders** decoders);
+
+#endif // VIDEOEDITOR_VIDEODECODER_H
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
new file mode 100755
index 0000000..cca5ee9
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+*************************************************************************
+* @file VideoEditorVideoDecoder_Internal.h
+* @brief StageFright shell video decoder internal header file*
+*************************************************************************
+*/
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_Memory.h"
+#include "M4_Common.h"
+#include "M4OSA_CoreID.h"
+
+#include "M4DA_Types.h"
+#include "M4READER_Common.h"
+#include "M4VIFI_FiltersAPI.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4DECODER_Common.h"
+#include "M4OSA_Semaphore.h"
+#include "VideoEditorBuffer.h"
+#include "M4VD_Tools.h"
+#include "I420ColorConverter.h"
+
+#include <utils/RefBase.h>
+#include <android/rect.h>
+#include <OMX_Video.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+
+#define VIDEOEDITOR_VIDEC_SHELL_VER_MAJOR 0
+#define VIDEOEDITOR_VIDEC_SHELL_VER_MINOR 0
+#define VIDEOEDITOR_VIDEC_SHELL_VER_REVISION 1
+
+/* ERRORS */
+#define M4ERR_SF_DECODER_RSRC_FAIL M4OSA_ERR_CREATE(M4_ERR, 0xFF, 0x0001)
+
+namespace android {
+
+typedef enum {
+ VIDEOEDITOR_kMpeg4VideoDec,
+ VIDEOEDITOR_kH263VideoDec,
+ VIDEOEDITOR_kH264VideoDec
+} VIDEOEDITOR_CodecType;
+
+
+/*typedef struct{
+ M4OSA_UInt32 stream_byte;
+ M4OSA_UInt32 stream_index;
+ M4OSA_MemAddr8 in;
+
+} VIDEOEDITOR_VIDEO_Bitstream_ctxt;*/
+
+typedef M4VS_Bitstream_ctxt VIDEOEDITOR_VIDEO_Bitstream_ctxt;
+
+typedef struct {
+
+ /** Stagefrigth params */
+ OMXClient mClient; /**< OMX Client session instance. */
+ sp<MediaSource> mVideoDecoder; /**< Stagefright decoder instance */
+ sp<MediaSource> mReaderSource; /**< Reader access > */
+
+ /* READER */
+ M4READER_GlobalInterface *m_pReaderGlobal;
+ M4READER_DataInterface *m_pReader;
+ M4_AccessUnit *m_pNextAccessUnitToDecode;
+
+ /* STREAM PARAMS */
+ M4_VideoStreamHandler* m_pVideoStreamhandler;
+
+ /* User filter params. */
+ M4VIFI_PlanConverterFunctionType *m_pFilter;
+ M4OSA_Void *m_pFilterUserData;
+
+ M4_MediaTime m_lastDecodedCTS;
+ M4_MediaTime m_lastRenderCts;
+ M4OSA_Bool mReachedEOS;
+ VIDEOEDITOR_CodecType mDecoderType;
+ M4DECODER_VideoSize m_VideoSize;
+ M4DECODER_MPEG4_DecoderConfigInfo m_Dci; /**< Decoder Config info */
+ VIDEOEDITOR_BUFFER_Pool *m_pDecBufferPool; /**< Decoded buffer pool */
+ OMX_COLOR_FORMATTYPE decOuputColorFormat;
+
+ M4OSA_UInt32 mNbInputFrames;
+ M4OSA_Double mFirstInputCts;
+ M4OSA_Double mLastInputCts;
+ M4OSA_UInt32 mNbRenderedFrames;
+ M4OSA_Double mFirstRenderedCts;
+ M4OSA_Double mLastRenderedCts;
+ M4OSA_UInt32 mNbOutputFrames;
+ M4OSA_Double mFirstOutputCts;
+ M4OSA_Double mLastOutputCts;
+ M4OSA_Int32 mGivenWidth, mGivenHeight; //Used in case of
+ //INFO_FORMAT_CHANGED
+ ARect mCropRect; // These are obtained from kKeyCropRect.
+ I420ColorConverter* mI420ColorConverter;
+
+} VideoEditorVideoDecoder_Context;
+
+} //namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h
new file mode 100755
index 0000000..fd5154f
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorVideoEncoder.h
+* @brief StageFright shell video encoder
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_VIDEOENCODER_H
+#define VIDEOEDITOR_VIDEOENCODER_H
+
+#include "M4ENCODER_common.h"
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_H263(M4ENCODER_Format* pFormat,
+ M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_MPEG4(M4ENCODER_Format* pFormat,
+ M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_H264(M4ENCODER_Format* pFormat,
+ M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
+
+#endif //VIDEOEDITOR_VIDEOENCODER_H
diff --git a/libvideoeditor/vss/stagefrightshells/src/Android.mk b/libvideoeditor/vss/stagefrightshells/src/Android.mk
new file mode 100755
index 0000000..acc6b0e
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/Android.mk
@@ -0,0 +1,72 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ MediaBufferPuller.cpp \
+ VideoEditorVideoDecoder.cpp \
+ VideoEditorAudioDecoder.cpp \
+ VideoEditorMp3Reader.cpp \
+ VideoEditor3gpReader.cpp \
+ VideoEditorUtils.cpp \
+ VideoEditorBuffer.c \
+ VideoEditorVideoEncoder.cpp \
+ VideoEditorAudioEncoder.cpp
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/base/media/libmediaplayerservice \
+ $(TOP)/frameworks/base/media/libstagefright \
+ $(TOP)/frameworks/base/media/libstagefright/include \
+ $(TOP)/frameworks/base/media/libstagefright/rtsp \
+ $(call include-path-for, corecg graphics) \
+ $(TOP)/frameworks/av/libvideoeditor/lvpp \
+ $(TOP)/frameworks/av/libvideoeditor/osal/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/common/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/mcs/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
+ $(TOP)/frameworks/native/include/media/editor \
+ $(TOP)/frameworks/native/include/media/openmax
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libutils \
+ libandroid_runtime \
+ libnativehelper \
+ libmedia \
+ libbinder \
+ libstagefright \
+ libstagefright_foundation \
+ libstagefright_omx \
+ libgui \
+ libvideoeditorplayer
+
+LOCAL_CFLAGS += \
+
+
+
+LOCAL_STATIC_LIBRARIES := \
+ libvideoeditor_osal \
+ libstagefright_color_conversion
+
+
+LOCAL_MODULE:= libvideoeditor_stagefrightshells
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.cpp b/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.cpp
new file mode 100644
index 0000000..acc8268
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaBufferPuller"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include "MediaBufferPuller.h"
+
+namespace android {
+
+
+MediaBufferPuller::MediaBufferPuller(const sp<MediaSource>& source)
+ : mSource(source),
+ mAskToStart(false),
+ mAskToStop(false),
+ mAcquireStopped(false),
+ mReleaseStopped(false),
+ mSourceError(OK) {
+
+ androidCreateThread(acquireThreadStart, this);
+ androidCreateThread(releaseThreadStart, this);
+}
+
+MediaBufferPuller::~MediaBufferPuller() {
+ stop();
+}
+
+bool MediaBufferPuller::hasMediaSourceReturnedError() const {
+ Mutex::Autolock autolock(mLock);
+ return ((mSourceError != OK) ? true : false);
+}
+void MediaBufferPuller::start() {
+ Mutex::Autolock autolock(mLock);
+ mAskToStart = true;
+ mAcquireCond.signal();
+ mReleaseCond.signal();
+}
+
+void MediaBufferPuller::stop() {
+ Mutex::Autolock autolock(mLock);
+ mAskToStop = true;
+ mAcquireCond.signal();
+ mReleaseCond.signal();
+ while (!mAcquireStopped || !mReleaseStopped) {
+ mUserCond.wait(mLock);
+ }
+
+ // Release remaining buffers
+ for (size_t i = 0; i < mBuffers.size(); i++) {
+ mBuffers.itemAt(i)->release();
+ }
+
+ for (size_t i = 0; i < mReleaseBuffers.size(); i++) {
+ mReleaseBuffers.itemAt(i)->release();
+ }
+
+ mBuffers.clear();
+ mReleaseBuffers.clear();
+}
+
+MediaBuffer* MediaBufferPuller::getBufferNonBlocking() {
+ Mutex::Autolock autolock(mLock);
+ if (mBuffers.empty()) {
+ return NULL;
+ } else {
+ MediaBuffer* b = mBuffers.itemAt(0);
+ mBuffers.removeAt(0);
+ return b;
+ }
+}
+
+MediaBuffer* MediaBufferPuller::getBufferBlocking() {
+ Mutex::Autolock autolock(mLock);
+ while (mBuffers.empty() && !mAcquireStopped) {
+ mUserCond.wait(mLock);
+ }
+
+ if (mBuffers.empty()) {
+ return NULL;
+ } else {
+ MediaBuffer* b = mBuffers.itemAt(0);
+ mBuffers.removeAt(0);
+ return b;
+ }
+}
+
+void MediaBufferPuller::putBuffer(MediaBuffer* buffer) {
+ Mutex::Autolock autolock(mLock);
+ mReleaseBuffers.push(buffer);
+ mReleaseCond.signal();
+}
+
+int MediaBufferPuller::acquireThreadStart(void* arg) {
+ MediaBufferPuller* self = (MediaBufferPuller*)arg;
+ self->acquireThreadFunc();
+ return 0;
+}
+
+int MediaBufferPuller::releaseThreadStart(void* arg) {
+ MediaBufferPuller* self = (MediaBufferPuller*)arg;
+ self->releaseThreadFunc();
+ return 0;
+}
+
+void MediaBufferPuller::acquireThreadFunc() {
+ mLock.lock();
+
+ // Wait for the start signal
+ while (!mAskToStart && !mAskToStop) {
+ mAcquireCond.wait(mLock);
+ }
+
+ // Loop until we are asked to stop, or there is nothing more to read
+ while (!mAskToStop) {
+ MediaBuffer* pBuffer;
+ mLock.unlock();
+ status_t result = mSource->read(&pBuffer, NULL);
+ mLock.lock();
+ mSourceError = result;
+ if (result != OK) {
+ break;
+ }
+ mBuffers.push(pBuffer);
+ mUserCond.signal();
+ }
+
+ mAcquireStopped = true;
+ mUserCond.signal();
+ mLock.unlock();
+}
+
+void MediaBufferPuller::releaseThreadFunc() {
+ mLock.lock();
+
+ // Wait for the start signal
+ while (!mAskToStart && !mAskToStop) {
+ mReleaseCond.wait(mLock);
+ }
+
+ // Loop until we are asked to stop
+ while (1) {
+ if (mReleaseBuffers.empty()) {
+ if (mAskToStop) {
+ break;
+ } else {
+ mReleaseCond.wait(mLock);
+ continue;
+ }
+ }
+ MediaBuffer* pBuffer = mReleaseBuffers.itemAt(0);
+ mReleaseBuffers.removeAt(0);
+ mLock.unlock();
+ pBuffer->release();
+ mLock.lock();
+ }
+
+ mReleaseStopped = true;
+ mUserCond.signal();
+ mLock.unlock();
+}
+
+}; // namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.h b/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.h
new file mode 100644
index 0000000..ed72a53
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/MediaBufferPuller.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MEDIA_BUFFER_PULLER_H
+#define _MEDIA_BUFFER_PULLER_H
+
+#include <utils/threads.h>
+#include <utils/Vector.h>
+
+
+namespace android {
+
+struct MediaSource;
+struct MediaBuffer;
+
+/*
+ * An object of this class can pull a list of media buffers
+ * from a MediaSource repeatedly. The user can then get the
+ * buffers from that list.
+ */
+struct MediaBufferPuller {
+public:
+ MediaBufferPuller(const sp<MediaSource>& source);
+ ~MediaBufferPuller();
+
+ // Start to build up the list of the buffers.
+ void start();
+
+ // Release the list of the available buffers, and stop
+ // pulling buffers from the MediaSource.
+ void stop();
+
+ // Get a buffer from the list. If there is no buffer available
+ // at the time this method is called, it blocks waiting for
+ // a buffer to become available or until stop() is called.
+ MediaBuffer* getBufferBlocking();
+
+ // Get a buffer from the list. If there is no buffer available
+ // at the time this method is called, NULL is returned.
+ MediaBuffer* getBufferNonBlocking();
+
+ // Add a buffer to the end of the list of available media buffers
+ void putBuffer(MediaBuffer* buffer);
+
+ // Check whether the source returned an error or not.
+ bool hasMediaSourceReturnedError() const;
+
+private:
+ static int acquireThreadStart(void* arg);
+ void acquireThreadFunc();
+
+ static int releaseThreadStart(void* arg);
+ void releaseThreadFunc();
+
+ sp<MediaSource> mSource;
+ Vector<MediaBuffer*> mBuffers;
+ Vector<MediaBuffer*> mReleaseBuffers;
+
+ mutable Mutex mLock;
+ Condition mUserCond; // for the user of this class
+ Condition mAcquireCond; // for the acquire thread
+ Condition mReleaseCond; // for the release thread
+
+ bool mAskToStart; // Asks the threads to start
+ bool mAskToStop; // Asks the threads to stop
+ bool mAcquireStopped; // The acquire thread has stopped
+ bool mReleaseStopped; // The release thread has stopped
+ status_t mSourceError; // Error returned by MediaSource read
+
+ // Don't call me!
+ MediaBufferPuller(const MediaBufferPuller&);
+ MediaBufferPuller& operator=(const MediaBufferPuller&);
+};
+
+} // namespace android
+
+#endif // _MEDIA_BUFFER_PULLER_H
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp
new file mode 100755
index 0000000..c4c4d84
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp
@@ -0,0 +1,2008 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditor3gpReader.cpp
+* @brief StageFright shell 3GP Reader
+*************************************************************************
+*/
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_3GPREADER"
+
+/**
+ * HEADERS
+ *
+ */
+#define VIDEOEDITOR_BITSTREAM_PARSER
+
+#include "M4OSA_Debug.h"
+#include "VideoEditor3gpReader.h"
+#include "M4SYS_AccessUnit.h"
+#include "VideoEditorUtils.h"
+#include "M4READER_3gpCom.h"
+#include "M4_Common.h"
+#include "M4OSA_FileWriter.h"
+
+#ifdef VIDEOEDITOR_BITSTREAM_PARSER
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4_Utils.h"
+#endif
+
+#include "ESDS.h"
+#include "utils/Log.h"
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+/**
+ * SOURCE CLASS
+ */
+namespace android {
+/**
+ * ENGINE INTERFACE
+ */
+
+/**
+ ************************************************************************
+ * @brief Array of AMR NB/WB bitrates
+ * @note Maps an AMR frame-type/mode index to its bit rate in bits/s.
+ *       Row 0: narrow-band (8 kHz) modes; row 1: wide-band (16 kHz)
+ *       modes (presumably the standard 3GPP AMR mode tables — the last
+ *       NB entry is 0, i.e. unused).
+ ************************************************************************
+*/
+const M4OSA_UInt32 VideoEditor3gpReader_AmrBitRate [2 /* 8kHz / 16kHz */]
+ [9 /* the bitrate mode */] =
+{
+ {4750, 5150, 5900, 6700, 7400, 7950, 10200, 12200, 0},
+ {6600, 8850, 12650, 14250, 15850, 18250, 19850, 23050, 23850}
+};
+
+/**
+ *******************************************************************************
+ * structure VideoEditor3gpReader_Context
+ * @brief:This structure defines the context of the StageFright 3GP shell Reader
+ *******************************************************************************
+*/
+typedef struct {
+ /* Stagefright media-access objects for the opened 3GP file. */
+ sp<DataSource> mDataSource;
+ sp<MediaExtractor> mExtractor;
+ sp<MediaSource> mAudioSource;
+ sp<MediaSource> mVideoSource;
+ /* Engine-side stream descriptors and their current access units. */
+ M4_StreamHandler* mAudioStreamHandler;
+ M4_StreamHandler* mVideoStreamHandler;
+ M4SYS_AccessUnit mAudioAu;
+ M4SYS_AccessUnit mVideoAu;
+ /* Longest track duration (ms) and file size, used for bitrate estimation. */
+ M4OSA_Time mMaxDuration;
+ int64_t mFileSize;
+ /* Type/id of the stream most recently returned to the engine. */
+ M4_StreamType mStreamType;
+ M4OSA_UInt32 mStreamId;
+ /* Track count and iteration cursor used while enumerating streams. */
+ int32_t mTracks;
+ int32_t mCurrTrack;
+ /* Pending-seek flags and targets (times in microseconds once armed). */
+ M4OSA_Bool mAudioSeeking;
+ M4OSA_Time mAudioSeekTime;
+ M4OSA_Bool mVideoSeeking;
+ M4OSA_Time mVideoSeekTime;
+
+} VideoEditor3gpReader_Context;
+
+#ifdef VIDEOEDITOR_BITSTREAM_PARSER
+/**
+ ************************************************************************
+ * structure VideoEditor3gpReader_BitStreamParserContext
+ * @brief Internal BitStreamParser context: a cursor (word index +
+ *        intra-word bit offset) over a caller-owned bitstream buffer.
+ ************************************************************************
+*/
+typedef struct {
+    M4OSA_UInt32*   mPbitStream;   /**< bitstream pointer (32bits aligned) */
+    M4OSA_Int32     mSize;         /**< bitstream size in bytes */
+    M4OSA_Int32     mIndex;        /**< byte index */
+    M4OSA_Int32     mBitIndex;     /**< bit index */
+    M4OSA_Int32     mStructSize;   /**< size of structure */
+} VideoEditor3gpReader_BitStreamParserContext;
+
+/**
+ *******************************************************************************
+ * @brief Allocates a BitStreamParser context and initializes its cursor.
+ * @param pContext (OUT) Receives the new context, or M4OSA_NULL when the
+ *                       allocation fails.
+ * @param bitStream A pointer to the bitstream
+ * @param size The size of the bitstream in bytes
+ *******************************************************************************
+*/
+static void VideoEditor3gpReader_BitStreamParserInit(void** pContext,
+        void* pBitStream, M4OSA_Int32 size) {
+    VideoEditor3gpReader_BitStreamParserContext* pCtx;
+
+    /* Report failure by default; overwritten on success below. */
+    *pContext = M4OSA_NULL;
+
+    pCtx = (VideoEditor3gpReader_BitStreamParserContext*)M4OSA_32bitAlignedMalloc(
+        sizeof(VideoEditor3gpReader_BitStreamParserContext), M4READER_3GP,
+        (M4OSA_Char*)"3GP BitStreamParser Context");
+    if (M4OSA_NULL == pCtx) {
+        return;
+    }
+
+    pCtx->mPbitStream = (M4OSA_UInt32*)pBitStream;
+    pCtx->mSize       = size;
+    pCtx->mIndex      = 0;   /* cursor starts on the first word... */
+    pCtx->mBitIndex   = 0;   /* ...at its most significant bit */
+    pCtx->mStructSize =
+        sizeof(VideoEditor3gpReader_BitStreamParserContext);
+
+    *pContext = pCtx;
+}
+/**
+ **********************************************************************
+ * @brief Releases a context created by
+ *        VideoEditor3gpReader_BitStreamParserInit.
+ * @param pContext (IN/OUT) BitStreamParser context (may be M4OSA_NULL).
+ **********************************************************************
+*/
+static void VideoEditor3gpReader_BitStreamParserCleanUp(void* pContext) {
+    /* free() takes void* and accepts NULL; the original cast was a no-op. */
+    free(pContext);
+}
+/**
+ *****************************************************************************
+ * @brief Read the next <length> bits in the bitstream.
+ * @note The function does not update the bitstream pointer (peek only).
+ * @param pContext (IN/OUT) BitStreamParser context.
+ * @param length (IN) The number of bits to extract from the bitstream
+ *                    (expected to be at most 32).
+ * @return the read bits, right-aligned in the return value
+ *****************************************************************************
+*/
+static M4OSA_UInt32 VideoEditor3gpReader_BitStreamParserShowBits(void* pContext,
+        M4OSA_Int32 length) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+
+    M4OSA_UInt32 u_mask;
+    M4OSA_UInt32 retval;
+    M4OSA_Int32 i_ovf;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0,
+        "VideoEditor3gpReader_BitStreamParserShowBits:invalid context pointer");
+
+    /* Fetch the 32-bit word the cursor currently points at. */
+    retval=(M4OSA_UInt32)GET_MEMORY32(pStreamContext->\
+        mPbitStream[ pStreamContext->mIndex ]);
+    /* i_ovf > 0 means the requested bits straddle into the next word. */
+    i_ovf = pStreamContext->mBitIndex + length - 32;
+    u_mask = (length >= 32) ? 0xffffffff: (1 << length) - 1;
+
+    /* do we have enough bits available in the current word(32bits)*/
+    if (i_ovf <= 0) {
+        retval=(retval >> (- i_ovf)) & u_mask;
+    } else {
+        /* Combine the tail of this word with the head of the next one. */
+        M4OSA_UInt32 u_nextword = (M4OSA_UInt32)GET_MEMORY32(
+            pStreamContext->mPbitStream[ pStreamContext->mIndex + 1 ]);
+        M4OSA_UInt32 u_msb_mask, u_msb_value, u_lsb_mask, u_lsb_value;
+
+        u_msb_mask = ((1 << (32 - pStreamContext->mBitIndex)) - 1) << i_ovf;
+        u_msb_value = retval << i_ovf;
+        u_lsb_mask = (1 << i_ovf) - 1;
+        u_lsb_value = u_nextword >> (32 - i_ovf);
+        retval= (u_msb_value & u_msb_mask ) | (u_lsb_value & u_lsb_mask);
+    }
+    /* return the bits...*/
+    return retval;
+}
+/**
+ ************************************************************************
+ * @brief Advance the bitstream cursor by <length> bits.
+ * @param pContext (IN/OUT) BitStreamParser context.
+ * @param length (IN) The number of bits to skip (expected <= 32)
+ ************************************************************************
+*/
+static void VideoEditor3gpReader_BitStreamParserFlushBits(void* pContext,
+        M4OSA_Int32 length) {
+    VideoEditor3gpReader_BitStreamParserContext* pCtx =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+
+    if (M4OSA_NULL == pCtx) {
+        return;
+    }
+
+    /* Advance the bit offset; when it crosses a 32-bit word boundary,
+     * wrap it and step the word index forward. (Equivalent to the
+     * original val = old + length bookkeeping.) */
+    pCtx->mBitIndex += length;
+    if (pCtx->mBitIndex >= 32) {
+        pCtx->mBitIndex -= 32;
+        pCtx->mIndex++;
+    }
+}
+
+/* Random-access variant of ShowBits: reads <bitLength> bits starting at
+ * absolute bit position <bitPos>, without moving the cursor.
+ * NOTE(review): the bounds check compares bitLocation (a 32-bit WORD index)
+ * against mSize (documented as BYTES) — this looks 4x too permissive and
+ * could allow an out-of-bounds read near the end of the buffer; confirm
+ * the units with the callers. Returns 0 when the check fails. */
+static M4OSA_UInt32 VideoEditor3gpReader_BitStreamParserGetBits(
+        void* pContext,M4OSA_Int32 bitPos, M4OSA_Int32 bitLength) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+
+    M4OSA_Int32 bitLocation, bitIndex;
+    M4OSA_UInt32 retval=0;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0,
+        "VideoEditor3gpReader_BitStreamParserGetBits: invalid context pointer");
+
+    /* computes the word location*/
+    bitLocation=bitPos/32;
+    bitIndex=(bitPos) % 32;
+
+    if (bitLocation < pStreamContext->mSize) {
+        M4OSA_UInt32 u_mask;
+        /* i_ovf > 0 means the requested bits straddle into the next word. */
+        M4OSA_Int32 i_ovf = bitIndex + bitLength - 32;
+        retval=(M4OSA_UInt32)GET_MEMORY32(
+            pStreamContext->mPbitStream[ bitLocation ]);
+
+        u_mask = (bitLength >= 32) ? 0xffffffff: (1 << bitLength) - 1;
+
+        if (i_ovf <= 0) {
+            retval=(retval >> (- i_ovf)) & u_mask;
+        } else {
+            /* Combine the tail of this word with the head of the next. */
+            M4OSA_UInt32 u_nextword = (M4OSA_UInt32)GET_MEMORY32(
+                pStreamContext->mPbitStream[ bitLocation + 1 ]);
+            M4OSA_UInt32 u_msb_mask, u_msb_value, u_lsb_mask, u_lsb_value;
+
+            u_msb_mask = ((1 << (32 - bitIndex)) - 1) << i_ovf;
+            u_msb_value = retval << i_ovf;
+            u_lsb_mask = (1 << i_ovf) - 1;
+            u_lsb_value = u_nextword >> (32 - i_ovf);
+            retval= (u_msb_value & u_msb_mask ) | (u_lsb_value & u_lsb_mask);
+        }
+    }
+    return retval;
+}
+
+/* @brief Rewind the parser cursor to the very first bit of the bitstream.
+ * @param pContext (IN/OUT) BitStreamParser context (may be M4OSA_NULL). */
+static void VideoEditor3gpReader_BitStreamParserRestart(void* pContext) {
+    VideoEditor3gpReader_BitStreamParserContext* pCtx =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+
+    if (M4OSA_NULL != pCtx) {
+        /* Reset both the word index and the intra-word bit offset. */
+        pCtx->mIndex = 0;
+        pCtx->mBitIndex = 0;
+    }
+}
+/**
+ *******************************************************************************
+ * @brief Get a pointer to the byte the bitstream cursor currently points at.
+ * @note Use carefully: the returned pointer aliases the bitstream itself,
+ *       no copy is made.
+ * @param pContext (IN/OUT) BitStreamParser context.
+ * @return Pointer to the current location in the bitstream
+ *******************************************************************************
+*/
+static M4OSA_UInt8* VideoEditor3gpReader_GetCurrentbitStreamPointer(
+        void* pContext) {
+    VideoEditor3gpReader_BitStreamParserContext* pCtx =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pCtx), 0, "invalid context pointer");
+
+    /* Byte offset = whole words consumed + whole bytes inside current word. */
+    M4OSA_UInt8* base = (M4OSA_UInt8*)pCtx->mPbitStream;
+    return base + pCtx->mIndex * sizeof(M4OSA_UInt32) + pCtx->mBitIndex / 8;
+}
+
+/* @brief Return the bitstream size (in bytes) recorded in the context. */
+static M4OSA_Int32 VideoEditor3gpReader_BitStreamParserGetSize(void* pContext) {
+    VideoEditor3gpReader_BitStreamParserContext* pCtx =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pCtx), 0, "invalid context pointer");
+
+    return pCtx->mSize;
+}
+
+
+/* Thin alias: MPEG4 descriptor parsing reuses the generic bitstream parser,
+ * so initialization is simply forwarded. */
+static void VideoEditor3gpReader_MPEG4BitStreamParserInit(void** pContext,
+        void* pBitStream, M4OSA_Int32 size) {
+    VideoEditor3gpReader_BitStreamParserInit(pContext, pBitStream, size);
+}
+/* Decode an MPEG4 Systems expandable-size value packed into an integer.
+ * NOTE(review): extraction starts at the LEAST significant byte of val
+ * (numBytes == 0 selects bits 0..7), i.e. this assumes the caller packed
+ * the first descriptor byte into the low byte — confirm against callers. */
+static M4OSA_Int32 VideoEditor3gpReader_GetMpegLengthFromInteger(void* pContext,
+        M4OSA_UInt32 val) {
+    M4OSA_UInt32 length=0;
+    M4OSA_UInt32 numBytes=0;
+    M4OSA_UInt32 b=0;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pContext), 0, "invalid context pointer");
+
+    /* the length is encoded as a sequence of bytes. The highest bit is used
+    to indicate that the length continues on the next byte.
+
+    The length can be: 0x80 0x80 0x80 0x22
+    or just 0x22 (highest bit not set)
+
+    */
+
+    do {
+        /* Select byte numBytes of val, accumulate its low 7 payload bits. */
+        b=(val & ((0xff)<< (8 * numBytes)))>> (8 * numBytes);
+        length=(length << 7) | (b & 0x7f);
+        numBytes++;
+    } while ((b & 0x80) && numBytes < 4);
+
+    return length;
+}
+
+/**
+ *******************************************************************************
+ * @brief Decode an MPEG4 Systems descriptor size from an encoded SDL size data
+ * @note Consumes the size bytes from the current bitstream location.
+ * @param pContext (IN/OUT) BitStreamParser context.
+ * @return Size in a human readable form
+ *******************************************************************************
+*/
+static M4OSA_Int32 VideoEditor3gpReader_GetMpegLengthFromStream(void* pContext){
+    M4OSA_UInt32 result = 0;
+    M4OSA_UInt32 byteCount = 0;
+    M4OSA_UInt32 current = 0;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pContext), 0, "invalid context pointer");
+
+    /* The size is stored as up to four bytes; bit 7 of each byte flags that
+     * another byte follows, the low 7 bits carry the payload,
+     * e.g. 0x80 0x80 0x80 0x22, or just 0x22 (high bit clear). */
+    do {
+        current = VideoEditor3gpReader_BitStreamParserShowBits(pContext, 8);
+        VideoEditor3gpReader_BitStreamParserFlushBits(pContext, 8);
+        result = (result << 7) | (current & 0x7f);
+        byteCount++;
+    } while ((current & 0x80) && byteCount < 4);
+
+    return result;
+}
+#endif /* VIDEOEDITOR_BITSTREAM_PARSER */
+/**
+************************************************************************
+* @brief create an instance of the 3gp reader
+ * @note allocates the context
+ *
+ * @param pContext: (OUT) pointer on a reader context
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_ALLOC a memory allocation has failed
+ * @return M4ERR_PARAMETER at least one parameter is not valid
+************************************************************************
+*/
+
+M4OSA_ERR VideoEditor3gpReader_create(M4OSA_Context *pContext) {
+    VideoEditor3gpReader_Context* pC = NULL;
+    M4OSA_ERR err = M4NO_ERROR;
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext , M4ERR_PARAMETER);
+
+    ALOGV("VideoEditor3gpReader_create begin");
+
+    /* Context allocation & initialization */
+    SAFE_MALLOC(pC, VideoEditor3gpReader_Context, 1, "VideoEditor3gpReader");
+
+    /* BUGFIX: memset takes (dst, value, size); the arguments were swapped
+     * (memset(pC, sizeof(...), 0)), which zeroed 0 bytes and left the
+     * context uninitialized. */
+    memset(pC, 0, sizeof(VideoEditor3gpReader_Context));
+
+    /* Explicitly initialize the fields read before the first open/getNext
+     * calls (redundant with the memset above, kept for clarity). */
+    pC->mAudioStreamHandler = M4OSA_NULL;
+    pC->mAudioAu.dataAddress = M4OSA_NULL;
+    pC->mVideoStreamHandler = M4OSA_NULL;
+    pC->mVideoAu.dataAddress = M4OSA_NULL;
+
+    pC->mAudioSeeking = M4OSA_FALSE;
+    pC->mAudioSeekTime = 0;
+
+    pC->mVideoSeeking = M4OSA_FALSE;
+    pC->mVideoSeekTime = 0;
+
+    pC->mMaxDuration = 0;
+
+    *pContext=pC;
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        ALOGV("VideoEditor3gpReader_create no error");
+    } else {
+        ALOGV("VideoEditor3gpReader_create ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditor3gpReader_create end ");
+    return err;
+}
+
+/**
+**************************************************************************
+* @brief destroy the instance of the 3gp reader
+* @note after this call the context is invalid
+* @param context: (IN) Context of the reader
+* @return M4NO_ERROR there is no error
+* @return M4ERR_PARAMETER pContext parameter is not properly set
+**************************************************************************
+*/
+
+M4OSA_ERR VideoEditor3gpReader_destroy(M4OSA_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditor3gpReader_Context* pC = M4OSA_NULL;
+
+    ALOGV("VideoEditor3gpReader_destroy begin");
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pC = (VideoEditor3gpReader_Context*)pContext;
+
+    /* Release any AU payload buffers still owned by the context... */
+    SAFE_FREE(pC->mAudioAu.dataAddress);
+    pC->mAudioAu.dataAddress = M4OSA_NULL;
+    SAFE_FREE(pC->mVideoAu.dataAddress);
+    pC->mVideoAu.dataAddress = M4OSA_NULL;
+    /* ...then the context itself. */
+    SAFE_FREE(pC);
+    pContext = M4OSA_NULL;
+
+cleanUp:
+    if (M4NO_ERROR == err) {
+        ALOGV("VideoEditor3gpReader_destroy no error");
+    } else {
+        ALOGV("VideoEditor3gpReader_destroy ERROR 0x%X", err);
+    }
+
+    ALOGV("VideoEditor3gpReader_destroy end ");
+    return err;
+}
+
+/**
+************************************************************************
+* @brief open the reader and initializes its created instance
+* @note this function open the media file
+* @param context: (IN) Context of the reader
+* @param pFileDescriptor: (IN) Pointer to proprietary data identifying
+* the media to open
+* @return M4NO_ERROR there is no error
+* @return M4ERR_PARAMETER the context is NULL
+* @return M4ERR_UNSUPPORTED_MEDIA_TYPE
+* the media is DRM protected
+************************************************************************
+*/
+
+M4OSA_ERR VideoEditor3gpReader_open(M4OSA_Context pContext,
+        M4OSA_Void* pFileDescriptor) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    ALOGV("VideoEditor3gpReader_open start ");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_open: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_open: invalid pointer pFileDescriptor");
+
+    ALOGV("VideoEditor3gpReader_open Datasource start %s",
+        (char*)pFileDescriptor);
+    /* pFileDescriptor is a plain file path; open it directly. */
+    //pC->mDataSource = DataSource::CreateFromURI((char*)pFileDescriptor);
+    pC->mDataSource = new FileSource ((char*)pFileDescriptor);
+
+    if (pC->mDataSource == NULL) {
+        ALOGV("VideoEditor3gpReader_open Datasource error");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Force the MP4/3GP extractor: this shell only handles that container. */
+    pC->mExtractor = MediaExtractor::Create(pC->mDataSource,
+        MEDIA_MIMETYPE_CONTAINER_MPEG4);
+
+    if (pC->mExtractor == NULL) {
+        ALOGV("VideoEditor3gpReader_open extractor error");
+        return M4ERR_PARAMETER;
+    }
+
+    /* DRM-protected content cannot be edited; refuse to open it. */
+    int32_t isDRMProtected = 0;
+    sp<MetaData> meta = pC->mExtractor->getMetaData();
+    meta->findInt32(kKeyIsDRM, &isDRMProtected);
+    if (isDRMProtected) {
+        /* BUGFIX: log message said "VideoEditorMp3Reader_open" — a
+         * copy/paste from the MP3 shell; corrected to this reader's name. */
+        ALOGV("VideoEditor3gpReader_open error - DRM Protected");
+        return M4ERR_UNSUPPORTED_MEDIA_TYPE;
+    }
+
+    ALOGV("VideoEditor3gpReader_open end ");
+    return err;
+}
+
+/**
+************************************************************************
+* @brief close the reader
+* @note close the 3GP file
+* @param context: (IN) Context of the reader
+* @return M4NO_ERROR there is no error
+* @return M4ERR_PARAMETER the context is NULL
+* @return M4ERR_BAD_CONTEXT provided context is not a valid one
+************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_close(M4OSA_Context context) {
+    VideoEditor3gpReader_Context *pC = (VideoEditor3gpReader_Context*)context;
+    M4READER_AudioSbrUserdata *pAudioSbrUserData;
+    M4_AccessUnit *pAU;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    ALOGV("VideoEditor3gpReader_close begin");
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_close: invalid context pointer");
+
+    /* Tear down the audio side: decoder-specific info, AAC/SBR user data,
+     * ESDS info, then the stream handler itself, and finally stop and
+     * release the Stagefright source. Order matters: the handler owns the
+     * nested allocations freed before it. */
+    if (pC->mAudioStreamHandler) {
+        ALOGV("VideoEditor3gpReader_close Audio");
+
+        if (M4OSA_NULL != pC->mAudioStreamHandler->m_pDecoderSpecificInfo) {
+            free(pC->mAudioStreamHandler->\
+                m_pDecoderSpecificInfo);
+            pC->mAudioStreamHandler->m_decoderSpecificInfoSize = 0;
+            pC->mAudioStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        /* AAC streams carry extra SBR user data (first AU + decoder config)
+         * hung off m_pUserData; release its members before the struct. */
+        if ((M4DA_StreamTypeAudioAac == pC->mAudioStreamHandler->m_streamType)
+            && (M4OSA_NULL != pC->mAudioStreamHandler->m_pUserData)) {
+            pAudioSbrUserData = (M4READER_AudioSbrUserdata*)(\
+                pC->mAudioStreamHandler->m_pUserData);
+
+            pAU = (M4_AccessUnit*)pAudioSbrUserData->m_pFirstAU;
+            if (M4OSA_NULL != pAU) {
+                free(pAU);
+            }
+
+            if (M4OSA_NULL != pAudioSbrUserData->m_pAacDecoderUserConfig) {
+                free(pAudioSbrUserData->\
+                    m_pAacDecoderUserConfig);
+            }
+            free(pAudioSbrUserData);
+            pC->mAudioStreamHandler->m_pUserData = M4OSA_NULL;
+        }
+
+        if (pC->mAudioStreamHandler->m_pESDSInfo != M4OSA_NULL) {
+            free(pC->mAudioStreamHandler->m_pESDSInfo);
+            pC->mAudioStreamHandler->m_pESDSInfo = M4OSA_NULL;
+            pC->mAudioStreamHandler->m_ESDSInfoSize = 0;
+        }
+        /* Finally destroy the stream handler */
+        free(pC->mAudioStreamHandler);
+        pC->mAudioStreamHandler = M4OSA_NULL;
+
+        /* NOTE(review): assumes mAudioSource is non-NULL whenever the audio
+         * stream handler exists — confirm against getNextStreamHandler. */
+        pC->mAudioSource->stop();
+        pC->mAudioSource.clear();
+    }
+    /* Same teardown sequence for the video side (plus H264-specific DSI). */
+    if (pC->mVideoStreamHandler) {
+        ALOGV("VideoEditor3gpReader_close Video ");
+
+        if(M4OSA_NULL != pC->mVideoStreamHandler->m_pDecoderSpecificInfo) {
+            free(pC->mVideoStreamHandler->\
+                m_pDecoderSpecificInfo);
+            pC->mVideoStreamHandler->m_decoderSpecificInfoSize = 0;
+            pC->mVideoStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        if(M4OSA_NULL != pC->mVideoStreamHandler->m_pH264DecoderSpecificInfo) {
+            free(pC->mVideoStreamHandler->\
+                m_pH264DecoderSpecificInfo);
+            pC->mVideoStreamHandler->m_H264decoderSpecificInfoSize = 0;
+            pC->mVideoStreamHandler->m_pH264DecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        if(pC->mVideoStreamHandler->m_pESDSInfo != M4OSA_NULL) {
+            free(pC->mVideoStreamHandler->m_pESDSInfo);
+            pC->mVideoStreamHandler->m_pESDSInfo = M4OSA_NULL;
+            pC->mVideoStreamHandler->m_ESDSInfoSize = 0;
+        }
+
+        /* Finally destroy the stream handler */
+        free(pC->mVideoStreamHandler);
+        pC->mVideoStreamHandler = M4OSA_NULL;
+
+        pC->mVideoSource->stop();
+        pC->mVideoSource.clear();
+    }
+    /* Drop the extractor and data source last; sp<> clear() releases refs. */
+    pC->mExtractor.clear();
+    pC->mDataSource.clear();
+
+    ALOGV("VideoEditor3gpReader_close end");
+    return err;
+}
+
+/**
+************************************************************************
+* @brief get an option from the 3gp reader
+* @note it allows the caller to retrieve a property value:
+*
+* @param context: (IN) Context of the reader
+* @param optionId: (IN) indicates the option to get
+* @param pValue: (OUT) pointer to structure or value (allocated
+* by user) where option is stored
+*
+* @return M4NO_ERROR there is no error
+* @return M4ERR_BAD_CONTEXT provided context is not a valid one
+* @return M4ERR_PARAMETER at least one parameter is not properly set
+* @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+* @return M4ERR_VIDEO_NOT_H263 No video stream H263 in file.
+* @return M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET
+* Function 3gpReader_getNextStreamHandler must be called before
+************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_getOption(M4OSA_Context context,
+    M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    ALOGV("VideoEditor3gpReader_getOption begin %d", optionId);
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getOption: invalid pointer on value");
+
+    switch (optionId) {
+    case M4READER_kOptionID_Duration:
+        {
+            ALOGV("VideoEditor3gpReader_getOption duration %d",pC->mMaxDuration);
+            *(M4OSA_Time*)pValue = pC->mMaxDuration;
+        }
+        break;
+    case M4READER_kOptionID_Version:
+        /* not used */
+        ALOGV("VideoEditor3gpReader_getOption: M4READER_kOptionID_Version");
+        break;
+
+    case M4READER_kOptionID_Copyright:
+        /* not used */
+        ALOGV(">>>>>>>   M4READER_kOptionID_Copyright");
+        break;
+
+    case M4READER_kOptionID_CreationTime:
+        /* not used */
+        ALOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_CreationTime");
+    break;
+
+    case M4READER_kOptionID_Bitrate:
+        {
+            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+
+            /* BUGFIX: previously *pBitrate was left unwritten (and then
+             * logged uninitialized) when mMaxDuration == 0; default it to 0.
+             * The unused local ui32Tmp was also removed.
+             * Average bitrate (bit/s) = file size in bits / duration in s,
+             * i.e. bytes * 8000 / duration-in-ms. */
+            *pBitrate = 0;
+            if (pC->mMaxDuration != 0) {
+                *pBitrate = (M4OSA_UInt32)(pC->mFileSize * 8000.0 / pC->mMaxDuration);
+            }
+            ALOGV("VideoEditor3gpReader_getOption bitrate %ld", *pBitrate);
+        }
+        break;
+    case M4READER_3GP_kOptionID_H263Properties:
+        {
+            if(M4OSA_NULL == pC->mVideoStreamHandler) {
+                ALOGV("VideoEditor3gpReader_getOption no videoStream retrieved");
+
+                err = M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET;
+                break;
+            }
+            if((M4DA_StreamTypeVideoH263 != pC->mVideoStreamHandler->\
+                m_streamType) || (pC->mVideoStreamHandler->\
+                m_decoderSpecificInfoSize < 7)) {
+                ALOGV("VideoEditor3gpReader_getOption DSI Size %d",
+                    pC->mVideoStreamHandler->m_decoderSpecificInfoSize);
+
+                err = M4ERR_VIDEO_NOT_H263;
+                break;
+            }
+
+            /* MAGICAL in the decoder confi H263: the 7th byte is the profile
+             * number, 6th byte is the level number */
+            ((M4READER_3GP_H263Properties *)pValue)->uiProfile =
+                pC->mVideoStreamHandler->m_pDecoderSpecificInfo[6];
+            ((M4READER_3GP_H263Properties *)pValue)->uiLevel =
+                pC->mVideoStreamHandler->m_pDecoderSpecificInfo[5];
+            ALOGV("VideoEditor3gpReader_getOption M4READER_3GP_kOptionID_\
+                H263Properties end");
+        }
+        break;
+    case M4READER_3GP_kOptionID_PurpleLabsDrm:
+        ALOGV("VideoEditor3gpReaderOption M4READER_3GP_kOptionID_PurpleLabsDrm");
+        /* not used */
+        break;
+
+    case M4READER_kOptionID_GetNumberOfAudioAu:
+        /* not used */
+        ALOGV("VideoEditor3gpReadeOption M4READER_kOptionID_GetNumberOfAudioAu");
+    break;
+
+    case M4READER_kOptionID_GetNumberOfVideoAu:
+        /* not used */
+        ALOGV("VideoEditor3gpReader_getOption :GetNumberOfVideoAu");
+    break;
+
+    case M4READER_kOptionID_GetMetadata:
+        /* not used */
+        ALOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_GetMetadata");
+    break;
+
+    case M4READER_kOptionID_3gpFtypBox:
+        /* used only for SEMC */
+        ALOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_3gpFtypBox");
+        err = M4ERR_BAD_OPTION_ID; //check this
+        break;
+
+#ifdef OPTIONID_GET_NEXT_VIDEO_CTS
+    case M4READER_3GP_kOptionID_getNextVideoCTS:
+        /* not used */
+        ALOGV("VideoEditor3gpReader_getOption: getNextVideoCTS");
+        break;
+#endif
+    default:
+        {
+            err = M4ERR_BAD_OPTION_ID;
+            ALOGV("VideoEditor3gpReader_getOption M4ERR_BAD_OPTION_ID");
+        }
+        break;
+    }
+    ALOGV("VideoEditor3gpReader_getOption end: optionID: x%x", optionId);
+    return err;
+}
+/**
+************************************************************************
+* @brief set an option on the 3gp reader
+* @note No option can be set yet.
+* @param context: (IN) Context of the reader
+* @param optionId: (IN) indicates the option to set
+* @param pValue: (IN) pointer to structure or value (allocated
+* by user) where option is stored
+* @return M4NO_ERROR there is no error
+* @return M4ERR_BAD_CONTEXT provided context is not a valid one
+* @return M4ERR_PARAMETER at least one parameter is not properly set
+* @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_setOption(M4OSA_Context context,
+    M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+        "invalid value pointer");
+
+    ALOGV("VideoEditor3gpReader_setOption begin %d",optionId);
+
+    switch(optionId) {
+    /* These options are accepted but require no action in this shell. */
+    case M4READER_kOptionID_SetOsaFileReaderFctsPtr:
+    case M4READER_3GP_kOptionID_AudioOnly:
+    case M4READER_3GP_kOptionID_VideoOnly:
+    case M4READER_3GP_kOptionID_FastOpenMode:
+    case M4READER_kOptionID_MaxMetadataSize:
+        break;
+
+    default:
+        ALOGV("VideoEditor3gpReader_setOption: returns M4ERR_BAD_OPTION_ID");
+        err = M4ERR_BAD_OPTION_ID;
+        break;
+    }
+    ALOGV("VideoEditor3gpReader_setOption end ");
+    return err;
+}
+/**
+ ************************************************************************
+ * @brief fill the access unit structure with initialization values
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler: (IN) pointer to the stream handler to which
+ * the access unit will be associated
+ * @param pAccessUnit: (IN/OUT) pointer to the access unit (allocated
+ * by the caller) to initialize
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_fillAuStruct(M4OSA_Context context,
+    M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
+    VideoEditor3gpReader_Context* pReaderCtx =
+        (VideoEditor3gpReader_Context*)context;
+
+    M4OSA_DEBUG_IF1((pReaderCtx == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_fillAuStruct: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_fillAuStruc invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+    ALOGV("VideoEditor3gpReader_fillAuStruct begin");
+
+    /* Bind the AU to the given stream... */
+    pAccessUnit->m_streamID = pStreamHandler->m_streamId;
+    pAccessUnit->m_maxsize = pStreamHandler->m_maxAUSize;
+    pAccessUnit->m_structSize = sizeof(M4_AccessUnit);
+    /* ...and reset it to an empty state with no payload. */
+    pAccessUnit->m_dataAddress = M4OSA_NULL;
+    pAccessUnit->m_size = 0;
+    pAccessUnit->m_CTS = 0;
+    pAccessUnit->m_DTS = 0;
+    pAccessUnit->m_attribute = 0;
+
+    ALOGV("VideoEditor3gpReader_fillAuStruct end");
+    return M4NO_ERROR;
+}
+
+/**
+********************************************************************************
+* @brief jump into the stream at the specified time
+* @note
+* @param context: (IN) Context of the reader
+* @param pStreamHandler (IN) the stream handler of the stream to make jump
+* @param pTime (I/O)IN the time to jump to (in ms)
+* OUT the time to which the stream really jumped
+* @return M4NO_ERROR there is no error
+* @return M4ERR_PARAMETER at least one parameter is not properly set
+********************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_jump(M4OSA_Context context,
+    M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4SYS_AccessUnit* pAu;
+    M4OSA_Time time64;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_jump: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_jump: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_jump: invalid time pointer");
+
+    ALOGV("VideoEditor3gpReader_jump begin");
+
+    /* Seeking exactly to the stream end yields no AU; back off by 1 ms. */
+    if (*pTime == (pStreamHandler->m_duration)) {
+        *pTime -= 1;
+    }
+    time64 = (M4OSA_Time)*pTime;
+
+    ALOGV("VideoEditor3gpReader_jump time us %ld ", time64);
+
+    const bool matchesAudio = (pC->mAudioStreamHandler != M4OSA_NULL) &&
+        (pStreamHandler->m_streamId == pC->mAudioStreamHandler->m_streamId);
+    const bool matchesVideo = (pC->mVideoStreamHandler != M4OSA_NULL) &&
+        (pStreamHandler->m_streamId == pC->mVideoStreamHandler->m_streamId);
+
+    if (matchesAudio) {
+        /* Rewind the audio AU timestamps and arm the deferred seek,
+         * recording the target in microseconds. */
+        pAu = &pC->mAudioAu;
+        pAu->CTS = time64;
+        pAu->DTS = time64;
+
+        time64 = time64 * 1000; /* Convert the time into micro sec */
+        pC->mAudioSeeking = M4OSA_TRUE;
+        pC->mAudioSeekTime = time64;
+        ALOGV("VideoEditor3gpReader_jump AUDIO time us %ld ", time64);
+    } else if (matchesVideo) {
+        pAu = &pC->mVideoAu;
+        pAu->CTS = time64;
+        pAu->DTS = time64;
+
+        time64 = time64 * 1000; /* Convert the time into micro sec */
+        pC->mVideoSeeking = M4OSA_TRUE;
+        pC->mVideoSeekTime = time64;
+        ALOGV("VideoEditor3gpReader_jump VIDEO time us %ld ", time64);
+    } else {
+        ALOGV("VideoEditor3gpReader_jump passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    time64 = time64 / 1000; /* Convert the time into milli sec */
+    ALOGV("VideoEditor3gpReader_jump time ms before seekset %ld ", time64);
+
+    /* Report back the time (ms) the stream will actually jump to. */
+    *pTime = (M4OSA_Int32)time64;
+
+    ALOGV("VideoEditor3gpReader_jump end");
+    return M4NO_ERROR;
+}
+/**
+********************************************************************************
+* @brief reset the stream, that is seek it to beginning and make it ready
+* @note
+* @param context: (IN) Context of the reader
+* @param pStreamHandler (IN) The stream handler of the stream to reset
+* @return M4NO_ERROR there is no error
+* @return M4ERR_PARAMETER at least one parameter is not properly set
+********************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_reset(M4OSA_Context context,
+    M4_StreamHandler *pStreamHandler) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4SYS_AccessUnit* pAu;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_reset: invalid pointer to M4_StreamHandler");
+
+    ALOGV("VideoEditor3gpReader_reset begin");
+
+    /* Map the handler onto its access unit. */
+    if (pStreamHandler == (M4_StreamHandler*)pC->mAudioStreamHandler) {
+        pAu = &pC->mAudioAu;
+    } else if (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler) {
+        pAu = &pC->mVideoAu;
+    } else {
+        ALOGV("VideoEditor3gpReader_reset passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Rewind the AU timestamps to the start of the stream. */
+    pAu->CTS = 0;
+    pAu->DTS = 0;
+
+    ALOGV("VideoEditor3gpReader_reset end");
+    return M4NO_ERROR;
+}
+
+/**
+********************************************************************************
+* @brief Gets an access unit (AU) from the stream handler source.
+* @note An AU is the smallest possible amount of data to be decoded by decoder
+*
+* @param context: (IN) Context of the reader
+* @param pStreamHandler (IN) The stream handler of the stream to make jump
+* @param pAccessUnit (IO) Pointer to access unit to fill with read data
+* @return M4NO_ERROR there is no error
+* @return M4ERR_PARAMETER at least one parameter is not properly set
+* @returns M4ERR_ALLOC memory allocation failed
+* @returns M4WAR_NO_MORE_AU there are no more access unit in the stream
+********************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_getNextAu(M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
+ VideoEditor3gpReader_Context* pC=(VideoEditor3gpReader_Context*)context;
+ M4OSA_ERR err = M4NO_ERROR;
+ M4SYS_AccessUnit* pAu;
+ int64_t tempTime64 = 0;
+ MediaBuffer *mMediaBuffer = NULL;
+ MediaSource::ReadOptions options;
+ M4OSA_Bool flag = M4OSA_FALSE;
+ status_t error;
+ int32_t i32Tmp = 0;
+
+ M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+ "VideoEditor3gpReader_getNextAu: invalid context");
+ M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+ "VideoEditor3gpReader_getNextAu: invalid pointer to M4_StreamHandler");
+ M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
+ "VideoEditor3gpReader_getNextAu: invalid pointer to M4_AccessUnit");
+
+ ALOGV("VideoEditor3gpReader_getNextAu begin");
+
+ if (pStreamHandler == (M4_StreamHandler*)pC->mAudioStreamHandler) {
+ ALOGV("VideoEditor3gpReader_getNextAu audio stream");
+ pAu = &pC->mAudioAu;
+ if (pC->mAudioSeeking == M4OSA_TRUE) {
+ ALOGV("VideoEditor3gpReader_getNextAu audio seek time: %ld",
+ pC->mAudioSeekTime);
+ options.setSeekTo(pC->mAudioSeekTime);
+ pC->mAudioSource->read(&mMediaBuffer, &options);
+
+ mMediaBuffer->meta_data()->findInt64(kKeyTime,
+ (int64_t*)&tempTime64);
+ options.clearSeekTo();
+ pC->mAudioSeeking = M4OSA_FALSE;
+ flag = M4OSA_TRUE;
+ } else {
+ ALOGV("VideoEditor3gpReader_getNextAu audio no seek:");
+ pC->mAudioSource->read(&mMediaBuffer, &options);
+ if (mMediaBuffer != NULL) {
+ mMediaBuffer->meta_data()->findInt64(kKeyTime,
+ (int64_t*)&tempTime64);
+ }
+ }
+ } else if (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler) {
+ ALOGV("VideoEditor3gpReader_getNextAu video steram ");
+ pAu = &pC->mVideoAu;
+ if(pC->mVideoSeeking == M4OSA_TRUE) {
+ flag = M4OSA_TRUE;
+ ALOGV("VideoEditor3gpReader_getNextAu seek: %ld",pC->mVideoSeekTime);
+ options.setSeekTo(pC->mVideoSeekTime,
+ MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+ do
+ {
+ if (mMediaBuffer != NULL) {
+ ALOGV("VideoEditor3gpReader_getNextAu free the MediaBuffer");
+ mMediaBuffer->release();
+ }
+ error = pC->mVideoSource->read(&mMediaBuffer, &options);
+ ALOGV("VE3gpReader_getNextAu MediaBuffer %x , error %d",
+ mMediaBuffer, error);
+ if (mMediaBuffer != NULL)
+ {
+ if (mMediaBuffer->meta_data()->findInt32(kKeyIsSyncFrame,
+ &i32Tmp) && i32Tmp) {
+ ALOGV("SYNC FRAME FOUND--%d", i32Tmp);
+ pAu->attribute = AU_RAP;
+ }
+ else {
+ pAu->attribute = AU_P_Frame;
+ }
+ mMediaBuffer->meta_data()->findInt64(kKeyTime,
+ (int64_t*)&tempTime64);
+ } else {
+ break;
+ }
+ options.clearSeekTo();
+ } while(tempTime64 < pC->mVideoSeekTime);
+
+ ALOGV("VE3gpReader_getNextAu: video time with seek = %lld:",
+ tempTime64);
+ pC->mVideoSeeking = M4OSA_FALSE;
+ } else {
+ ALOGV("VideoEditor3gpReader_getNextAu video no seek:");
+ pC->mVideoSource->read(&mMediaBuffer, &options);
+
+ if(mMediaBuffer != NULL) {
+ if (mMediaBuffer->meta_data()->findInt32(kKeyIsSyncFrame,
+ &i32Tmp) && i32Tmp) {
+ ALOGV("SYNC FRAME FOUND--%d", i32Tmp);
+ pAu->attribute = AU_RAP;
+ }
+ else {
+ pAu->attribute = AU_P_Frame;
+ }
+ mMediaBuffer->meta_data()->findInt64(kKeyTime,
+ (int64_t*)&tempTime64);
+ ALOGV("VE3gpReader_getNextAu: video no seek time = %lld:",
+ tempTime64);
+ }else {
+ ALOGV("VE3gpReader_getNextAu:video no seek time buffer is NULL");
+ }
+ }
+ } else {
+ ALOGV("VideoEditor3gpReader_getNextAu M4ERR_PARAMETER");
+ return M4ERR_PARAMETER;
+ }
+
+ if (mMediaBuffer != NULL) {
+ if( (pAu->dataAddress == NULL) || (pAu->size < \
+ mMediaBuffer->range_length())) {
+ if(pAu->dataAddress != NULL) {
+ free((M4OSA_Int32*)pAu->dataAddress);
+ pAu->dataAddress = NULL;
+ }
+ ALOGV("Buffer lenght = %d ,%d",(mMediaBuffer->range_length() +\
+ 3) & ~0x3,(mMediaBuffer->range_length()));
+
+ pAu->dataAddress = (M4OSA_Int32*)M4OSA_32bitAlignedMalloc(
+ (mMediaBuffer->range_length() + 3) & ~0x3,M4READER_3GP,
+ (M4OSA_Char*)"pAccessUnit->m_dataAddress" );
+ if(pAu->dataAddress == NULL) {
+ ALOGV("VideoEditor3gpReader_getNextAu malloc failed");
+ return M4ERR_ALLOC;
+ }
+ }
+ pAu->size = mMediaBuffer->range_length();
+
+ memcpy((void *)pAu->dataAddress,
+ (void *)((const char *)mMediaBuffer->data() + mMediaBuffer->range_offset()),
+ mMediaBuffer->range_length());
+
+ if( (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler) &&
+ (pStreamHandler->m_streamType == M4DA_StreamTypeVideoMpeg4Avc) ) {
+ M4OSA_UInt32 size = mMediaBuffer->range_length();
+ M4OSA_UInt8 *lbuffer;
+
+ lbuffer = (M4OSA_UInt8 *) pAu->dataAddress;
+ ALOGV("pAccessUnit->m_dataAddress size = %x",size);
+
+ lbuffer[0] = (size >> 24) & 0xFF;
+ lbuffer[1] = (size >> 16) & 0xFF;
+ lbuffer[2] = (size >> 8) & 0xFF;
+ lbuffer[3] = (size) & 0xFF;
+ }
+
+ pAu->CTS = tempTime64;
+
+ pAu->CTS = pAu->CTS / 1000; //converting the microsec to millisec
+ ALOGV("VideoEditor3gpReader_getNextAu CTS = %ld",pAu->CTS);
+
+ pAu->DTS = pAu->CTS;
+ if (pStreamHandler == (M4_StreamHandler*)pC->mAudioStreamHandler) {
+ pAu->attribute = M4SYS_kFragAttrOk;
+ }
+ mMediaBuffer->release();
+
+ pAccessUnit->m_dataAddress = (M4OSA_Int8*) pAu->dataAddress;
+ pAccessUnit->m_size = pAu->size;
+ pAccessUnit->m_maxsize = pAu->size;
+ pAccessUnit->m_CTS = pAu->CTS;
+ pAccessUnit->m_DTS = pAu->DTS;
+ pAccessUnit->m_attribute = pAu->attribute;
+
+ } else {
+ ALOGV("VideoEditor3gpReader_getNextAu: M4WAR_NO_MORE_AU (EOS) reached");
+ pAccessUnit->m_size = 0;
+ err = M4WAR_NO_MORE_AU;
+ }
+ options.clearSeekTo();
+
+ pAu->nbFrag = 0;
+ mMediaBuffer = NULL;
+ ALOGV("VideoEditor3gpReader_getNextAu end ");
+
+ return err;
+}
+/**
+ *******************************************************************************
+ * @brief Split the AVC DSI in its different components and write it in
+ * ONE memory buffer
+ * @note
+ * @param pStreamHandler: (IN/OUT) The MPEG4-AVC stream
+ * @param pDecoderConfigLocal: (IN) The DSI buffer
+ * @param decoderConfigSizeLocal: (IN) The DSI buffer size
+ * @return M4NO_ERROR there is no error
+ * @return ERR_FILE_SYNTAX_ERROR pDecoderConfigLocal is NULL
+ *******************************************************************************
+*/
+ static M4OSA_ERR VideoEditor3gpReader_AnalyseAvcDsi(
+ M4_StreamHandler *pStreamHandler, M4OSA_Int32* pDecoderConfigLocal,
+ M4OSA_Int32 decoderConfigSizeLocal) {
+ struct _avcSpecificInfo *pAvcSpecInfo = M4OSA_NULL;
+ M4OSA_UInt32 uiSpecInfoSize;
+ M4OSA_Context pBitParserContext = M4OSA_NULL;
+ M4OSA_MemAddr8 pPos;
+
+ /* Two-pass parse of the avcC record: pass 1 measures the SPS/PPS tables
+ so that ONE allocation can hold header + tables + raw parameter-set
+ bytes; pass 2 re-parses the same bits and copies the data into it. */
+ /**
+ * First parsing to get the total allocation size (we must not do
+ * multiple malloc, but only one instead) */
+ {
+ M4OSA_Int32 val;
+ M4OSA_UInt32 i,j;
+ M4OSA_UInt8 nalUnitLength;
+ M4OSA_UInt8 numOfSequenceParameterSets;
+ M4OSA_UInt32 uiTotalSizeOfSPS = 0;
+ M4OSA_UInt8 numOfPictureParameterSets;
+ M4OSA_UInt32 uiTotalSizeOfPPS = 0;
+ M4OSA_UInt32 uiSize;
+ struct _avcSpecificInfo avcSpIf;
+
+ avcSpIf.m_nalUnitLength = 0;
+
+ if (M4OSA_NULL == pDecoderConfigLocal) {
+ return M4ERR_READER3GP_DECODER_CONFIG_ERROR;
+ }
+
+ VideoEditor3gpReader_MPEG4BitStreamParserInit(&pBitParserContext,
+ pDecoderConfigLocal, decoderConfigSizeLocal);
+
+ if (M4OSA_NULL == pBitParserContext) {
+ return M4ERR_ALLOC;
+ }
+
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 8 bits -- configuration version */
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 8 bits -- avc profile indication*/
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 8 bits -- profile compatibility */
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 8 bits -- avc level indication*/
+ val=VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext, 8);
+ /* 6 bits reserved 111111b 2 bits length Size minus one*/
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* m_nalUnitLength */
+
+ nalUnitLength = (M4OSA_UInt8)((val & 0x03) + 1);/*0b11111100*/
+ if (nalUnitLength > 4) {
+ /* Invalid record: clear the DSI on the stream handler and skip
+ pass 2 (pAvcSpecInfo stays NULL). */
+ pStreamHandler->m_decoderSpecificInfoSize = 0;
+ pStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
+ VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+ } else {
+ /**
+ * SPS table */
+ val=VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext,
+ 8);/* 3 bits-reserved 111b-5 bits number of sequence parameter set*/
+ numOfSequenceParameterSets = val & 0x1F;
+ /*1F instead of E0*/ /*0b11100000*/ /*Number of seq parameter sets*/
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ for (i=0; i < numOfSequenceParameterSets; i++) {
+ /**
+ * Get the size of this element */
+ uiSize =
+ (M4OSA_UInt32)VideoEditor3gpReader_BitStreamParserShowBits(
+ pBitParserContext, 16);
+ uiTotalSizeOfSPS += uiSize;
+ VideoEditor3gpReader_BitStreamParserFlushBits(
+ pBitParserContext, 16);
+ /**
+ *Read the element(dont keep it, we only want size right now) */
+ for (j=0; j<uiSize; j++) {
+ VideoEditor3gpReader_BitStreamParserFlushBits(
+ pBitParserContext, 8);
+ }
+ }
+
+ /**
+ * PPS table (comment said SPS; this block counts picture sets) */
+ numOfPictureParameterSets=(M4OSA_UInt8)\
+ VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext,
+ 8);
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ for (i=0; i < numOfPictureParameterSets; i++) {
+ /**
+ * Get the size of this element */
+ uiSize = (M4OSA_UInt32)
+ VideoEditor3gpReader_BitStreamParserShowBits(
+ pBitParserContext, 16);
+ uiTotalSizeOfPPS += uiSize;
+ VideoEditor3gpReader_BitStreamParserFlushBits(
+ pBitParserContext, 16);
+ /**
+ *Read the element(dont keep it,we only want size right now)*/
+ for (j=0; j<uiSize; j++) {
+ VideoEditor3gpReader_BitStreamParserFlushBits(
+ pBitParserContext, 8);
+ }
+ }
+
+ /**
+ * Compute the size of the full buffer */
+ uiSpecInfoSize = sizeof(struct _avcSpecificInfo) +
+ numOfSequenceParameterSets * sizeof(struct _parameterSet)
+ + /**< size of the table of SPS elements */
+ numOfPictureParameterSets * sizeof(struct _parameterSet)
+ + /**< size of the table of PPS elements */
+ uiTotalSizeOfSPS +
+ uiTotalSizeOfPPS;
+ /**
+ * Allocate the buffer */
+ pAvcSpecInfo =(struct _avcSpecificInfo*)M4OSA_32bitAlignedMalloc(uiSpecInfoSize,
+ M4READER_3GP, (M4OSA_Char*)"MPEG-4 AVC DecoderSpecific");
+ if (M4OSA_NULL == pAvcSpecInfo) {
+ VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+ return M4ERR_ALLOC;
+ }
+
+ /**
+ * Set the pointers to the correct part of the buffer */
+ pAvcSpecInfo->m_nalUnitLength = nalUnitLength;
+ pAvcSpecInfo->m_numOfSequenceParameterSets =
+ numOfSequenceParameterSets;
+ pAvcSpecInfo->m_numOfPictureParameterSets =
+ numOfPictureParameterSets;
+
+ /* We place the SPS param sets table after m_pPictureParameterSet */
+ pAvcSpecInfo->m_pSequenceParameterSet= (struct _parameterSet*)(
+ (M4OSA_MemAddr8)(&pAvcSpecInfo->m_pPictureParameterSet) +
+ sizeof(pAvcSpecInfo->m_pPictureParameterSet));
+ /*We place the PPS param sets table after the SPS param sets table*/
+ pAvcSpecInfo->m_pPictureParameterSet = (struct _parameterSet*)(
+ (M4OSA_MemAddr8)(pAvcSpecInfo->m_pSequenceParameterSet) +
+ (numOfSequenceParameterSets * sizeof(struct _parameterSet)));
+ /**< The data will be placed after the PPS param sets table */
+ pPos = (M4OSA_MemAddr8)pAvcSpecInfo->m_pPictureParameterSet +
+ (numOfPictureParameterSets * sizeof(struct _parameterSet));
+ /* pPos now points at the payload area following the PPS table; the
+ second pass below copies the raw SPS/PPS bytes there. */
+
+ /**
+ * reset the bit parser */
+ VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+ }
+ }
+
+ /**
+ * Second parsing to copy the data */
+ if (M4OSA_NULL != pAvcSpecInfo) {
+ M4OSA_Int32 i,j;
+
+ VideoEditor3gpReader_MPEG4BitStreamParserInit(&pBitParserContext,
+ pDecoderConfigLocal, decoderConfigSizeLocal);
+
+ if (M4OSA_NULL == pBitParserContext) {
+ free(pAvcSpecInfo);
+ return M4ERR_ALLOC;
+ }
+
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 8 bits -- configuration version */
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 8 bits -- avc profile indication*/
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 8 bits -- profile compatibility */
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 8 bits -- avc level indication*/
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* m_nalUnitLength */
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* 3 bits -- reserved 111b -- 5 bits number of sequence parameter set*/
+
+ for (i=0; i < pAvcSpecInfo->m_numOfSequenceParameterSets; i++) {
+ pAvcSpecInfo->m_pSequenceParameterSet[i].m_length =
+ (M4OSA_UInt16)VideoEditor3gpReader_BitStreamParserShowBits(
+ pBitParserContext, 16);
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext,16);
+
+ pAvcSpecInfo->m_pSequenceParameterSet[i].m_pParameterSetUnit =
+ (M4OSA_UInt8*)pPos; /**< current position in the buffer */
+ pPos += pAvcSpecInfo->m_pSequenceParameterSet[i].m_length;
+ /**< increment the position in the buffer */
+ for (j=0; j<pAvcSpecInfo->m_pSequenceParameterSet[i].m_length;j++){
+ pAvcSpecInfo->m_pSequenceParameterSet[i].m_pParameterSetUnit[j]=
+ (M4OSA_UInt8)VideoEditor3gpReader_BitStreamParserShowBits(
+ pBitParserContext, 8);
+ VideoEditor3gpReader_BitStreamParserFlushBits(
+ pBitParserContext, 8);
+ }
+ }
+
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+ /* number of picture parameter set*/
+
+ for (i=0; i < pAvcSpecInfo->m_numOfPictureParameterSets; i++) {
+ pAvcSpecInfo->m_pPictureParameterSet[i].m_length =
+ (M4OSA_UInt16)VideoEditor3gpReader_BitStreamParserShowBits(
+ pBitParserContext, 16);
+ VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext,16);
+
+ pAvcSpecInfo->m_pPictureParameterSet[i].m_pParameterSetUnit =
+ (M4OSA_UInt8*)pPos; /**< current position in the buffer */
+ pPos += pAvcSpecInfo->m_pPictureParameterSet[i].m_length;
+ /**< increment the position in the buffer */
+ for (j=0; j<pAvcSpecInfo->m_pPictureParameterSet[i].m_length; j++) {
+ pAvcSpecInfo->m_pPictureParameterSet[i].m_pParameterSetUnit[j] =
+ (M4OSA_UInt8)VideoEditor3gpReader_BitStreamParserShowBits(
+ pBitParserContext, 8);
+ VideoEditor3gpReader_BitStreamParserFlushBits(
+ pBitParserContext, 8);
+ }
+ }
+ VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+ /* Ownership of pAvcSpecInfo transfers to the stream handler here. */
+ pStreamHandler->m_decoderSpecificInfoSize = uiSpecInfoSize;
+ pStreamHandler->m_pDecoderSpecificInfo = (M4OSA_UInt8*)pAvcSpecInfo;
+ }
+ /* Keep a verbatim copy of the raw avcC record as well. */
+ pStreamHandler->m_H264decoderSpecificInfoSize = decoderConfigSizeLocal;
+ pStreamHandler->m_pH264DecoderSpecificInfo = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ decoderConfigSizeLocal, M4READER_3GP,
+ (M4OSA_Char*)"MPEG-4 AVC DecoderSpecific");
+ if (M4OSA_NULL == pStreamHandler->m_pH264DecoderSpecificInfo) {
+ goto cleanup;
+ }
+
+ memcpy((void * ) pStreamHandler->m_pH264DecoderSpecificInfo,
+ (void * )pDecoderConfigLocal,
+ pStreamHandler->m_H264decoderSpecificInfoSize);
+ return M4NO_ERROR;
+ /* NOTE(review): every path that reaches this label has already called
+ BitStreamParserCleanUp on pBitParserContext above, and the pointer is
+ not reset to NULL -- confirm a second cleanup call is safe here
+ (possible double free). */
+cleanup:
+ VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+ return M4ERR_READER3GP_DECODER_CONFIG_ERROR;
+}
+/**
+********************************************************************************
+* @brief Get the next stream found in the 3gp file
+* @note
+* @param context: (IN) Context of the reader
+* @param pMediaFamily: OUT) pointer to a user allocated
+* M4READER_MediaFamily that will be filled
+* with the media family of the found stream
+* @param pStreamHandler:(OUT) pointer to StreamHandler that will be allocated
+* and filled with the found stream description
+* @return M4NO_ERROR there is no error
+* @return M4ERR_BAD_CONTEXT provided context is not a valid one
+* @return M4ERR_PARAMETER at least one parameter is not properly set
+* @return M4WAR_NO_MORE_STREAM no more available stream in the media
+********************************************************************************
+*/
+ M4OSA_ERR VideoEditor3gpReader_getNextStreamHandler(M4OSA_Context context,
+ M4READER_MediaFamily *pMediaFamily,
+ M4_StreamHandler **pStreamHandler) {
+ VideoEditor3gpReader_Context* pC=(VideoEditor3gpReader_Context*)context;
+ M4OSA_ERR err = M4NO_ERROR;
+ M4SYS_StreamID streamIdArray[2];
+ M4SYS_StreamDescription streamDesc;
+ M4_AudioStreamHandler* pAudioStreamHandler;
+ M4_VideoStreamHandler* pVideoStreamHandler;
+ M4OSA_Int8 *DecoderSpecificInfo = M4OSA_NULL;
+ M4OSA_Int32 decoderSpecificInfoSize =0, maxAUSize = 0;
+
+ M4_StreamType streamType = M4DA_StreamTypeUnknown;
+ M4OSA_UInt8 temp, i, trackCount;
+ M4OSA_Bool haveAudio = M4OSA_FALSE;
+ M4OSA_Bool haveVideo = M4OSA_FALSE;
+ sp<MetaData> meta = NULL;
+ int64_t Duration = 0;
+ M4OSA_UInt8* DecoderSpecific = M4OSA_NULL ;
+ uint32_t type;
+ const void *data;
+ size_t size;
+ const void *codec_specific_data;
+ size_t codec_specific_data_size;
+ M4OSA_Int32 ptempTime;
+ M4OSA_Int32 avgFPS=0;
+ /* NOTE(review): haveAudio/haveVideo are function-local and reset to
+ FALSE on every call, so the "!haveVideo"/"!haveAudio" guards below are
+ always true; cross-call progress is tracked via pC->mCurrTrack only. */
+
+ ALOGV("VideoEditor3gpReader_getNextStreamHandler begin");
+
+ M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+ "VideoEditor3gpReader_getNextStreamHandler: invalid context");
+ M4OSA_DEBUG_IF1((pMediaFamily == 0), M4ERR_PARAMETER,
+ "getNextStreamHandler: invalid pointer to MediaFamily");
+ M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+ "getNextStreamHandler: invalid pointer to StreamHandler");
+
+ trackCount = pC->mExtractor->countTracks();
+ temp = pC->mCurrTrack;
+
+ if(temp >= trackCount) {
+ ALOGV("VideoEditor3gpReader_getNextStreamHandler error = %d",
+ M4WAR_NO_MORE_STREAM);
+ return (M4WAR_NO_MORE_STREAM);
+ } else {
+ const char *mime;
+ meta = pC->mExtractor->getTrackMetaData(temp);
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
+ pC->mVideoSource = pC->mExtractor->getTrack(temp);
+ pC->mVideoSource->start();
+
+ *pMediaFamily = M4READER_kMediaFamilyVideo;
+ haveVideo = true;
+ ALOGV("VideoEditor3gpReader_getNextStreamHandler getTrack called");
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ streamType = M4DA_StreamTypeVideoMpeg4Avc;
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_H263)) {
+ streamType = M4DA_StreamTypeVideoH263;
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)) {
+ streamType = M4DA_StreamTypeVideoMpeg4;
+ } else {
+ ALOGV("VideoEditor3gpReaderGetNextStreamHandler streamTypeNONE");
+ }
+ ALOGV("VideoEditor3gpReader_getNextStreamHandler: stream type: %d ",
+ streamType);
+
+ if(streamType != M4DA_StreamTypeUnknown) {
+ pC->mStreamType = streamType;
+ pC->mStreamId = pC->mCurrTrack;
+
+ pVideoStreamHandler = (M4_VideoStreamHandler*)M4OSA_32bitAlignedMalloc
+ (sizeof(M4_VideoStreamHandler), M4READER_3GP,
+ (M4OSA_Char*)"M4_VideoStreamHandler");
+ if (M4OSA_NULL == pVideoStreamHandler) {
+ return M4ERR_ALLOC;
+ }
+ pVideoStreamHandler->m_structSize=sizeof(M4_VideoStreamHandler);
+
+ meta->findInt32(kKeyWidth,
+ (int32_t*)&(pVideoStreamHandler->m_videoWidth));
+ meta->findInt32(kKeyHeight,
+ (int32_t*)&(pVideoStreamHandler->m_videoHeight));
+
+ (*pStreamHandler) = (M4_StreamHandler*)(pVideoStreamHandler);
+ meta->findInt64(kKeyDuration,
+ (int64_t*)&(Duration));
+ ((*pStreamHandler)->m_duration) =
+ (int32_t)((Duration)/1000); // conversion to mS
+ pC->mMaxDuration = ((*pStreamHandler)->m_duration);
+ ALOGV("VideoEditor3gpReader_getNextStreamHandler m_duration %d",
+ (*pStreamHandler)->m_duration);
+
+ off64_t fileSize = 0;
+ pC->mDataSource->getSize(&fileSize);
+ pC->mFileSize = fileSize;
+
+ ALOGV("VideoEditor3gpReader_getNextStreamHandler m_fileSize %d",
+ pC->mFileSize);
+
+ meta->findInt32(kKeyMaxInputSize, (int32_t*)&(maxAUSize));
+ if(maxAUSize == 0) {
+ maxAUSize = 70000;
+ }
+ (*pStreamHandler)->m_maxAUSize = maxAUSize;
+ ALOGV("<<<<<<<<<< video: mMaxAUSize from MP4 extractor: %d",
+ (*pStreamHandler)->m_maxAUSize);
+
+ /* Approximate average bitrate from file size over duration;
+ mMaxDuration was set just above so it is non-zero here only
+ if the track reported a duration -- presumably guaranteed by
+ the extractor; TODO confirm. */
+ ((M4_StreamHandler*)pVideoStreamHandler)->m_averageBitRate =
+ (pC->mFileSize * 8000)/pC->mMaxDuration;
+ ALOGV("VideoEditor3gpReader_getNextStreamHandler m_averageBitrate %d",
+ ((M4_StreamHandler*)pVideoStreamHandler)->m_averageBitRate);
+
+
+ meta->findInt32(kKeyFrameRate,
+ (int32_t*)&(avgFPS));
+ ALOGV("<<<<<<<<<< video: Average FPS from MP4 extractor: %d",
+ avgFPS);
+
+ pVideoStreamHandler->m_averageFrameRate =(M4OSA_Float) avgFPS;
+ ALOGV("<<<<<<<<<< video: Average FPS from MP4 extractor in FLOAT: %f",
+ pVideoStreamHandler->m_averageFrameRate);
+
+ // Get the video rotation degree
+ int32_t rotationDegree;
+ if(!meta->findInt32(kKeyRotation, &rotationDegree)) {
+ rotationDegree = 0;
+ }
+ pVideoStreamHandler->videoRotationDegrees = rotationDegree;
+
+ pC->mVideoStreamHandler =
+ (M4_StreamHandler*)(pVideoStreamHandler);
+
+ /* Get the DSI info */
+ if(M4DA_StreamTypeVideoH263 == streamType) {
+ if (meta->findData(kKeyD263, &type, &data, &size)) {
+ (*pStreamHandler)->m_decoderSpecificInfoSize = size;
+ if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
+ DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ (*pStreamHandler)->m_decoderSpecificInfoSize,
+ M4READER_3GP,(M4OSA_Char*)"H263 DSI");
+ if (M4OSA_NULL == DecoderSpecific) {
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)DecoderSpecific,
+ (void *)data, size);
+ (*pStreamHandler)->m_pDecoderSpecificInfo =
+ DecoderSpecific;
+ }
+ else {
+ (*pStreamHandler)->m_pDecoderSpecificInfo =
+ M4OSA_NULL;
+ (*pStreamHandler)->m_decoderSpecificInfoSize = 0;
+ }
+ (*pStreamHandler)->m_pESDSInfo = M4OSA_NULL;
+ (*pStreamHandler)->m_ESDSInfoSize = 0;
+ (*pStreamHandler)->m_pH264DecoderSpecificInfo = M4OSA_NULL;
+ (*pStreamHandler)->m_H264decoderSpecificInfoSize = 0;
+ } else {
+ ALOGV("VE_getNextStreamHandler: H263 dsi not found");
+ (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
+ (*pStreamHandler)->m_decoderSpecificInfoSize = 0;
+ (*pStreamHandler)->m_H264decoderSpecificInfoSize = 0;
+ (*pStreamHandler)->m_pH264DecoderSpecificInfo =
+ M4OSA_NULL;
+ (*pStreamHandler)->m_pESDSInfo = M4OSA_NULL;
+ (*pStreamHandler)->m_ESDSInfoSize = 0;
+ }
+ }
+ else if(M4DA_StreamTypeVideoMpeg4Avc == streamType) {
+ if(meta->findData(kKeyAVCC, &type, &data, &size)) {
+ decoderSpecificInfoSize = size;
+ if (decoderSpecificInfoSize != 0) {
+ DecoderSpecificInfo = (M4OSA_Int8*)M4OSA_32bitAlignedMalloc(
+ decoderSpecificInfoSize, M4READER_3GP,
+ (M4OSA_Char*)"H264 DecoderSpecific" );
+ if (M4OSA_NULL == DecoderSpecificInfo) {
+ ALOGV("VideoEditor3gp_getNextStream is NULL ");
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)DecoderSpecificInfo,
+ (void *)data, decoderSpecificInfoSize);
+ } else {
+ ALOGV("DSI Size %d", decoderSpecificInfoSize);
+ DecoderSpecificInfo = M4OSA_NULL;
+ }
+ }
+ (*pStreamHandler)->m_pESDSInfo = M4OSA_NULL;
+ (*pStreamHandler)->m_ESDSInfoSize = 0;
+
+ err = VideoEditor3gpReader_AnalyseAvcDsi(*pStreamHandler,
+ (M4OSA_Int32*)DecoderSpecificInfo, decoderSpecificInfoSize);
+
+ /* NOTE(review): on this error path DecoderSpecificInfo (and the
+ stream handler allocated above) are not freed -- potential
+ leak; confirm ownership before changing. */
+ if (M4NO_ERROR != err) {
+ return err;
+ }
+ ALOGV("decsize %d, h264decsize %d: %d", (*pStreamHandler)->\
+ m_decoderSpecificInfoSize, (*pStreamHandler)->\
+ m_H264decoderSpecificInfoSize);
+
+ if(M4OSA_NULL != DecoderSpecificInfo) {
+ free(DecoderSpecificInfo);
+ DecoderSpecificInfo = M4OSA_NULL;
+ }
+ } else if( (M4DA_StreamTypeVideoMpeg4 == streamType) ) {
+ if (meta->findData(kKeyESDS, &type, &data, &size)) {
+ ESDS esds((const char *)data, size);
+ CHECK_EQ(esds.InitCheck(), (status_t)OK);
+
+ (*pStreamHandler)->m_ESDSInfoSize = size;
+ (*pStreamHandler)->m_pESDSInfo = (M4OSA_UInt8*)\
+ M4OSA_32bitAlignedMalloc((*pStreamHandler)->m_ESDSInfoSize,
+ M4READER_3GP, (M4OSA_Char*)"M4V DecoderSpecific" );
+ if (M4OSA_NULL == (*pStreamHandler)->m_pESDSInfo) {
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)(*pStreamHandler)->\
+ m_pESDSInfo, (void *)data, size);
+
+ esds.getCodecSpecificInfo(&codec_specific_data,
+ &codec_specific_data_size);
+ ALOGV("VE MP4 dsisize: %d, %x", codec_specific_data_size,
+ codec_specific_data);
+
+ (*pStreamHandler)->m_decoderSpecificInfoSize =
+ codec_specific_data_size;
+ if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
+ DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ (*pStreamHandler)->m_decoderSpecificInfoSize,
+ M4READER_3GP, (M4OSA_Char*)" DecoderSpecific" );
+ if (M4OSA_NULL == DecoderSpecific) {
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)DecoderSpecific,
+ (void *)codec_specific_data,
+ codec_specific_data_size);
+ (*pStreamHandler)->m_pDecoderSpecificInfo =
+ DecoderSpecific;
+ }
+ else {
+ (*pStreamHandler)->m_pDecoderSpecificInfo =
+ M4OSA_NULL;
+ }
+ (*pStreamHandler)->m_pH264DecoderSpecificInfo =
+ M4OSA_NULL;
+ (*pStreamHandler)->m_H264decoderSpecificInfoSize = 0;
+ }
+ } else {
+ ALOGV("VideoEditor3gpReader_getNextStream NO video stream");
+ return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+ }
+ }
+ else {
+ ALOGV("VideoEditor3gpReader_getNextStream NO video stream");
+ return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+ }
+
+ } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
+ ALOGV("VideoEditor3gpReader_getNextStream audio getTrack called");
+ pC->mAudioSource = pC->mExtractor->getTrack(pC->mCurrTrack);
+ pC->mAudioSource->start();
+ *pMediaFamily = M4READER_kMediaFamilyAudio;
+
+ if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
+ streamType = M4DA_StreamTypeAudioAmrNarrowBand;
+ } else if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
+ streamType = M4DA_StreamTypeAudioAmrWideBand;
+ }
+ else if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
+ streamType = M4DA_StreamTypeAudioAac;
+ } else {
+ ALOGV("VideoEditor3gpReader_getNextStrea streamtype Unknown ");
+ }
+ if(streamType != M4DA_StreamTypeUnknown) {
+ pC->mStreamType = streamType;
+ pC->mStreamId = pC->mCurrTrack;
+
+ ALOGV("VE streamtype %d ,id %d", streamType, pC->mCurrTrack);
+
+ pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_32bitAlignedMalloc
+ (sizeof(M4_AudioStreamHandler), M4READER_3GP,
+ (M4OSA_Char*)"M4_AudioStreamHandler");
+ if (M4OSA_NULL == pAudioStreamHandler) {
+ return M4ERR_ALLOC;
+ }
+ pAudioStreamHandler->m_structSize=sizeof(M4_AudioStreamHandler);
+ pAudioStreamHandler->m_byteSampleSize = 0;
+ pAudioStreamHandler->m_nbChannels = 0;
+ pAudioStreamHandler->m_samplingFrequency= 0;
+ pAudioStreamHandler->m_byteFrameLength = 0;
+
+ (*pStreamHandler) = (M4_StreamHandler*)(pAudioStreamHandler);
+ pC->mAudioStreamHandler =
+ (M4_StreamHandler*)(pAudioStreamHandler);
+ (*pStreamHandler)->m_averageBitRate = 0;
+ haveAudio = true;
+ pC->mAudioStreamHandler=(M4_StreamHandler*)pAudioStreamHandler;
+ pC->mAudioStreamHandler->m_pESDSInfo = M4OSA_NULL;
+ pC->mAudioStreamHandler->m_ESDSInfoSize = 0;
+
+ meta->findInt32(kKeyMaxInputSize, (int32_t*)&(maxAUSize));
+ if(maxAUSize == 0) {
+ maxAUSize = 70000;
+ }
+ (*pStreamHandler)->m_maxAUSize = maxAUSize;
+ ALOGV("VE Audio mMaxAUSize from MP4 extractor: %d", maxAUSize);
+ }
+ if((M4DA_StreamTypeAudioAmrNarrowBand == streamType) ||
+ (M4DA_StreamTypeAudioAmrWideBand == streamType)) {
+ M4OSA_UInt32 freqIndex = 0; /**< AMR NB */
+ M4OSA_UInt32 modeSet;
+ M4OSA_UInt32 i;
+ M4OSA_Context pBitParserContext = M4OSA_NULL;
+
+ if(M4DA_StreamTypeAudioAmrWideBand == streamType) {
+ freqIndex = 1; /**< AMR WB */
+ }
+
+ if (meta->findData(kKeyESDS, &type, &data, &size)) {
+ ESDS esds((const char *)data, size);
+ CHECK_EQ(esds.InitCheck(), (status_t)OK);
+
+ esds.getCodecSpecificInfo(&codec_specific_data,
+ &codec_specific_data_size);
+ (*pStreamHandler)->m_decoderSpecificInfoSize =
+ codec_specific_data_size;
+
+ if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
+ DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ (*pStreamHandler)->m_decoderSpecificInfoSize,
+ M4READER_3GP, (M4OSA_Char*)"AMR DecoderSpecific" );
+ if (M4OSA_NULL == DecoderSpecific) {
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)DecoderSpecific,
+ (void *)codec_specific_data,
+ codec_specific_data_size);
+ (*pStreamHandler)->m_pDecoderSpecificInfo =
+ DecoderSpecific;
+ } else {
+ (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
+ }
+ } else {
+ /* No ESDS box: synthesize a fixed 'PHLP' DSI; byte 8 encodes
+ the band (0x01 = narrow band, 0x02 = wide band). */
+ M4OSA_UChar AmrDsi[] =
+ {'P','H','L','P',0x00, 0x00, 0x80, 0x00, 0x01,};
+ (*pStreamHandler)->m_decoderSpecificInfoSize = 9;
+ DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ (*pStreamHandler)->m_decoderSpecificInfoSize,
+ M4READER_3GP, (M4OSA_Char*)"PHLP DecoderSpecific" );
+ if (M4OSA_NULL == DecoderSpecific) {
+ return M4ERR_ALLOC;
+ }
+ if(freqIndex ==0) {
+ AmrDsi[8] = 0x01;
+ } else {
+ AmrDsi[8] = 0x02;
+ }
+ for(i = 0; i< 9; i++) {
+ DecoderSpecific[i] = AmrDsi[i];
+ }
+ (*pStreamHandler)->m_pDecoderSpecificInfo = DecoderSpecific;
+ }
+ (*pStreamHandler)->m_averageBitRate =
+ VideoEditor3gpReader_AmrBitRate[freqIndex][7];
+ } else if((M4DA_StreamTypeAudioAac == streamType)) {
+ if (meta->findData(kKeyESDS, &type, &data, &size)) {
+ ESDS esds((const char *)data, size);
+ CHECK_EQ(esds.InitCheck(), (status_t)OK);
+
+ (*pStreamHandler)->m_ESDSInfoSize = size;
+ (*pStreamHandler)->m_pESDSInfo = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ (*pStreamHandler)->m_ESDSInfoSize, M4READER_3GP,
+ (M4OSA_Char*)"AAC DecoderSpecific" );
+ if (M4OSA_NULL == (*pStreamHandler)->m_pESDSInfo) {
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)(*pStreamHandler)->m_pESDSInfo,
+ (void *)data, size);
+ esds.getCodecSpecificInfo(&codec_specific_data,
+ &codec_specific_data_size);
+
+ ALOGV("VEdsi %d,%x",codec_specific_data_size,
+ codec_specific_data);
+
+ (*pStreamHandler)->m_decoderSpecificInfoSize =
+ codec_specific_data_size;
+ if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
+ DecoderSpecific = (M4OSA_UInt8*)M4OSA_32bitAlignedMalloc(
+ (*pStreamHandler)->m_decoderSpecificInfoSize,
+ M4READER_3GP, (M4OSA_Char*)"AAC DecoderSpecific" );
+ if (M4OSA_NULL == DecoderSpecific) {
+ return M4ERR_ALLOC;
+ }
+ memcpy((void *)DecoderSpecific,
+ (void *)codec_specific_data,
+ codec_specific_data_size);
+ (*pStreamHandler)->m_pDecoderSpecificInfo =
+ DecoderSpecific;
+ } else {
+ (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
+ }
+ }
+ } else {
+ ALOGV("VideoEditor3gpReader_getNextStream mStreamType: none ");
+ return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+ }
+ } else {
+ ALOGV("VE noaudio-video stream:pC->mCurrTrack = %d ",pC->mCurrTrack);
+ pC->mCurrTrack++; //Increment current track to get the next track
+ return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+ }
+ ALOGV("VE StreamType: %d, stremhandler %x",streamType, *pStreamHandler );
+ (*pStreamHandler)->m_streamType = streamType;
+ (*pStreamHandler)->m_streamId = pC->mStreamId;
+ (*pStreamHandler)->m_pUserData = M4OSA_NULL;
+ (*pStreamHandler)->m_structSize = sizeof(M4_StreamHandler);
+ (*pStreamHandler)->m_bStreamIsOK = M4OSA_TRUE;
+
+ meta->findInt64(kKeyDuration,
+ (int64_t*)&(Duration));
+
+ (*pStreamHandler)->m_duration = (int32_t)(Duration / 1000);
+
+ pC->mMaxDuration = ((*pStreamHandler)->m_duration);
+ ALOGV("VE str duration duration: %d ", (*pStreamHandler)->m_duration);
+
+ /* In AAC case: Put the first AU in pAudioStreamHandler->m_pUserData
+ *since decoder has to know if stream contains SBR data(Implicit sig) */
+ if(M4DA_StreamTypeAudioAac == (*pStreamHandler)->m_streamType) {
+ M4READER_AudioSbrUserdata* pAudioSbrUserdata;
+
+ pAudioSbrUserdata = (M4READER_AudioSbrUserdata*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4READER_AudioSbrUserdata),M4READER_3GP,
+ (M4OSA_Char*)"M4READER_AudioSbrUserdata");
+ if (M4OSA_NULL == pAudioSbrUserdata) {
+ err = M4ERR_ALLOC;
+ goto Error;
+ }
+ (*pStreamHandler)->m_pUserData = pAudioSbrUserdata;
+ pAudioSbrUserdata->m_bIsSbrEnabled = M4OSA_FALSE;
+
+ pAudioSbrUserdata->m_pFirstAU = (M4_AccessUnit*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4_AccessUnit),M4READER_3GP, (M4OSA_Char*)"1st AAC AU");
+ if (M4OSA_NULL == pAudioSbrUserdata->m_pFirstAU) {
+ pAudioSbrUserdata->m_pAacDecoderUserConfig = M4OSA_NULL;
+ err = M4ERR_ALLOC;
+ goto Error;
+ }
+ pAudioSbrUserdata->m_pAacDecoderUserConfig = (M4_AacDecoderConfig*)\
+ M4OSA_32bitAlignedMalloc(sizeof(M4_AacDecoderConfig),M4READER_3GP,
+ (M4OSA_Char*)"m_pAacDecoderUserConfig");
+ if (M4OSA_NULL == pAudioSbrUserdata->m_pAacDecoderUserConfig) {
+ err = M4ERR_ALLOC;
+ goto Error;
+ }
+ }
+ if(M4DA_StreamTypeAudioAac == (*pStreamHandler)->m_streamType) {
+ M4_AudioStreamHandler* pAudioStreamHandler =
+ (M4_AudioStreamHandler*)(*pStreamHandler);
+ M4READER_AudioSbrUserdata* pUserData = (M4READER_AudioSbrUserdata*)\
+ (pAudioStreamHandler->m_basicProperties.m_pUserData);
+
+ err = VideoEditor3gpReader_fillAuStruct(pC, (*pStreamHandler),
+ (M4_AccessUnit*)pUserData->m_pFirstAU);
+ if (M4NO_ERROR != err) {
+ goto Error;
+ }
+ err = VideoEditor3gpReader_getNextAu(pC, (*pStreamHandler),
+ (M4_AccessUnit*)pUserData->m_pFirstAU);
+ if (M4NO_ERROR != err) {
+ goto Error;
+ }
+ err = VideoEditor3gpReader_reset(pC, (*pStreamHandler));
+ if (M4NO_ERROR != err) {
+ goto Error;
+ }
+ }
+ }
+ pC->mCurrTrack++; //Increment the current track to get next track
+ ALOGV("pC->mCurrTrack = %d",pC->mCurrTrack);
+
+ if (!haveAudio && !haveVideo) {
+ *pMediaFamily=M4READER_kMediaFamilyUnknown;
+ return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+ }
+ /* Success also falls through this label with err == M4NO_ERROR; no
+ cleanup is performed here for the AAC allocations above. */
+Error:
+ ALOGV("VideoEditor3gpReader_getNextStreamHandler end error = %d",err);
+ return err;
+}
+
+/**
+ * Finds the time of the closest preceding random access point (sync
+ * sample) of the video track relative to the requested time.
+ *
+ * @param context        (IN)     reader context
+ * @param pStreamHandler (IN)     stream handler (its duration is used
+ *                                to clamp the requested time)
+ * @param pTime          (IN/OUT) in: target time in ms; out: RAP time in ms
+ * @return M4NO_ERROR on success, M4WAR_NO_MORE_AU if no sync sample found
+ */
+M4OSA_ERR VideoEditor3gpReader_getPrevRapTime(M4OSA_Context context,
+    M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime)
+{
+    VideoEditor3gpReader_Context *pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    MediaBuffer *mMediaBuffer = M4OSA_NULL;
+    MediaSource::ReadOptions options;
+    M4OSA_Time time64;
+    int64_t tempTime64 = 0;
+    status_t error;
+
+    ALOGV("VideoEditor3gpReader_getPrevRapTime begin");
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getPrevRapTime: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getPrevRapTime invalid pointer to StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getPrevRapTime: invalid time pointer");
+    // Seeking exactly to the stream end would find nothing; step back 1 ms.
+    if (*pTime == (pStreamHandler->m_duration)) {
+        *pTime -= 1;
+    }
+
+    // Convert ms to us for the stagefright seek API.
+    time64 = (M4OSA_Time)*pTime * 1000;
+
+    ALOGV("VideoEditor3gpReader_getPrevRapTime seek time: %ld",time64);
+    options.setSeekTo(time64, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+    error = pC->mVideoSource->read(&mMediaBuffer, &options);
+    if (error != OK) {
+        //Can not get the previous Sync.
+        //Must be end of stream.
+        return M4WAR_NO_MORE_AU;
+    }
+
+    // Actual timestamp (us) of the sync sample that was hit.
+    mMediaBuffer->meta_data()->findInt64(kKeyTime, (int64_t*)&tempTime64);
+    ALOGV("VideoEditor3gpReader_getPrevRapTime read time %ld, %x", tempTime64,
+        mMediaBuffer);
+
+    *pTime = (M4OSA_Int32)(tempTime64 / 1000);
+
+    if(mMediaBuffer != M4OSA_NULL) {
+        ALOGV(" mMediaBuffer size = %d length %d", mMediaBuffer->size(),
+            mMediaBuffer->range_length());
+        mMediaBuffer->release();
+        mMediaBuffer = M4OSA_NULL;
+    }
+    options.clearSeekTo();
+
+    // NOTE(review): this branch is unreachable - (error != OK) already
+    // returned M4WAR_NO_MORE_AU above, so error == OK here.
+    if(error != OK) {
+        ALOGV("VideoEditor3gpReader_getPrevRapTime end \
+            M4WAR_READER_INFORMATION_NOT_PRESENT");
+        return M4WAR_READER_INFORMATION_NOT_PRESENT;
+    } else {
+        ALOGV("VideoEditor3gpReader_getPrevRapTime end: err %x", err);
+        err = M4NO_ERROR;
+        return err;
+    }
+}
+
+extern "C" {
+/**
+ * Engine entry point: allocates the reader's global and data interface
+ * tables and fills them with this shell's function pointers.
+ * On any failure both tables are freed before returning the error.
+ */
+M4OSA_ERR VideoEditor3gpReader_getInterface(M4READER_MediaType *pMediaType,
+    M4READER_GlobalInterface **pRdrGlobalInterface,
+    M4READER_DataInterface **pRdrDataInterface) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pMediaType, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrGlobalInterface, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrDataInterface, M4ERR_PARAMETER);
+
+    ALOGV("VideoEditor3gpReader_getInterface begin");
+    ALOGV("VideoEditor3gpReader_getInterface %d 0x%x 0x%x", *pMediaType,
+        *pRdrGlobalInterface,*pRdrDataInterface);
+
+    SAFE_MALLOC(*pRdrGlobalInterface, M4READER_GlobalInterface, 1,
+        "VideoEditor3gpReader_getInterface");
+    SAFE_MALLOC(*pRdrDataInterface, M4READER_DataInterface, 1,
+        "VideoEditor3gpReader_getInterface");
+
+    *pMediaType = M4READER_kMediaType3GPP;
+
+    (*pRdrGlobalInterface)->m_pFctCreate = VideoEditor3gpReader_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy = VideoEditor3gpReader_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen = VideoEditor3gpReader_open;
+    (*pRdrGlobalInterface)->m_pFctClose = VideoEditor3gpReader_close;
+    (*pRdrGlobalInterface)->m_pFctGetOption = VideoEditor3gpReader_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption = VideoEditor3gpReader_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream =
+        VideoEditor3gpReader_getNextStreamHandler;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct =
+        VideoEditor3gpReader_fillAuStruct;
+    // Start/stop are not provided by this shell.
+    (*pRdrGlobalInterface)->m_pFctStart = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctJump = VideoEditor3gpReader_jump;
+    (*pRdrGlobalInterface)->m_pFctReset = VideoEditor3gpReader_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime =
+        VideoEditor3gpReader_getPrevRapTime;
+    (*pRdrDataInterface)->m_pFctGetNextAu = VideoEditor3gpReader_getNextAu;
+    // The reader context is attached later, at create/open time.
+    (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditor3gpReader_getInterface no error");
+    } else {
+        SAFE_FREE(*pRdrGlobalInterface);
+        SAFE_FREE(*pRdrDataInterface);
+
+        ALOGV("VideoEditor3gpReader_getInterface ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditor3gpReader_getInterface end");
+    return err;
+}
+
+} /* extern "C" */
+
+} /* namespace android */
+
+
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp
new file mode 100755
index 0000000..9b35d07
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp
@@ -0,0 +1,991 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorAudioDecoder.cpp
+* @brief StageFright shell Audio Decoder
+*************************************************************************
+*/
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_AUDIODECODER"
+
+#include "M4OSA_Debug.h"
+#include "VideoEditorAudioDecoder.h"
+#include "VideoEditorUtils.h"
+#include "M4MCS_InternalTypes.h"
+
+#include "utils/Log.h"
+#include "utils/Vector.h"
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+
+/********************
+ * DEFINITIONS *
+ ********************/
+// Version
+#define VIDEOEDITOR_AUDIO_DECODER_VERSION_MAJOR 1
+#define VIDEOEDITOR_AUDIO_DECODER_VERSION_MINOR 0
+#define VIDEOEDITOR_AUDIO_DECODER_VERSION_REV 0
+
+// Force using software decoder as engine does not support prefetch
+#define VIDEOEDITOR_FORCECODEC kSoftwareCodecsOnly
+
+namespace android {
+
+/**
+ * MediaSource implementation that feeds the OMX audio decoder.
+ * Access units are queued via storeBuffer() (or pulled on demand from the
+ * 3gp reader through the shell context) and handed out by read().
+ */
+struct VideoEditorAudioDecoderSource : public MediaSource {
+    public:
+        static sp<VideoEditorAudioDecoderSource> Create(
+                const sp<MetaData>& format, void *decoderShellContext);
+        virtual status_t start(MetaData *params = NULL);
+        virtual status_t stop();
+        virtual sp<MetaData> getFormat();
+        virtual status_t read(MediaBuffer **buffer,
+        const ReadOptions *options = NULL);
+        // Queues one encoded buffer; NULL means "fetch one from the reader".
+        virtual void storeBuffer(MediaBuffer *buffer);
+
+    protected:
+        virtual ~VideoEditorAudioDecoderSource();
+
+    private:
+        enum State {
+            CREATED,
+            STARTED,
+            ERROR
+        };
+        VideoEditorAudioDecoderSource(const sp<MetaData>& format,
+         void *decoderShellContext);
+        sp<MetaData> mFormat;      // input (encoded) stream format
+        Vector<MediaBuffer*> mBuffers;  // pending encoded access units
+        Mutex mLock;  // protects mBuffers
+        bool mIsEOS;  // NOTE(review): set in ctor but never updated afterwards
+        State mState;
+        void* mDecShellContext;    // VideoEditorAudioDecoder_Context*
+        // Don't call me.
+        VideoEditorAudioDecoderSource(const VideoEditorAudioDecoderSource&);
+        VideoEditorAudioDecoderSource& operator=(
+            const VideoEditorAudioDecoderSource &);
+};
+
+/**
+ ******************************************************************************
+ * structure VideoEditorAudioDecoder_Context
+ * @brief This structure defines the context of the StageFright audio decoder
+ * shell
+ ******************************************************************************
+*/
+
+typedef struct {
+    M4AD_Type mDecoderType;                 // AAC / AMR-NB / AMR-WB / MP3
+    M4_AudioStreamHandler* mAudioStreamHandler; // engine-side stream props
+    sp<VideoEditorAudioDecoderSource> mDecoderSource; // input feeder
+    OMXClient mClient;                      // connection to the OMX IL
+    sp<MediaSource> mDecoder;               // the OMX decoder instance
+    int32_t mNbOutputChannels;              // channels actually produced
+    uint32_t mNbInputFrames;                // statistics only
+    uint32_t mNbOutputFrames;               // statistics only
+    M4READER_DataInterface *m_pReader;      // set via setOption
+    M4_AccessUnit* m_pNextAccessUnitToDecode; // set via setOption
+    M4OSA_ERR readerErrCode;                // last reader status (EOS flag)
+    int32_t timeStampMs;                    // CTS of last AU read
+
+} VideoEditorAudioDecoder_Context;
+
+// Factory: wraps the private constructor; always returns a valid object.
+sp<VideoEditorAudioDecoderSource> VideoEditorAudioDecoderSource::Create(
+        const sp<MetaData>& format, void *decoderShellContext) {
+
+    sp<VideoEditorAudioDecoderSource> aSource =
+        new VideoEditorAudioDecoderSource(format, decoderShellContext);
+
+    return aSource;
+}
+
+// Starts in CREATED state; buffers are only accepted once start()ed.
+VideoEditorAudioDecoderSource::VideoEditorAudioDecoderSource(
+        const sp<MetaData>& format, void* decoderShellContext):
+        mFormat(format),
+        mIsEOS(false),
+        mState(CREATED),
+        mDecShellContext(decoderShellContext) {
+}
+
+// Ensures stop() is called (releasing any queued buffers) before teardown.
+VideoEditorAudioDecoderSource::~VideoEditorAudioDecoderSource() {
+
+    if( STARTED == mState ) {
+        stop();
+    }
+}
+
+// Transitions CREATED -> STARTED; any other starting state is an error.
+// The `meta` parameter is accepted for MediaSource compatibility but unused.
+status_t VideoEditorAudioDecoderSource::start(MetaData *meta) {
+    status_t err = OK;
+
+    if( CREATED != mState ) {
+        ALOGV("VideoEditorAudioDecoderSource::start: invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+
+    mState = STARTED;
+
+// NOTE(review): no goto targets this label; it is only fallen through.
+cleanUp:
+    ALOGV("VideoEditorAudioDecoderSource::start END (0x%x)", err);
+    return err;
+}
+
+// Transitions STARTED -> CREATED, releasing any encoded buffers still
+// queued (a warning is logged if any remained).
+status_t VideoEditorAudioDecoderSource::stop() {
+    Mutex::Autolock autolock(mLock);
+    status_t err = OK;
+
+    ALOGV("VideoEditorAudioDecoderSource::stop begin");
+
+    if( STARTED != mState ) {
+        ALOGV("VideoEditorAudioDecoderSource::stop: invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+
+    if (!mBuffers.empty()) {
+        int n = mBuffers.size();
+        for (int i = 0; i < n; i++) {
+            mBuffers.itemAt(i)->release();
+        }
+        ALOGW("VideoEditorAudioDecoderSource::stop : %d buffer remained", n);
+        mBuffers.clear();
+    }
+
+    mState = CREATED;
+
+    ALOGV("VideoEditorAudioDecoderSource::stop END (0x%x)", err);
+    return err;
+}
+
+// Returns the encoded-stream format this source was created with.
+sp<MetaData> VideoEditorAudioDecoderSource::getFormat() {
+
+    ALOGV("VideoEditorAudioDecoderSource::getFormat");
+    return mFormat;
+}
+
+// Pulls the next access unit from the 3gp reader and copies it into a new
+// MediaBuffer (timestamp converted from ms CTS to us). Returns NULL on EOS.
+// NOTE(review): only M4WAR_NO_MORE_AU is handled; any other reader error
+// falls through and the (possibly invalid) access unit is still consumed.
+static MediaBuffer* readBufferFromReader(
+        VideoEditorAudioDecoder_Context* pDecContext) {
+    M4OSA_ERR lerr = M4NO_ERROR;
+    M4_AccessUnit* pAccessUnit = pDecContext->m_pNextAccessUnitToDecode;
+
+    // Get next AU from reader.
+    lerr = pDecContext->m_pReader->m_pFctGetNextAu(
+               pDecContext->m_pReader->m_readerContext,
+               (M4_StreamHandler*)pDecContext->mAudioStreamHandler,
+               pAccessUnit);
+
+    if (lerr == M4WAR_NO_MORE_AU) {
+        ALOGV("readBufferFromReader : EOS");
+        return NULL;
+    }
+
+    // Remember the CTS (ms) of the last AU handed to the decoder.
+    pDecContext->timeStampMs = pAccessUnit->m_CTS;
+
+    MediaBuffer* newBuffer = new MediaBuffer((size_t)pAccessUnit->m_size);
+    memcpy((void *)((M4OSA_Int8*)newBuffer->data() + newBuffer->range_offset()),
+        (void *)pAccessUnit->m_dataAddress, pAccessUnit->m_size);
+    newBuffer->meta_data()->setInt64(kKeyTime, (pAccessUnit->m_CTS * 1000LL));
+    return newBuffer;
+}
+
+// Hands the next queued encoded buffer to the decoder; if the queue is
+// empty, one AU is fetched from the reader. EOS from the reader is
+// propagated as ERROR_END_OF_STREAM and recorded in readerErrCode.
+status_t VideoEditorAudioDecoderSource::read(MediaBuffer **buffer,
+        const ReadOptions *options) {
+    Mutex::Autolock autolock(mLock);
+    // NOTE(review): readOptions is never used.
+    MediaSource::ReadOptions readOptions;
+
+    VideoEditorAudioDecoder_Context* pDecContext =
+     (VideoEditorAudioDecoder_Context *)mDecShellContext;
+
+    if ( STARTED != mState ) {
+        ALOGV("VideoEditorAudioDecoderSource::read invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+
+    // Get a buffer from the reader if we don't have any
+    if(mBuffers.empty()) {
+        MediaBuffer* newBuffer = readBufferFromReader(pDecContext);
+        if (!newBuffer) {
+            *buffer = NULL;
+            pDecContext->readerErrCode = M4WAR_NO_MORE_AU;
+            return ERROR_END_OF_STREAM;
+        }
+        mBuffers.push(newBuffer);
+    }
+    // FIFO: hand out the oldest queued buffer; caller takes ownership.
+    *buffer = mBuffers.itemAt(0);
+    mBuffers.removeAt(0);
+
+    return OK;
+}
+
+// Queues one encoded buffer for the decoder. A NULL argument means "pull
+// the next AU from the reader"; on reader EOS nothing is queued and
+// readerErrCode is set to M4WAR_NO_MORE_AU.
+void VideoEditorAudioDecoderSource::storeBuffer(MediaBuffer *buffer) {
+    Mutex::Autolock autolock(mLock);
+    VideoEditorAudioDecoder_Context* pDecContext =
+     (VideoEditorAudioDecoder_Context *)mDecShellContext;
+
+    ALOGV("VideoEditorAudioDecoderSource::storeBuffer begin");
+
+    // If the user didn't give us a buffer, get it from the reader.
+    if(buffer == NULL) {
+        MediaBuffer* newBuffer = readBufferFromReader(pDecContext);
+        if (!newBuffer) {
+            pDecContext->readerErrCode = M4WAR_NO_MORE_AU;
+            return;
+        }
+        buffer = newBuffer;
+    }
+
+    mBuffers.push(buffer);
+    ALOGV("VideoEditorAudioDecoderSource::storeBuffer END");
+}
+
+/********************
+ * TOOLS *
+ ********************/
+
+/**
+ * Reads `nbBits` bits (MSB first) from the byte buffer `pData` starting at
+ * bit offset `*pOffset`, stores the value in `*pResult` and advances
+ * `*pOffset` by `nbBits`. Used to walk the AAC decoder specific info.
+ *
+ * @param pData    (IN)     source byte buffer
+ * @param dataSize (IN)     size of pData in bytes (bound check)
+ * @param nbBits   (IN)     number of bits to extract (<= 32)
+ * @param pResult  (OUT)    extracted value, right-aligned
+ * @param pOffset  (IN/OUT) running bit offset into pData
+ */
+M4OSA_ERR VideoEditorAudioDecoder_getBits(M4OSA_Int8* pData,
+        M4OSA_UInt32 dataSize, M4OSA_UInt8 nbBits, M4OSA_Int32* pResult,
+        M4OSA_UInt32* pOffset) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 startByte = 0;
+    M4OSA_UInt32 startBit = 0;
+    M4OSA_UInt32 endByte = 0;
+    M4OSA_UInt32 endBit = 0;
+    M4OSA_UInt32 currentByte = 0;
+    M4OSA_UInt32 result = 0;
+    M4OSA_UInt32 ui32Tmp = 0;
+    M4OSA_UInt32 ui32Mask = 0;
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pData, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOffset, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(32 >= nbBits, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK((*pOffset + nbBits) <= 8*dataSize, M4ERR_PARAMETER);
+
+    ALOGV("VideoEditorAudioDecoder_getBits begin");
+
+    // First/last byte touched and bit positions within them.
+    startByte   = (*pOffset) >> 3;
+    endByte     = (*pOffset + nbBits) >> 3;
+    startBit    = (*pOffset) % 8;
+    endBit      = (*pOffset + nbBits) % 8;
+    currentByte = startByte;
+
+    // Extract the requested nunber of bits from memory
+    while( currentByte <= endByte) {
+        ui32Mask = 0x000000FF;
+        if( currentByte == startByte ) {
+            // Mask off bits before the start position in the first byte.
+            ui32Mask >>= startBit;
+        }
+        ui32Tmp = ui32Mask & ((M4OSA_UInt32)pData[currentByte]);
+        if( currentByte == endByte ) {
+            // Last byte: keep only the bits up to the end position.
+            ui32Tmp >>= (8-endBit);
+            result <<= endBit;
+        } else {
+            result <<= 8;
+        }
+        result |= ui32Tmp;
+        currentByte++;
+    }
+
+    *pResult = result;
+    *pOffset += nbBits;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_getBits no error");
+    } else {
+        ALOGV("VideoEditorAudioDecoder_getBits ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_getBits end");
+    return err;
+}
+
+
+#define FREQ_TABLE_SIZE 16
+// Sampling frequencies indexed by the 4-bit frequency index of the AAC
+// decoder specific info; the last three entries are 0 (unsupported indices).
+const M4OSA_UInt32 AD_AAC_FREQ_TABLE[FREQ_TABLE_SIZE] =
+    {96000, 88200, 64000, 48000, 44100,
+    32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350, 0, 0, 0};
+
+
+/**
+ * Parses the AAC decoder specific info (DSI): 5-bit audio object type,
+ * 4-bit sampling frequency index, 4-bit channel configuration.
+ * Only object types 2 (LC), 5 (SBR) and 29 (PS) are accepted; SBR/PS
+ * presence doubles the max PCM samples per channel (2048 vs 1024).
+ * The extension sampling frequency is not parsed and is forced to 0.
+ *
+ * @param pDSI        (IN)  raw DSI bytes
+ * @param dsiSize     (IN)  DSI size in bytes
+ * @param pProperties (OUT) decoded stream properties
+ */
+M4OSA_ERR VideoEditorAudioDecoder_parse_AAC_DSI(M4OSA_Int8* pDSI,
+        M4OSA_UInt32 dsiSize, AAC_DEC_STREAM_PROPS* pProperties) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 offset = 0;
+    M4OSA_Int32 result = 0;
+
+    ALOGV("VideoEditorAudioDecoder_parse_AAC_DSI begin");
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pDSI, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pProperties, M4ERR_PARAMETER);
+
+    // Get the object type
+    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 5, &result, &offset);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    switch( result ) {
+        case 2:
+            /* Audio Object Type is 2 (AAC Low Complexity) */
+            pProperties->aPSPresent  = 0;
+            pProperties->aSBRPresent = 0;
+            break;
+        case 5:
+            /* Audio Object Type is 5 (Spectral Band Replication) */
+            pProperties->aPSPresent  = 0;
+            pProperties->aSBRPresent = 1;
+            break;
+        case 29:
+            /* Audio Object Type is 29 (Parametric Stereo) */
+            pProperties->aPSPresent  = 1;
+            pProperties->aSBRPresent = 1;
+            break;
+        default:
+            ALOGV("parse_AAC_DSI ERROR : object type %d is not supported",
+                result);
+            VIDEOEDITOR_CHECK(!"invalid AAC object type", M4ERR_BAD_OPTION_ID);
+            break;
+    }
+    pProperties->aAudioObjectType = (M4OSA_Int32)result;
+
+    // Get the frequency index
+    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 4, &result, &offset);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    VIDEOEDITOR_CHECK((0 <= result) && (FREQ_TABLE_SIZE > result),
+        M4ERR_PARAMETER);
+    pProperties->aSampFreq = AD_AAC_FREQ_TABLE[result];
+    pProperties->aExtensionSampFreq = 0;
+
+    // Get the number of channels
+    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 4, &result, &offset);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    pProperties->aNumChan = (M4OSA_UInt32)result;
+
+    // Set the max PCM samples per channel
+    pProperties->aMaxPCMSamplesPerCh = (pProperties->aSBRPresent) ? 2048 : 1024;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_parse_AAC_DSI no error");
+    } else {
+        ALOGV("VideoEditorAudioDecoder_parse_AAC_DSI ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_parse_AAC_DSI end");
+    return err;
+}
+
+/********************
+ * ENGINE INTERFACE *
+ ********************/
+
+/**
+ * Stops the decoder (if created), releases source/decoder references,
+ * disconnects from the OMX client and frees the shell context.
+ */
+M4OSA_ERR VideoEditorAudioDecoder_destroy(M4AD_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+
+    ALOGV("VideoEditorAudioDecoder_destroy begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    // Stop the graph
+    if( M4OSA_NULL != pDecoderContext->mDecoder.get() ) {
+        pDecoderContext->mDecoder->stop();
+    }
+
+    // Destroy the graph
+    pDecoderContext->mDecoderSource.clear();
+    pDecoderContext->mDecoder.clear();
+    pDecoderContext->mClient.disconnect();
+
+    SAFE_FREE(pDecoderContext);
+    // NOTE(review): this only nulls the local parameter copy, not the
+    // caller's pointer (pContext is passed by value).
+    pContext = M4OSA_NULL;
+    ALOGV("VideoEditorAudioDecoder_destroy : DONE");
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_destroy no error");
+    } else {
+        ALOGV("VideoEditorAudioDecoder_destroy ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_destroy : end");
+    return err;
+}
+
+/**
+ * Creates the audio decoder shell for the given codec type: fills the
+ * engine's stream handler with codec parameters, builds the decoder
+ * source, connects to OMX, instantiates the (software-forced) OMX codec
+ * and starts it. On any failure the partially-built context is destroyed.
+ *
+ * @param decoderType    (IN)  M4AD_kTypeAAC / AMRNB / AMRWB / MP3
+ * @param pContext       (OUT) created shell context
+ * @param pStreamHandler (IN)  audio stream handler from the reader
+ * @param pUserData      (OUT) for AAC: receives AAC_DEC_STREAM_PROPS
+ */
+M4OSA_ERR VideoEditorAudioDecoder_create(M4AD_Type decoderType,
+        M4AD_Context* pContext, M4_AudioStreamHandler* pStreamHandler,
+        void* pUserData) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+    AAC_DEC_STREAM_PROPS aacProperties;
+    status_t result = OK;
+    sp<MetaData> decoderMetaData = NULL;
+    const char* mime = NULL;
+    uint32_t codecFlags = 0;
+
+    ALOGV("VideoEditorAudioDecoder_create begin: decoderType %d", decoderType);
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pStreamHandler, M4ERR_PARAMETER);
+
+    // Context allocation & initialization
+    SAFE_MALLOC(pDecoderContext, VideoEditorAudioDecoder_Context, 1,
+        "AudioDecoder");
+    pDecoderContext->mDecoderType = decoderType;
+    pDecoderContext->mAudioStreamHandler = pStreamHandler;
+
+    pDecoderContext->mNbInputFrames  = 0;
+    pDecoderContext->mNbOutputFrames = 0;
+    pDecoderContext->readerErrCode = M4NO_ERROR;
+    pDecoderContext->timeStampMs = -1;
+
+    ALOGV("VideoEditorAudioDecoder_create : maxAUSize %d",
+        pDecoderContext->mAudioStreamHandler->m_basicProperties.m_maxAUSize);
+
+    // Create the meta data for the decoder
+    decoderMetaData = new MetaData;
+    switch( pDecoderContext->mDecoderType ) {
+        case M4AD_kTypeAMRNB:
+            // StageFright parameters
+            mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
+            // Engine parameters
+            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 160;
+            // Number of bytes per sample
+            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 2;
+            pDecoderContext->mAudioStreamHandler->m_samplingFrequency = 8000;
+            pDecoderContext->mAudioStreamHandler->m_nbChannels = 1;
+            break;
+
+        case M4AD_kTypeAMRWB:
+            // StageFright parameters
+            mime = MEDIA_MIMETYPE_AUDIO_AMR_WB;
+
+            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 160;
+            // Number of bytes per sample
+            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 2;
+            pDecoderContext->mAudioStreamHandler->m_samplingFrequency = 16000;
+            pDecoderContext->mAudioStreamHandler->m_nbChannels = 1;
+            break;
+
+        case M4AD_kTypeAAC:
+            // Reject ADTS & ADIF (or any incorrect type)
+            VIDEOEDITOR_CHECK(M4DA_StreamTypeAudioAac ==
+                pDecoderContext->mAudioStreamHandler->\
+                m_basicProperties.m_streamType,M4ERR_PARAMETER);
+
+            // StageFright parameters
+            mime = MEDIA_MIMETYPE_AUDIO_AAC;
+
+            decoderMetaData->setData(kKeyESDS, kTypeESDS,
+                pStreamHandler->m_basicProperties.m_pESDSInfo,
+                pStreamHandler->m_basicProperties.m_ESDSInfoSize);
+
+            // Engine parameters
+            // Retrieve sampling frequency and number of channels from the DSI
+            err = VideoEditorAudioDecoder_parse_AAC_DSI(
+                (M4OSA_Int8*)pStreamHandler->m_basicProperties.\
+                    m_pDecoderSpecificInfo,
+                pStreamHandler->m_basicProperties.m_decoderSpecificInfoSize,
+                &aacProperties);
+
+            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 1024;
+            // Number of bytes per sample
+            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 2;
+            pDecoderContext->mAudioStreamHandler->m_samplingFrequency =
+                aacProperties.aSampFreq;
+            pDecoderContext->mAudioStreamHandler->m_nbChannels =
+                aacProperties.aNumChan;
+
+            // Copy the stream properties into userdata
+            if( M4OSA_NULL != pUserData ) {
+                memcpy((void *)pUserData,
+                    (void *)&aacProperties,
+                    sizeof(AAC_DEC_STREAM_PROPS));
+            }
+            break;
+
+        case M4AD_kTypeMP3:
+            // StageFright parameters
+            mime = MEDIA_MIMETYPE_AUDIO_MPEG;
+            break;
+
+        default:
+            VIDEOEDITOR_CHECK(!"AudioDecoder_open : incorrect input format",
+                M4ERR_STATE);
+            break;
+    }
+    decoderMetaData->setCString(kKeyMIMEType, mime);
+    decoderMetaData->setInt32(kKeySampleRate,
+        (int32_t)pDecoderContext->mAudioStreamHandler->m_samplingFrequency);
+    decoderMetaData->setInt32(kKeyChannelCount,
+        pDecoderContext->mAudioStreamHandler->m_nbChannels);
+    decoderMetaData->setInt64(kKeyDuration,
+        (int64_t)pDecoderContext->mAudioStreamHandler->\
+            m_basicProperties.m_duration);
+
+    // Create the decoder source
+    pDecoderContext->mDecoderSource = VideoEditorAudioDecoderSource::Create(
+        decoderMetaData, (void *)pDecoderContext);
+    VIDEOEDITOR_CHECK(NULL != pDecoderContext->mDecoderSource.get(),
+        M4ERR_STATE);
+
+    // Connect to the OMX client
+    result = pDecoderContext->mClient.connect();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Create the OMX codec
+#ifdef VIDEOEDITOR_FORCECODEC
+    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
+#endif /* VIDEOEDITOR_FORCECODEC */
+
+    pDecoderContext->mDecoder = OMXCodec::Create(pDecoderContext->\
+        mClient.interface(),
+        decoderMetaData, false, pDecoderContext->mDecoderSource, NULL,
+        codecFlags);
+    VIDEOEDITOR_CHECK(NULL != pDecoderContext->mDecoder.get(), M4ERR_STATE);
+
+    // Get the output channels, the decoder might overwrite the input metadata
+    pDecoderContext->mDecoder->getFormat()->findInt32(kKeyChannelCount,
+        &pDecoderContext->mNbOutputChannels);
+    ALOGV("VideoEditorAudioDecoder_create : output chan %d",
+        pDecoderContext->mNbOutputChannels);
+
+    // Start the decoder
+    result = pDecoderContext->mDecoder->start();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    *pContext = pDecoderContext;
+    ALOGV("VideoEditorAudioDecoder_create : DONE");
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_create no error");
+    } else {
+        // NOTE(review): destroy() disconnects mClient unconditionally even
+        // if connect() was never reached - confirm OMXClient tolerates this.
+        VideoEditorAudioDecoder_destroy(pDecoderContext);
+        *pContext = M4OSA_NULL;
+        ALOGV("VideoEditorAudioDecoder_create ERROR 0x%X", err);
+    }
+    return err;
+}
+
+// Per-codec factory wrappers registered with the engine: each simply
+// forwards to VideoEditorAudioDecoder_create with the matching M4AD_Type.
+M4OSA_ERR VideoEditorAudioDecoder_create_AAC(M4AD_Context* pContext,
+        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
+
+    return VideoEditorAudioDecoder_create(
+            M4AD_kTypeAAC, pContext, pStreamHandler,pUserData);
+}
+
+
+M4OSA_ERR VideoEditorAudioDecoder_create_AMRNB(M4AD_Context* pContext,
+        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
+
+    return VideoEditorAudioDecoder_create(
+            M4AD_kTypeAMRNB, pContext, pStreamHandler, pUserData);
+}
+
+
+M4OSA_ERR VideoEditorAudioDecoder_create_AMRWB(M4AD_Context* pContext,
+        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
+
+    return VideoEditorAudioDecoder_create(
+            M4AD_kTypeAMRWB, pContext, pStreamHandler, pUserData);
+}
+
+
+M4OSA_ERR VideoEditorAudioDecoder_create_MP3(M4AD_Context* pContext,
+        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
+
+    return VideoEditorAudioDecoder_create(
+            M4AD_kTypeMP3, pContext, pStreamHandler, pUserData);
+}
+
+/**
+ * Copies one engine input buffer into a new MediaBuffer (timestamp in us)
+ * and queues it on the decoder source. A NULL pInputBuffer is forwarded to
+ * storeBuffer(), which then pulls the next AU from the reader itself.
+ */
+M4OSA_ERR VideoEditorAudioDecoder_processInputBuffer(
+        M4AD_Context pContext, M4AD_Buffer* pInputBuffer) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+    MediaBuffer* buffer = NULL;
+
+    ALOGV("VideoEditorAudioDecoder_processInputBuffer begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    if( M4OSA_NULL != pInputBuffer ) {
+        buffer = new MediaBuffer((size_t)pInputBuffer->m_bufferSize);
+        memcpy((void *)((M4OSA_Int8*)buffer->data() + buffer->range_offset()),
+            (void *)pInputBuffer->m_dataAddress, pInputBuffer->m_bufferSize);
+        buffer->meta_data()->setInt64(kKeyTime, pInputBuffer->m_timeStampUs);
+    }
+    pDecoderContext->mDecoderSource->storeBuffer(buffer);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_processInputBuffer no error");
+    } else {
+        ALOGV("VideoEditorAudioDecoder_processInputBuffer ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_processInputBuffer end");
+    return err;
+}
+
+/**
+ * Copies decoded PCM from the decoder's MediaBuffer into the engine's
+ * output buffer. If the decoder produced more channels than the stream
+ * declares (forced stereo), only the first (left) sample of each
+ * interleaved pair is kept; forced mono output is not supported.
+ * The MediaBuffer is always released, on success and on error.
+ */
+M4OSA_ERR VideoEditorAudioDecoder_processOutputBuffer(M4AD_Context pContext,
+        MediaBuffer* buffer, M4AD_Buffer* pOuputBuffer) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+    // NOTE(review): i32Tmp and i64Tmp are never used.
+    int32_t i32Tmp = 0;
+    int64_t i64Tmp = 0;
+    status_t result = OK;
+
+    ALOGV("VideoEditorAudioDecoder_processOutputBuffer begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOuputBuffer, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    // Process the returned data
+    if( 0 == buffer->range_length() ) {
+        // Decoder has no data yet, nothing unusual
+        goto cleanUp;
+    }
+
+    pDecoderContext->mNbOutputFrames++;
+
+    if( pDecoderContext->mAudioStreamHandler->m_nbChannels ==
+        (M4OSA_UInt32)pDecoderContext->mNbOutputChannels ) {
+        // Just copy the PCMs
+        pOuputBuffer->m_bufferSize = (M4OSA_UInt32)buffer->range_length();
+        memcpy((void *)pOuputBuffer->m_dataAddress,
+            (void *)(((M4OSA_MemAddr8)buffer->data())+buffer->range_offset()),
+            buffer->range_length());
+    } else if( pDecoderContext->mAudioStreamHandler->m_nbChannels <
+        (M4OSA_UInt32)pDecoderContext->mNbOutputChannels ) {
+        // The decoder forces stereo output, downsample
+        pOuputBuffer->m_bufferSize = (M4OSA_UInt32)(buffer->range_length()/2);
+        M4OSA_Int16* pDataIn  = ((M4OSA_Int16*)buffer->data()) +
+            buffer->range_offset();
+        M4OSA_Int16* pDataOut = (M4OSA_Int16*)pOuputBuffer->m_dataAddress;
+        M4OSA_Int16* pDataEnd = pDataIn + \
+            (buffer->range_length()/sizeof(M4OSA_Int16));
+        // Keep one sample out of each interleaved pair (left channel).
+        while( pDataIn < pDataEnd ) {
+            *pDataOut = *pDataIn;
+            pDataIn+=2;
+            pDataOut++;
+        }
+    } else {
+        // The decoder forces mono output, not supported
+        VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+    }
+
+cleanUp:
+    // Release the buffer
+    buffer->release();
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_processOutputBuffer no error");
+    } else {
+        pOuputBuffer->m_bufferSize = 0;
+        ALOGV("VideoEditorAudioDecoder_processOutputBuffer ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_processOutputBuffer end");
+    return err;
+}
+
+/**
+ * One decode step: pushes pInputBuffer (may be NULL -> pulled from reader)
+ * to the decoder source, reads one decoded buffer back and converts it
+ * into pOutputBuffer. INFO_FORMAT_CHANGED updates the stream handler's
+ * sample rate / channel count and returns M4WAR_INFO_FORMAT_CHANGE;
+ * EOS returns M4WAR_NO_MORE_AU. The bJump flag is accepted but unused.
+ */
+M4OSA_ERR VideoEditorAudioDecoder_step(M4AD_Context pContext,
+        M4AD_Buffer* pInputBuffer, M4AD_Buffer* pOutputBuffer,
+        M4OSA_Bool bJump) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+    status_t result = OK;
+    MediaBuffer* outputBuffer = NULL;
+
+    ALOGV("VideoEditorAudioDecoder_step begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+    pDecoderContext->mNbInputFrames++;
+
+    // Push the input buffer to the decoder source
+    err = VideoEditorAudioDecoder_processInputBuffer(pDecoderContext,
+        pInputBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+    // Read
+    result = pDecoderContext->mDecoder->read(&outputBuffer, NULL);
+    if (INFO_FORMAT_CHANGED == result) {
+        ALOGV("VideoEditorAudioDecoder_step: Audio decoder \
+            returned INFO_FORMAT_CHANGED");
+        CHECK(outputBuffer == NULL);
+        sp<MetaData> meta = pDecoderContext->mDecoder->getFormat();
+        int32_t sampleRate, channelCount;
+
+        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+        CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
+        ALOGV("VideoEditorAudioDecoder_step: samplingFreq = %d", sampleRate);
+        ALOGV("VideoEditorAudioDecoder_step: channelCnt = %d", channelCount);
+        pDecoderContext->mAudioStreamHandler->m_samplingFrequency =
+            (uint32_t)sampleRate;
+        pDecoderContext->mAudioStreamHandler->m_nbChannels =
+            (uint32_t)channelCount;
+        pDecoderContext->mNbOutputChannels = channelCount;
+
+        return M4WAR_INFO_FORMAT_CHANGE;
+    } else if (ERROR_END_OF_STREAM == result) {
+        ALOGV("VideoEditorAudioDecoder_step: Audio decoder \
+            returned ERROR_END_OF_STREAM");
+        pDecoderContext->readerErrCode = M4WAR_NO_MORE_AU;
+        return M4WAR_NO_MORE_AU;
+    } else if (OK != result) {
+        return M4ERR_STATE;
+    }
+
+    // Convert the PCM buffer
+    err = VideoEditorAudioDecoder_processOutputBuffer(pDecoderContext,
+        outputBuffer, pOutputBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_step no error");
+    } else {
+        ALOGV("VideoEditorAudioDecoder_step ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_step end");
+    return err;
+}
+
+// Reports the shell's compile-time version constants (1.0.0).
+M4OSA_ERR VideoEditorAudioDecoder_getVersion(M4_VersionInfo* pVersionInfo) {
+    M4OSA_ERR err = M4NO_ERROR;
+
+    ALOGV("VideoEditorAudioDecoder_getVersion begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pVersionInfo, M4ERR_PARAMETER);
+
+    pVersionInfo->m_major = VIDEOEDITOR_AUDIO_DECODER_VERSION_MAJOR;
+    pVersionInfo->m_minor = VIDEOEDITOR_AUDIO_DECODER_VERSION_MINOR;
+    pVersionInfo->m_revision = VIDEOEDITOR_AUDIO_DECODER_VERSION_REV;
+    pVersionInfo->m_structSize = sizeof(M4_VersionInfo);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_getVersion no error");
+    } else {
+        ALOGV("VideoEditorAudioDecoder_getVersion ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_getVersion end");
+    return err;
+}
+
+/**
+ * Stores engine-provided options on the shell context. Supported:
+ * 3gpReaderInterface (reader data interface) and AudioAU (next access
+ * unit to decode). UserParam is explicitly not implemented; unknown
+ * option IDs return M4ERR_BAD_OPTION_ID.
+ */
+M4OSA_ERR VideoEditorAudioDecoder_setOption(M4AD_Context pContext,
+        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+
+    ALOGV("VideoEditorAudioDecoder_setOption begin 0x%X", optionID);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    switch( optionID ) {
+        case M4AD_kOptionID_UserParam:
+            ALOGV("VideoEditorAudioDecodersetOption UserParam is not supported");
+            err = M4ERR_NOT_IMPLEMENTED;
+            break;
+
+        case M4AD_kOptionID_3gpReaderInterface:
+            ALOGV("VideoEditorAudioDecodersetOption 3gpReaderInterface");
+            pDecoderContext->m_pReader =
+                (M4READER_DataInterface *)optionValue;
+            break;
+
+        case M4AD_kOptionID_AudioAU:
+            ALOGV("VideoEditorAudioDecodersetOption AudioAU");
+            pDecoderContext->m_pNextAccessUnitToDecode =
+                (M4_AccessUnit *)optionValue;
+            break;
+
+        default:
+            ALOGV("VideoEditorAudioDecoder_setOption unsupported optionId 0x%X",
+                optionID);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+            break;
+    }
+
+cleanUp:
+    // NOTE(review): this is the success/expected path, yet the log text
+    // says "error" - misleading but harmless.
+    if( ((M4OSA_UInt32)M4NO_ERROR == err) || ((M4OSA_UInt32)M4ERR_NOT_IMPLEMENTED == err) ) {
+        ALOGV("VideoEditorAudioDecoder_setOption error 0x%X", err);
+    } else {
+        ALOGV("VideoEditorAudioDecoder_setOption ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_setOption end");
+    return err;
+}
+
+/**
+ * Queries a decoder option: last reader error code, channel count,
+ * sampling frequency, or the CTS of the last decoded access unit.
+ * NOTE(review): the channel/frequency cases dereference
+ * mAudioStreamHandler without a NULL check — presumably these options are
+ * only queried after the stream handler has been attached; confirm with
+ * callers.
+ * @param pContext     IN  decoder context
+ * @param optionID     IN  option identifier
+ * @param optionValue  OUT written as uint32_t for all supported options
+ * @return M4NO_ERROR, M4ERR_PARAMETER or M4ERR_BAD_OPTION_ID.
+ */
+M4OSA_ERR VideoEditorAudioDecoder_getOption(M4AD_Context pContext,
+        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+
+    ALOGV("VideoEditorAudioDecoder_getOption begin: optionID 0x%X", optionID);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    switch( optionID ) {
+
+        case M4AD_kOptionID_GetAudioAUErrCode:
+            *(uint32_t *)optionValue = pDecoderContext->readerErrCode;
+            break;
+
+        case M4AD_kOptionID_AudioNbChannels:
+            *(uint32_t *)optionValue =
+                pDecoderContext->mAudioStreamHandler->m_nbChannels;
+            break;
+
+        case M4AD_kOptionID_AudioSampFrequency:
+            *(uint32_t *)optionValue =
+                pDecoderContext->mAudioStreamHandler->m_samplingFrequency;
+            break;
+
+        case M4AD_kOptionID_AuCTS:
+            *(uint32_t *)optionValue = pDecoderContext->timeStampMs;
+            break;
+
+        default:
+            ALOGV("VideoEditorAudioDecoder_getOption unsupported optionId 0x%X",
+                optionID);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+            break;
+    }
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_getOption no error");
+    } else {
+        ALOGV("VideoEditorAudioDecoder_getOption ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_getOption end");
+    return err;
+}
+
+/**
+ * Allocates and populates the audio decoder interface for the requested
+ * decoder type (AMR-NB/AMR-WB/AAC/MP3). Only the create entry point
+ * differs per type; all other function pointers are shared.
+ * @param decoderType       IN  requested decoder type
+ * @param pDecoderType      OUT echoes decoderType
+ * @param pDecoderInterface OUT newly allocated interface (caller owns it);
+ *                              reset to NULL on failure
+ * @return M4NO_ERROR or M4ERR_PARAMETER.
+ */
+M4OSA_ERR VideoEditorAudioDecoder_getInterface(M4AD_Type decoderType,
+        M4AD_Type* pDecoderType, M4AD_Interface** pDecoderInterface) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pDecoderType, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pDecoderInterface, M4ERR_PARAMETER);
+
+    ALOGV("VideoEditorAudioDecoder_getInterface begin %d 0x%x 0x%x",
+        decoderType, pDecoderType, pDecoderInterface);
+
+    SAFE_MALLOC(*pDecoderInterface, M4AD_Interface, 1,
+        "VideoEditorAudioDecoder");
+
+    *pDecoderType = decoderType;
+
+    switch( decoderType ) {
+        case M4AD_kTypeAMRNB:
+            (*pDecoderInterface)->m_pFctCreateAudioDec =
+                VideoEditorAudioDecoder_create_AMRNB;
+            break;
+        case M4AD_kTypeAMRWB:
+            (*pDecoderInterface)->m_pFctCreateAudioDec =
+                VideoEditorAudioDecoder_create_AMRWB;
+            break;
+        case M4AD_kTypeAAC:
+            (*pDecoderInterface)->m_pFctCreateAudioDec =
+                VideoEditorAudioDecoder_create_AAC;
+            break;
+        case M4AD_kTypeMP3:
+            (*pDecoderInterface)->m_pFctCreateAudioDec =
+                VideoEditorAudioDecoder_create_MP3;
+            break;
+        default:
+            ALOGV("VEAD_getInterface ERROR: unsupported type %d", decoderType);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+            break;
+    }
+    (*pDecoderInterface)->m_pFctDestroyAudioDec =
+        VideoEditorAudioDecoder_destroy;
+    (*pDecoderInterface)->m_pFctResetAudioDec = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctStartAudioDec = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctStepAudioDec =
+        VideoEditorAudioDecoder_step;
+    (*pDecoderInterface)->m_pFctGetVersionAudioDec =
+        VideoEditorAudioDecoder_getVersion;
+    (*pDecoderInterface)->m_pFctSetOptionAudioDec =
+        VideoEditorAudioDecoder_setOption;
+    (*pDecoderInterface)->m_pFctGetOptionAudioDec =
+        VideoEditorAudioDecoder_getOption;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioDecoder_getInterface no error");
+    } else {
+        // Fix: free the interface allocated above instead of leaking it,
+        // and never dereference pDecoderInterface when the parameter
+        // check for it failed (it may be NULL here).
+        if( M4OSA_NULL != pDecoderInterface ) {
+            SAFE_FREE(*pDecoderInterface);
+            *pDecoderInterface = M4OSA_NULL;
+        }
+        ALOGV("VideoEditorAudioDecoder_getInterface ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioDecoder_getInterface end");
+    return err;
+}
+
+
+// C entry points for the engine's decoder registry. Each wrapper binds
+// one decoder type and forwards to the common factory above.
+extern "C" {
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AAC(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface) {
+    ALOGV("TEST: AAC VideoEditorAudioDecoder_getInterface no error");
+    return VideoEditorAudioDecoder_getInterface(
+        M4AD_kTypeAAC, pDecoderType, pDecoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRNB(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface) {
+    ALOGV("TEST: AMR VideoEditorAudioDecoder_getInterface no error");
+    return VideoEditorAudioDecoder_getInterface(
+        M4AD_kTypeAMRNB, pDecoderType, pDecoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRWB(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface) {
+
+    return VideoEditorAudioDecoder_getInterface(
+        M4AD_kTypeAMRWB, pDecoderType, pDecoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_MP3(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface) {
+
+    return VideoEditorAudioDecoder_getInterface(
+        M4AD_kTypeMP3, pDecoderType, pDecoderInterface);
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp
new file mode 100755
index 0000000..a91f3ee
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp
@@ -0,0 +1,755 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorAudioEncoder.cpp
+* @brief StageFright shell Audio Encoder
+*************************************************************************
+*/
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_AUDIOENCODER"
+
+#include "M4OSA_Debug.h"
+#include "VideoEditorAudioEncoder.h"
+#include "VideoEditorUtils.h"
+
+#include "utils/Log.h"
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+
+/*** DEFINITIONS ***/
+// Force using software encoder as engine does not support prefetch
+#define VIDEOEDITOR_FORCECODEC kSoftwareCodecsOnly
+
+namespace android {
+// MediaSource that feeds raw PCM buffers to the encoder. Buffers are
+// pushed via storeBuffer() and consumed FIFO by the codec's read().
+struct VideoEditorAudioEncoderSource : public MediaSource {
+    public:
+        static sp<VideoEditorAudioEncoderSource> Create(
+            const sp<MetaData> &format);
+        virtual status_t start(MetaData *params = NULL);
+        virtual status_t stop();
+        virtual sp<MetaData> getFormat();
+        // Pops the oldest queued buffer; returns ERROR_END_OF_STREAM when
+        // the queue is empty.
+        virtual status_t read(MediaBuffer **buffer,
+            const ReadOptions *options = NULL);
+        // Appends a buffer to the queue; returns the new queue depth.
+        virtual int32_t storeBuffer(MediaBuffer *buffer);
+
+    protected:
+        virtual ~VideoEditorAudioEncoderSource();
+
+    private:
+        // Singly-linked FIFO node owning one MediaBuffer.
+        struct MediaBufferChain {
+            MediaBuffer* buffer;
+            MediaBufferChain* nextLink;
+        };
+        enum State {
+            CREATED,
+            STARTED,
+            ERROR
+        };
+
+        MediaBufferChain* mFirstBufferLink;  // FIFO head (oldest)
+        MediaBufferChain* mLastBufferLink;   // FIFO tail (newest)
+        int32_t mNbBuffer;                   // current queue depth
+        State mState;
+        sp<MetaData> mEncFormat;             // format advertised to the codec
+
+        VideoEditorAudioEncoderSource(const sp<MetaData> &format);
+
+        // Don't call me.
+        VideoEditorAudioEncoderSource(const VideoEditorAudioEncoderSource&);
+        VideoEditorAudioEncoderSource& operator=(
+                const VideoEditorAudioEncoderSource&);
+};
+
+// Factory: wraps construction in a strong pointer (ctor is private).
+sp<VideoEditorAudioEncoderSource> VideoEditorAudioEncoderSource::Create(
+    const sp<MetaData> &format) {
+
+    ALOGV("VideoEditorAudioEncoderSource::Create");
+    sp<VideoEditorAudioEncoderSource> aSource =
+        new VideoEditorAudioEncoderSource(format);
+
+    return aSource;
+}
+
+// Starts with an empty buffer queue in the CREATED state.
+VideoEditorAudioEncoderSource::VideoEditorAudioEncoderSource(
+        const sp<MetaData> &format):
+        mFirstBufferLink(NULL),
+        mLastBufferLink(NULL),
+        mNbBuffer(0),
+        mState(CREATED),
+        mEncFormat(format) {
+    ALOGV("VideoEditorAudioEncoderSource::VideoEditorAudioEncoderSource");
+}
+
+
+// Stops the source if the owner forgot to, so the queue is drained.
+VideoEditorAudioEncoderSource::~VideoEditorAudioEncoderSource() {
+    ALOGV("VideoEditorAudioEncoderSource::~VideoEditorAudioEncoderSource");
+
+    if( STARTED == mState ) {
+        stop();
+    }
+}
+
+// Transitions CREATED -> STARTED; rejects any other starting state.
+// NOTE(review): the cleanUp label has no matching goto and err is always
+// OK here; kept for symmetry with the other entry points.
+status_t VideoEditorAudioEncoderSource::start(MetaData *meta) {
+    status_t err = OK;
+
+    ALOGV("VideoEditorAudioEncoderSource::start");
+
+    if( CREATED != mState ) {
+        ALOGV("VideoEditorAudioEncoderSource::start ERROR : invalid state %d",
+            mState);
+        return UNKNOWN_ERROR;
+    }
+
+    mState = STARTED;
+
+cleanUp:
+    ALOGV("VideoEditorAudioEncoderSource::start END (0x%x)", err);
+    return err;
+}
+
+// Transitions STARTED -> CREATED and drains the buffer queue.
+// Fix: previously the chain links were deleted without releasing the
+// MediaBuffers they owned, leaking every buffer still queued at stop();
+// mNbBuffer was also left stale.
+status_t VideoEditorAudioEncoderSource::stop() {
+    status_t err = OK;
+
+    ALOGV("VideoEditorAudioEncoderSource::stop");
+
+    if( STARTED != mState ) {
+        ALOGV("VideoEditorAudioEncoderSource::stop ERROR: invalid state %d",
+            mState);
+        return UNKNOWN_ERROR;
+    }
+
+    int32_t i = 0;
+    MediaBufferChain* tmpLink = NULL;
+    while( mFirstBufferLink ) {
+        i++;
+        tmpLink = mFirstBufferLink;
+        mFirstBufferLink = mFirstBufferLink->nextLink;
+        // Release the buffer before freeing its link node
+        if( NULL != tmpLink->buffer ) {
+            tmpLink->buffer->release();
+        }
+        delete tmpLink;
+    }
+    ALOGV("VideoEditorAudioEncoderSource::stop : %d buffer remained", i);
+    mFirstBufferLink = NULL;
+    mLastBufferLink = NULL;
+    mNbBuffer = 0;
+
+    mState = CREATED;
+
+    ALOGV("VideoEditorAudioEncoderSource::stop END (0x%x)", err);
+    return err;
+}
+
+// Returns the format metadata supplied at construction.
+sp<MetaData> VideoEditorAudioEncoderSource::getFormat() {
+    ALOGV("VideoEditorAudioEncoderSource::getFormat");
+    return mEncFormat;
+}
+
+// Pops the oldest queued buffer and hands ownership to the caller.
+// An empty queue is reported as ERROR_END_OF_STREAM; the caller is
+// expected to have pushed input (storeBuffer) before each read.
+status_t VideoEditorAudioEncoderSource::read(MediaBuffer **buffer,
+        const ReadOptions *options) {
+    MediaSource::ReadOptions readOptions;
+    status_t err = OK;
+    MediaBufferChain* tmpLink = NULL;
+
+    ALOGV("VideoEditorAudioEncoderSource::read");
+
+    if ( STARTED != mState ) {
+        ALOGV("VideoEditorAudioEncoderSource::read ERROR : invalid state %d",
+            mState);
+        return UNKNOWN_ERROR;
+    }
+
+    if( NULL == mFirstBufferLink ) {
+        *buffer = NULL;
+        ALOGV("VideoEditorAudioEncoderSource::read : EOS");
+        return ERROR_END_OF_STREAM;
+    }
+    *buffer = mFirstBufferLink->buffer;
+
+    // Unlink and free the head node; keep the tail pointer consistent
+    tmpLink = mFirstBufferLink;
+    mFirstBufferLink = mFirstBufferLink->nextLink;
+    if( NULL == mFirstBufferLink ) {
+        mLastBufferLink = NULL;
+    }
+    delete tmpLink;
+    mNbBuffer--;
+
+    ALOGV("VideoEditorAudioEncoderSource::read END (0x%x)", err);
+    return err;
+}
+
+// Appends a buffer to the FIFO tail; the queue takes ownership until the
+// buffer is handed out by read(). Returns the new queue depth.
+// NOTE(review): err is declared but never used here.
+int32_t VideoEditorAudioEncoderSource::storeBuffer(MediaBuffer *buffer) {
+    status_t err = OK;
+
+    ALOGV("VideoEditorAudioEncoderSource::storeBuffer");
+
+    MediaBufferChain* newLink = new MediaBufferChain;
+    newLink->buffer = buffer;
+    newLink->nextLink = NULL;
+    if( NULL != mLastBufferLink ) {
+        mLastBufferLink->nextLink = newLink;
+    } else {
+        // Queue was empty: the new node is also the head
+        mFirstBufferLink = newLink;
+    }
+    mLastBufferLink = newLink;
+    mNbBuffer++;
+
+    ALOGV("VideoEditorAudioEncoderSource::storeBuffer END");
+    return mNbBuffer;
+}
+
+/********************
+ * ENGINE INTERFACE *
+ ********************/
+/**
+ ******************************************************************************
+ * structure VideoEditorAudioEncoder_Context
+ * @brief This structure defines the context of the StageFright audio
+ * encoder shell
+ ******************************************************************************
+*/
+typedef struct {
+    M4ENCODER_AudioFormat mFormat;         // requested output format
+    M4ENCODER_AudioParams* mCodecParams;   // copy of the open() params (owned)
+    M4ENCODER_AudioDecSpecificInfo mDSI;   // AAC DSI captured at open() (owned)
+    sp<VideoEditorAudioEncoderSource> mEncoderSource; // PCM input source
+    OMXClient mClient;                     // connection to the OMX service
+    sp<MediaSource> mEncoder;              // the OMX codec instance
+    uint32_t mNbInputFrames;               // frames pushed via step()
+    uint32_t mNbOutputFrames;              // encoded AUs returned
+    int64_t mFirstOutputCts;               // CTS (us) of first output, -1 if none
+    int64_t mLastOutputCts;                // CTS (us) of last output, -1 if none
+} VideoEditorAudioEncoder_Context;
+
+/**
+ * Frees the encoder context and its DSI buffer.
+ * NOTE(review): `pContext = M4OSA_NULL` only nulls the local parameter
+ * copy; the caller's pointer is left dangling and must not be reused.
+ * @param pContext IN context allocated by VideoEditorAudioEncoder_init
+ * @return M4NO_ERROR or M4ERR_PARAMETER.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_cleanup(M4OSA_Context pContext) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    ALOGV("VideoEditorAudioEncoder_cleanup begin");
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    SAFE_FREE(pEncoderContext->mDSI.pInfo);
+    SAFE_FREE(pEncoderContext);
+    pContext = M4OSA_NULL;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_cleanup no error");
+    } else {
+        ALOGV("VideoEditorAudioEncoder_cleanup ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioEncoder_cleanup end");
+    return err;
+}
+
+/**
+ * Allocates an encoder context for the given output format.
+ * On failure the partially built context is cleaned up and *pContext is
+ * reset to NULL.
+ * @param format    IN  target audio format (AAC/AMR-NB/MP3)
+ * @param pContext  OUT new context, owned by the caller
+ * @param pUserData IN  unused
+ * @return M4NO_ERROR, M4ERR_PARAMETER or M4ERR_ALLOC.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_init(M4ENCODER_AudioFormat format,
+        M4OSA_Context* pContext, M4OSA_Void* pUserData) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    ALOGV(" VideoEditorAudioEncoder_init begin: format %d", format);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    SAFE_MALLOC(pEncoderContext, VideoEditorAudioEncoder_Context, 1,
+        "VideoEditorAudioEncoder");
+    pEncoderContext->mFormat = format;
+
+    *pContext = pEncoderContext;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_init no error");
+    } else {
+        VideoEditorAudioEncoder_cleanup(pEncoderContext);
+        *pContext = M4OSA_NULL;
+        ALOGV("VideoEditorAudioEncoder_init ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioEncoder_init end");
+    return err;
+}
+
+// Per-format init wrappers: bind the format and forward to the common init.
+M4OSA_ERR VideoEditorAudioEncoder_init_AAC(M4OSA_Context* pContext,
+        M4OSA_Void* pUserData) {
+    return VideoEditorAudioEncoder_init(M4ENCODER_kAAC, pContext, pUserData);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_init_AMRNB(M4OSA_Context* pContext,
+        M4OSA_Void* pUserData) {
+    return VideoEditorAudioEncoder_init(M4ENCODER_kAMRNB, pContext, pUserData);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_init_MP3(M4OSA_Context* pContext,
+        M4OSA_Void* pUserData) {
+    return VideoEditorAudioEncoder_init(M4ENCODER_kMP3, pContext, pUserData);
+}
+
+/**
+ * Stops the encoder graph and releases per-session resources (codec
+ * params, OMX codec, OMX client connection, encoder source). The context
+ * itself is freed later by VideoEditorAudioEncoder_cleanup.
+ * Fixes: (1) the input/output frame-count consistency check compared
+ * mNbInputFrames with itself (always false) and logged mNbInputFrames
+ * twice; (2) mEncoder is now NULL-guarded because close() is also reached
+ * from open()'s error path before the codec is created; (3) the final
+ * trace said "begin end".
+ * @param pContext IN encoder context
+ * @return M4NO_ERROR or M4ERR_PARAMETER.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_close(M4OSA_Context pContext) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    ALOGV("VideoEditorAudioEncoder_close begin");
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    SAFE_FREE(pEncoderContext->mCodecParams);
+
+    if( NULL != pEncoderContext->mEncoder.get() ) {
+        pEncoderContext->mEncoder->stop();
+        pEncoderContext->mEncoder.clear();
+    }
+    pEncoderContext->mClient.disconnect();
+    pEncoderContext->mEncoderSource.clear();
+
+    ALOGV("AudioEncoder_close:IN %d frames,OUT %d frames from %lld to %lld",
+        pEncoderContext->mNbInputFrames,
+        pEncoderContext->mNbOutputFrames, pEncoderContext->mFirstOutputCts,
+        pEncoderContext->mLastOutputCts);
+
+    if( pEncoderContext->mNbInputFrames != pEncoderContext->mNbOutputFrames ) {
+        ALOGV("VideoEditorAudioEncoder_close:some frames were not encoded %d %d",
+            pEncoderContext->mNbInputFrames, pEncoderContext->mNbOutputFrames);
+    }
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_close no error");
+    } else {
+        ALOGV("VideoEditorAudioEncoder_close ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioEncoder_close end");
+    return err;
+}
+
+/**
+ * Builds the StageFright encoding graph: validates and copies the codec
+ * parameters, creates the PCM source, connects to the OMX service,
+ * instantiates the codec (software forced via VIDEOEDITOR_FORCECODEC),
+ * starts it, and for AAC performs one read to capture the decoder
+ * specific info (DSI). On any failure, close() is invoked to roll back.
+ * @param pContext        IN  encoder context from init()
+ * @param pParams         IN  frequency/channels/bitrate/format
+ * @param pDSI            OUT filled with the AAC DSI (AAC only); the
+ *                            pInfo storage stays owned by the context
+ * @param pGrabberContext IN  unused
+ * @return M4NO_ERROR, M4ERR_PARAMETER or M4ERR_STATE.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_open(M4OSA_Context pContext,
+        M4ENCODER_AudioParams *pParams, M4ENCODER_AudioDecSpecificInfo *pDSI,
+        M4OSA_Context pGrabberContext) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+    status_t result = OK;
+    sp<MetaData> encoderMetadata = NULL;
+    const char* mime = NULL;
+    int32_t iNbChannel = 0;
+    uint32_t codecFlags = 0;
+
+    ALOGV("VideoEditorAudioEncoder_open begin");
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pParams, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pDSI, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+    pDSI->pInfo = M4OSA_NULL;
+    pDSI->infoSize = 0;
+
+    pEncoderContext->mNbInputFrames = 0;
+    pEncoderContext->mNbOutputFrames = 0;
+    pEncoderContext->mFirstOutputCts = -1;
+    pEncoderContext->mLastOutputCts = -1;
+
+    // Allocate & initialize the encoding parameters
+    ALOGV("VideoEditorAudioEncoder_open : params F=%d CN=%d BR=%d F=%d",
+        pParams->Frequency, pParams->ChannelNum, pParams->Bitrate,
+        pParams->Format);
+    SAFE_MALLOC(pEncoderContext->mCodecParams, M4ENCODER_AudioParams, 1,
+        "VIDEOEDITOR CodecParams");
+    pEncoderContext->mCodecParams->Frequency = pParams->Frequency;
+    pEncoderContext->mCodecParams->ChannelNum = pParams->ChannelNum;
+    pEncoderContext->mCodecParams->Bitrate = pParams->Bitrate;
+    pEncoderContext->mCodecParams->Format = pParams->Format;
+
+    // Check output format consistency
+    VIDEOEDITOR_CHECK(pEncoderContext->mCodecParams->Format ==
+        pEncoderContext->mFormat, M4ERR_PARAMETER);
+
+    /**
+     * StageFright graph building
+     */
+    // Create the meta data for the encoder
+    encoderMetadata = new MetaData;
+    switch( pEncoderContext->mCodecParams->Format ) {
+        case M4ENCODER_kAAC:
+        {
+            mime = MEDIA_MIMETYPE_AUDIO_AAC;
+            break;
+        }
+        case M4ENCODER_kAMRNB:
+        {
+            mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
+            break;
+        }
+        default:
+        {
+            // Note: MP3 encoding is not supported by this shell
+            VIDEOEDITOR_CHECK(!"AudioEncoder_open : incorrect input format",
+                M4ERR_PARAMETER);
+            break;
+        }
+    }
+    encoderMetadata->setCString(kKeyMIMEType, mime);
+    encoderMetadata->setInt32(kKeySampleRate,
+        (int32_t)pEncoderContext->mCodecParams->Frequency);
+    encoderMetadata->setInt32(kKeyBitRate,
+        (int32_t)pEncoderContext->mCodecParams->Bitrate);
+
+    switch( pEncoderContext->mCodecParams->ChannelNum ) {
+        case M4ENCODER_kMono:
+        {
+            iNbChannel = 1;
+            break;
+        }
+        case M4ENCODER_kStereo:
+        {
+            iNbChannel = 2;
+            break;
+        }
+        default:
+        {
+            VIDEOEDITOR_CHECK(!"AudioEncoder_open : incorrect channel number",
+                M4ERR_STATE);
+            break;
+        }
+    }
+    encoderMetadata->setInt32(kKeyChannelCount, iNbChannel);
+
+    // Create the encoder source
+    pEncoderContext->mEncoderSource = VideoEditorAudioEncoderSource::Create(
+        encoderMetadata);
+    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoderSource.get(),
+        M4ERR_STATE);
+
+    // Connect to the OMX client
+    result = pEncoderContext->mClient.connect();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Create the OMX codec
+#ifdef VIDEOEDITOR_FORCECODEC
+    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
+#endif /* VIDEOEDITOR_FORCECODEC */
+    // FIXME:
+    // We are moving away to use software AACEncoder and instead use OMX-based
+    // software AAC audio encoder. We want to use AACEncoder for now. After we
+    // fix the interface issue with the OMX-based AAC audio encoder, we should
+    // then set the component name back to NULL to allow the system to pick up
+    // the right AAC audio encoder.
+    pEncoderContext->mEncoder = OMXCodec::Create(
+            pEncoderContext->mClient.interface(), encoderMetadata, true,
+            pEncoderContext->mEncoderSource, "AACEncoder" /* component name */,
+            codecFlags);
+    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoder.get(), M4ERR_STATE);
+
+    // Start the graph
+    result = pEncoderContext->mEncoder->start();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Get AAC DSI, this code can only work with software encoder
+    if( M4ENCODER_kAAC == pEncoderContext->mCodecParams->Format ) {
+        int32_t isCodecConfig = 0;
+        MediaBuffer* buffer = NULL;
+
+        // Read once to get the DSI
+        result = pEncoderContext->mEncoder->read(&buffer, NULL);
+        VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt32(kKeyIsCodecConfig,
+            &isCodecConfig) && isCodecConfig, M4ERR_STATE);
+
+        // Save the DSI
+        pEncoderContext->mDSI.infoSize = (M4OSA_UInt32)buffer->range_length();
+        SAFE_MALLOC(pEncoderContext->mDSI.pInfo, M4OSA_Int8,
+            pEncoderContext->mDSI.infoSize, "Encoder header");
+
+        memcpy((void *)pEncoderContext->mDSI.pInfo,
+            (void *)((M4OSA_MemAddr8)(buffer->data())+buffer->range_offset()),
+            pEncoderContext->mDSI.infoSize);
+
+        buffer->release();
+        *pDSI = pEncoderContext->mDSI;
+    }
+    ALOGV("VideoEditorAudioEncoder_open : DONE");
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_open no error");
+    } else {
+        VideoEditorAudioEncoder_close(pEncoderContext);
+        ALOGV("VideoEditorAudioEncoder_open ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioEncoder_open end");
+    return err;
+}
+
+/**
+ * Copies one PCM input frame into a fresh MediaBuffer and queues it on
+ * the encoder source. The MediaBuffer owns the copy; on error before the
+ * hand-off, the buffer is released here.
+ * @param pContext  IN encoder context
+ * @param pInBuffer IN engine PCM buffer (single table entry used)
+ * @return M4NO_ERROR or M4ERR_PARAMETER.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_processInputBuffer(M4OSA_Context pContext,
+        M4ENCODER_AudioBuffer* pInBuffer) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4OSA_Int8* pData = M4OSA_NULL;
+    MediaBuffer* buffer = NULL;
+    int32_t nbBuffer = 0;
+
+    ALOGV("VideoEditorAudioEncoder_processInputBuffer begin");
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    switch( pEncoderContext->mCodecParams->ChannelNum ) {
+        case M4ENCODER_kMono:
+        case M4ENCODER_kStereo:
+            // Let the MediaBuffer own the data so we don't have to free it
+            buffer = new MediaBuffer((size_t)pInBuffer->pTableBufferSize[0]);
+            pData = (M4OSA_Int8*)buffer->data() + buffer->range_offset();
+            memcpy((void *)pData, (void *)pInBuffer->pTableBuffer[0],
+                pInBuffer->pTableBufferSize[0]);
+            break;
+        default:
+            ALOGV("VEAE_processInputBuffer unsupported channel configuration %d",
+                pEncoderContext->mCodecParams->ChannelNum);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+            break;
+    }
+
+    ALOGV("VideoEditorAudioEncoder_processInputBuffer : store %d bytes",
+        buffer->range_length());
+    // Push the buffer to the source
+    nbBuffer = pEncoderContext->mEncoderSource->storeBuffer(buffer);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_processInputBuffer no error");
+    } else {
+        if( NULL != buffer ) {
+            buffer->release();
+        }
+        ALOGV("VideoEditorAudioEncoder_processInputBuffer ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioEncoder_processInputBuffer end");
+    return err;
+}
+
+/**
+ * Converts one encoded MediaBuffer into the engine's output AU format.
+ * An empty buffer is not an error (the encoder simply has no data yet).
+ * Always releases the input MediaBuffer, success or failure.
+ * NOTE(review): Cts is computed with an Int32 cast into a UInt32 variable
+ * and never used afterwards — presumably leftover debug code; confirm
+ * before removing.
+ * @param pContext   IN  encoder context
+ * @param buffer     IN  encoded buffer from the codec (consumed)
+ * @param pOutBuffer OUT engine AU buffer, size set to 0 when no data
+ * @return M4NO_ERROR, M4ERR_PARAMETER or M4ERR_STATE.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_processOutputBuffer(M4OSA_Context pContext,
+        MediaBuffer* buffer, M4ENCODER_AudioBuffer* pOutBuffer) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4OSA_UInt32 Cts = 0;
+    int32_t i32Tmp = 0;
+    int64_t i64Tmp = 0;
+    status_t result = OK;
+
+    ALOGV("VideoEditorAudioEncoder_processOutputBuffer begin");
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOutBuffer, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    // Process the returned AU
+    if( 0 == buffer->range_length() ) {
+        // Encoder has no data yet, nothing unusual
+        ALOGV("VideoEditorAudioEncoder_processOutputBuffer : buffer is empty");
+        pOutBuffer->pTableBufferSize[0] = 0;
+        goto cleanUp;
+    }
+    if( buffer->meta_data()->findInt32(kKeyIsCodecConfig, &i32Tmp) && i32Tmp ) {
+        /* This should not happen with software encoder,
+         * DSI was retrieved beforehand */
+        VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_STATE);
+    } else {
+        // Check the CTS
+        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt64(kKeyTime, &i64Tmp),
+            M4ERR_STATE);
+        Cts = (M4OSA_Int32)(i64Tmp/1000);
+
+        // Track output statistics (first/last CTS in microseconds)
+        pEncoderContext->mNbOutputFrames++;
+        if( 0 > pEncoderContext->mFirstOutputCts ) {
+            pEncoderContext->mFirstOutputCts = i64Tmp;
+        }
+        pEncoderContext->mLastOutputCts = i64Tmp;
+
+        // Format the AU
+        memcpy((void *)pOutBuffer->pTableBuffer[0],
+            (void *)((M4OSA_MemAddr8)(buffer->data())+buffer->range_offset()),
+            buffer->range_length());
+        pOutBuffer->pTableBufferSize[0] = (M4OSA_UInt32)buffer->range_length();
+    }
+
+cleanUp:
+    // Release the buffer
+    buffer->release();
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_processOutputBuffer no error");
+    } else {
+        ALOGV("VideoEditorAudioEncoder_processOutputBuffer ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioEncoder_processOutputBuffer end");
+    return err;
+}
+
+/**
+ * Encodes one frame: queues the PCM input on the source, pulls one
+ * encoded buffer from the codec, and converts it into the engine AU.
+ * @param pContext   IN  encoder context
+ * @param pInBuffer  IN  PCM frame to encode
+ * @param pOutBuffer OUT encoded AU (may come back empty, see
+ *                       processOutputBuffer)
+ * @return M4NO_ERROR, M4ERR_PARAMETER or M4ERR_STATE.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_step(M4OSA_Context pContext,
+        M4ENCODER_AudioBuffer* pInBuffer, M4ENCODER_AudioBuffer* pOutBuffer) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+    status_t result = OK;
+    MediaBuffer* buffer = NULL;
+
+    ALOGV("VideoEditorAudioEncoder_step begin");
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pInBuffer, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOutBuffer, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+    pEncoderContext->mNbInputFrames++;
+
+    // Push the input buffer to the encoder source
+    err = VideoEditorAudioEncoder_processInputBuffer(pEncoderContext,pInBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+    // Read
+    result = pEncoderContext->mEncoder->read(&buffer, NULL);
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Provide the encoded AU to the writer
+    err = VideoEditorAudioEncoder_processOutputBuffer(pEncoderContext, buffer,
+        pOutBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_step no error");
+    } else {
+        ALOGV("VideoEditorAudioEncoder_step ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioEncoder_step end");
+    return err;
+}
+
+/**
+ * Option query entry point. No options are currently supported; every
+ * optionID yields M4ERR_BAD_OPTION_ID. Kept to satisfy the
+ * M4ENCODER_AudioGlobalInterface contract.
+ * @return M4ERR_PARAMETER or M4ERR_BAD_OPTION_ID.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_getOption(M4OSA_Context pContext,
+        M4OSA_OptionID optionID, M4OSA_DataOption* optionValue) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    ALOGV("VideoEditorAudioEncoder_getOption begin optionID 0x%X", optionID);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    switch( optionID ) {
+        default:
+            ALOGV("VideoEditorAudioEncoder_getOption: unsupported optionId 0x%X",
+                optionID);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+            break;
+    }
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_getOption no error");
+    } else {
+        ALOGV("VideoEditorAudioEncoder_getOption ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorAudioEncoder_getOption end");
+    return err;
+}
+
+/**
+ * Allocates and populates the audio encoder interface for the requested
+ * format (AAC/AMR-NB/MP3). Only the init entry point differs per format.
+ * @param format            IN  requested encoder format
+ * @param pFormat           OUT echoes format
+ * @param pEncoderInterface OUT newly allocated interface (caller owns
+ *                              it); reset to NULL on failure
+ * @return M4NO_ERROR or M4ERR_PARAMETER.
+ */
+M4OSA_ERR VideoEditorAudioEncoder_getInterface(
+        M4ENCODER_AudioFormat format, M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
+    M4OSA_ERR err = M4NO_ERROR;
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pFormat, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pEncoderInterface, M4ERR_PARAMETER);
+
+    ALOGV("VideoEditorAudioEncoder_getInterface 0x%x 0x%x",pFormat,
+        pEncoderInterface);
+    SAFE_MALLOC(*pEncoderInterface, M4ENCODER_AudioGlobalInterface, 1,
+        "AudioEncoder");
+
+    *pFormat = format;
+
+    switch( format ) {
+        case M4ENCODER_kAAC:
+        {
+            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_AAC;
+            break;
+        }
+        case M4ENCODER_kAMRNB:
+        {
+            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_AMRNB;
+            break;
+        }
+        case M4ENCODER_kMP3:
+        {
+            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_MP3;
+            break;
+        }
+        default:
+        {
+            ALOGV("VideoEditorAudioEncoder_getInterface: unsupported format %d",
+                format);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+            break;
+        }
+    }
+    (*pEncoderInterface)->pFctCleanUp = VideoEditorAudioEncoder_cleanup;
+    (*pEncoderInterface)->pFctOpen = VideoEditorAudioEncoder_open;
+    (*pEncoderInterface)->pFctClose = VideoEditorAudioEncoder_close;
+    (*pEncoderInterface)->pFctStep = VideoEditorAudioEncoder_step;
+    (*pEncoderInterface)->pFctGetOption = VideoEditorAudioEncoder_getOption;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorAudioEncoder_getInterface no error");
+    } else {
+        // Fix: free the interface allocated above instead of leaking it,
+        // and never dereference pEncoderInterface when the parameter
+        // check for it failed (it may be NULL here).
+        if( M4OSA_NULL != pEncoderInterface ) {
+            SAFE_FREE(*pEncoderInterface);
+            *pEncoderInterface = M4OSA_NULL;
+        }
+        ALOGV("VideoEditorAudioEncoder_getInterface ERROR 0x%X", err);
+    }
+    return err;
+}
+// C entry points for the engine's encoder registry. Each wrapper binds
+// one output format and forwards to the common factory above.
+extern "C" {
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_AAC(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
+    return VideoEditorAudioEncoder_getInterface(
+        M4ENCODER_kAAC, pFormat, pEncoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_AMRNB(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
+
+    return VideoEditorAudioEncoder_getInterface(
+        M4ENCODER_kAMRNB, pFormat, pEncoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_MP3(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
+    ALOGV("VideoEditorAudioEncoder_getInterface_MP3 no error");
+
+    return VideoEditorAudioEncoder_getInterface(
+        M4ENCODER_kMP3, pFormat, pEncoderInterface);
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c b/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c
new file mode 100755
index 0000000..98919d2
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorBuffer.c
+* @brief StageFright shell Buffer
+*************************************************************************
+*/
+#undef M4OSA_TRACE_LEVEL
+#define M4OSA_TRACE_LEVEL 1
+
+#include "VideoEditorBuffer.h"
+#include "utils/Log.h"
+
+#define VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE 40
+
+#define VIDEOEDITOR_SAFE_FREE(p) \
+{ \
+ if(M4OSA_NULL != p) \
+ { \
+ free(p); \
+ p = M4OSA_NULL; \
+ } \
+}
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
+ * M4OSA_UInt32 nbBuffers)
+ * @brief Allocate a pool of nbBuffers buffers
+ *
+ * @param ppool : IN The buffer pool to create
+ * @param nbBuffers : IN The number of buffers in the pool
+ * @param poolName : IN a name given to the pool
+ * @return Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
+ M4OSA_UInt32 nbBuffers, M4OSA_Char* poolName)
+{
+ M4OSA_ERR lerr = M4NO_ERROR;
+ VIDEOEDITOR_BUFFER_Pool* pool;
+
+ ALOGV("VIDEOEDITOR_BUFFER_allocatePool : ppool = 0x%x nbBuffers = %d ",
+ ppool, nbBuffers);
+
+ pool = M4OSA_NULL;
+ pool = (VIDEOEDITOR_BUFFER_Pool*)M4OSA_32bitAlignedMalloc(
+ sizeof(VIDEOEDITOR_BUFFER_Pool), VIDEOEDITOR_BUFFER_EXTERNAL,
+ (M4OSA_Char*)("VIDEOEDITOR_BUFFER_allocatePool: pool"));
+ if (M4OSA_NULL == pool)
+ {
+ lerr = M4ERR_ALLOC;
+ goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
+ }
+
+ ALOGV("VIDEOEDITOR_BUFFER_allocatePool : Allocating Pool buffers");
+ pool->pNXPBuffer = M4OSA_NULL;
+ pool->pNXPBuffer = (VIDEOEDITOR_BUFFER_Buffer*)M4OSA_32bitAlignedMalloc(
+ sizeof(VIDEOEDITOR_BUFFER_Buffer)*nbBuffers,
+ VIDEOEDITOR_BUFFER_EXTERNAL,
+ (M4OSA_Char*)("BUFFER_allocatePool: pNXPBuffer"));
+ if(M4OSA_NULL == pool->pNXPBuffer)
+ {
+ lerr = M4ERR_ALLOC;
+ goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
+ }
+
+ ALOGV("VIDEOEDITOR_BUFFER_allocatePool : Allocating Pool name buffer");
+ pool->poolName = M4OSA_NULL;
+ pool->poolName = (M4OSA_Char*)M4OSA_32bitAlignedMalloc(
+ VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE,VIDEOEDITOR_BUFFER_EXTERNAL,
+ (M4OSA_Char*)("VIDEOEDITOR_BUFFER_allocatePool: poolname"));
+ if(pool->poolName == M4OSA_NULL)
+ {
+ lerr = M4ERR_ALLOC;
+ goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
+ }
+
+ ALOGV("VIDEOEDITOR_BUFFER_allocatePool : Assigning Pool name buffer");
+
+ memset((void *)pool->poolName, 0,VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE);
+ memcpy((void *)pool->poolName, (void *)poolName,
+ VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE-1);
+
+ pool->NB = nbBuffers;
+
+VIDEOEDITOR_BUFFER_allocatePool_Cleanup:
+ if(M4NO_ERROR != lerr)
+ {
+ VIDEOEDITOR_SAFE_FREE(pool->pNXPBuffer);
+ VIDEOEDITOR_SAFE_FREE(pool->poolName);
+ VIDEOEDITOR_SAFE_FREE(pool);
+ }
+ *ppool = pool;
+ ALOGV("VIDEOEDITOR_BUFFER_allocatePool END");
+
+ return lerr;
+}
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool)
+ * @brief Deallocate a buffer pool
+ *
+ * @param ppool : IN The buffer pool to free
+ * @return Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool)
+{
+ M4OSA_ERR err;
+ M4OSA_UInt32 j = 0;
+
+ ALOGV("VIDEOEDITOR_BUFFER_freePool : ppool = 0x%x", ppool);
+
+ err = M4NO_ERROR;
+
+ for (j = 0; j < ppool->NB; j++)
+ {
+ if(M4OSA_NULL != ppool->pNXPBuffer[j].pData)
+ {
+ free(ppool->pNXPBuffer[j].pData);
+ ppool->pNXPBuffer[j].pData = M4OSA_NULL;
+ }
+ }
+
+ if(ppool != M4OSA_NULL)
+ {
+ SAFE_FREE(ppool->pNXPBuffer);
+ SAFE_FREE(ppool->poolName);
+ SAFE_FREE(ppool);
+ }
+
+ return(err);
+}
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
+ * VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
+ * @brief Returns a buffer in a given state
+ *
+ * @param ppool : IN The buffer pool
+ * @param desiredState : IN The buffer state
+ * @param pNXPBuffer : IN The selected buffer
+ * @return Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
+ VIDEOEDITOR_BUFFER_State desiredState,
+ VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_Bool bFound = M4OSA_FALSE;
+ M4OSA_UInt32 i, ibuf;
+
+ ALOGV("VIDEOEDITOR_BUFFER_getBuffer from %s in state=%d",
+ ppool->poolName, desiredState);
+
+ ibuf = 0;
+
+ for (i=0; i < ppool->NB; i++)
+ {
+ bFound = (ppool->pNXPBuffer[i].state == desiredState);
+ if (bFound)
+ {
+ ibuf = i;
+ break;
+ }
+ }
+
+ if(!bFound)
+ {
+ ALOGV("VIDEOEDITOR_BUFFER_getBuffer No buffer available in state %d",
+ desiredState);
+ *pNXPBuffer = M4OSA_NULL;
+ return M4ERR_NO_BUFFER_AVAILABLE;
+ }
+
+ /* case where a buffer has been found */
+ *pNXPBuffer = &(ppool->pNXPBuffer[ibuf]);
+
+ ALOGV("VIDEOEDITOR_BUFFER_getBuffer: idx = %d", ibuf);
+
+ return(err);
+}
+
+M4OSA_ERR VIDEOEDITOR_BUFFER_initPoolBuffers(VIDEOEDITOR_BUFFER_Pool* pool,
+ M4OSA_UInt32 lSize)
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_UInt32 index, j;
+
+ /**
+ * Initialize all the buffers in the pool */
+ for(index = 0; index < pool->NB; index++)
+ {
+ pool->pNXPBuffer[index].pData = M4OSA_NULL;
+ pool->pNXPBuffer[index].pData = (M4OSA_Void*)M4OSA_32bitAlignedMalloc(
+ lSize, VIDEOEDITOR_BUFFER_EXTERNAL,
+ (M4OSA_Char*)("BUFFER_initPoolBuffers: Buffer data"));
+ if(M4OSA_NULL == pool->pNXPBuffer[index].pData)
+ {
+ for (j = 0; j < index; j++)
+ {
+ if(M4OSA_NULL != pool->pNXPBuffer[j].pData)
+ {
+ free(pool->pNXPBuffer[j].pData);
+ pool->pNXPBuffer[j].pData = M4OSA_NULL;
+ }
+ }
+ err = M4ERR_ALLOC;
+ return err;
+ }
+ pool->pNXPBuffer[index].size = 0;
+ pool->pNXPBuffer[index].state = VIDEOEDITOR_BUFFER_kEmpty;
+ pool->pNXPBuffer[index].idx = index;
+ pool->pNXPBuffer[index].buffCTS = -1;
+ }
+ return err;
+}
+
+M4OSA_ERR VIDEOEDITOR_BUFFER_getOldestBuffer(VIDEOEDITOR_BUFFER_Pool *pool,
+ VIDEOEDITOR_BUFFER_State desiredState,
+ VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
+{
+ M4OSA_ERR err = M4NO_ERROR;
+ M4OSA_UInt32 index, j;
+ M4_MediaTime candidateTimeStamp = (M4_MediaTime)0x7ffffff;
+ M4OSA_Bool bFound = M4OSA_FALSE;
+
+ *pNXPBuffer = M4OSA_NULL;
+ for(index = 0; index< pool->NB; index++)
+ {
+ if(pool->pNXPBuffer[index].state == desiredState)
+ {
+ if(pool->pNXPBuffer[index].buffCTS <= candidateTimeStamp)
+ {
+ bFound = M4OSA_TRUE;
+ candidateTimeStamp = pool->pNXPBuffer[index].buffCTS;
+ *pNXPBuffer = &(pool->pNXPBuffer[index]);
+ }
+ }
+ }
+ if(M4OSA_FALSE == bFound)
+ {
+ ALOGV("VIDEOEDITOR_BUFFER_getOldestBuffer WARNING no buffer available");
+ err = M4ERR_NO_BUFFER_AVAILABLE;
+ }
+ return err;
+}
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp
new file mode 100755
index 0000000..af53c54
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp
@@ -0,0 +1,803 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorMp3Reader.cpp
+* @brief StageFright shell MP3 Reader
+*************************************************************************
+*/
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_MP3READER"
+
+/**
+ * HEADERS
+ *
+ */
+#include "M4OSA_Debug.h"
+#include "M4SYS_AccessUnit.h"
+#include "VideoEditorMp3Reader.h"
+#include "VideoEditorUtils.h"
+
+#include "utils/Log.h"
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+/**
+ * SOURCE CLASS
+ */
+
+namespace android {
+/**
+ * ENGINE INTERFACE
+ */
+
+/**
+ **************************************************************************
+ * structure VideoEditorMp3Reader_Context
+ * @brief This structure defines the context of the SF MP3 reader shell.
+ **************************************************************************
+ */
+typedef struct {
+ sp<DataSource> mDataSource;
+ sp<MediaExtractor> mExtractor;
+ sp<MediaSource> mMediaSource;
+ M4_AudioStreamHandler* mAudioStreamHandler;
+ M4SYS_AccessUnit mAudioAu;
+ M4OSA_Time mMaxDuration;
+ M4OSA_UInt8 mStreamNumber;
+ M4OSA_Bool mSeeking;
+ M4OSA_Time mSeekTime;
+ uint32_t mExtractorFlags;
+} VideoEditorMp3Reader_Context;
+
+/**
+ ****************************************************************************
+ * @brief create an instance of the MP3 reader
+ * @note allocates the context
+ *
+ * @param pContext: (OUT) pointer on a reader context
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_ALLOC a memory allocation has failed
+ * @return M4ERR_PARAMETER at least one parameter is not valid
+ ****************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_create(M4OSA_Context *pContext) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorMp3Reader_Context *pReaderContext = M4OSA_NULL;
+
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+ ALOGV("VideoEditorMp3Reader_create begin");
+
+ /* Context allocation & initialization */
+ SAFE_MALLOC(pReaderContext, VideoEditorMp3Reader_Context, 1,
+ "VideoEditorMp3Reader");
+
+ pReaderContext->mAudioStreamHandler = M4OSA_NULL;
+ pReaderContext->mAudioAu.dataAddress = M4OSA_NULL;
+ pReaderContext->mMaxDuration = 0;
+ *pContext = pReaderContext;
+
+cleanUp:
+ if (M4NO_ERROR == err) {
+ ALOGV("VideoEditorMp3Reader_create no error");
+ } else {
+ ALOGV("VideoEditorMp3Reader_create ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorMp3Reader_create end");
+ return err;
+}
+
+/**
+ *******************************************************************************
+ * @brief destroy the instance of the MP3 reader
+ * @note after this call the context is invalid
+ * @param context: (IN) Context of the reader
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER The input parameter is not properly set
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_destroy(M4OSA_Context pContext) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorMp3Reader_Context *pReaderContext =
+ (VideoEditorMp3Reader_Context*)pContext;
+
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pReaderContext, M4ERR_PARAMETER);
+ ALOGV("VideoEditorMp3Reader_destroy begin");
+
+ SAFE_FREE(pReaderContext);
+cleanUp:
+ if (M4NO_ERROR == err) {
+ ALOGV("VideoEditorMp3Reader_destroy no error");
+ } else {
+ ALOGV("VideoEditorMp3Reader_destroy ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorMp3Reader_destroy end");
+ return err;
+}
+/**
+ ******************************************************************************
+ * @brief open the reader and initializes its created instance
+ * @note this function opens the MP3 file
+ * @param context: (IN) Context of the reader
+ * @param pFileDescriptor: (IN) Pointer to proprietary data identifying
+ * the media to open
+
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ * @return M4ERR_BAD_CONTEXT provided context is not a valid one
+ * @return M4ERR_UNSUPPORTED_MEDIA_TYPE the media is DRM protected
+ ******************************************************************************
+*/
/**
 * Open the reader on an MP3 file: builds a FileSource from the path in
 * pFileDescriptor and an MP3 MediaExtractor on top of it, then rejects
 * DRM-protected media.
 *
 * @param context         (IN) reader context from _create
 * @param pFileDescriptor (IN) NUL-terminated file path (cast to char*)
 * @return M4NO_ERROR on success; M4ERR_UNSUPPORTED_MEDIA_TYPE for DRM
 *         content; UNKNOWN_ERROR on source/extractor failure
 *
 * NOTE(review): the failure paths return Stagefright's UNKNOWN_ERROR
 * although the declared return type is M4OSA_ERR — callers presumably
 * only test against M4NO_ERROR; confirm before changing.
 * NOTE(review): on the DRM path mDataSource/mExtractor are left set;
 * looks like cleanup is deferred to _close — verify callers always
 * call _close after a failed open.
 */
M4OSA_ERR VideoEditorMp3Reader_open(M4OSA_Context context,
    M4OSA_Void* pFileDescriptor){
    VideoEditorMp3Reader_Context *pReaderContext =
        (VideoEditorMp3Reader_Context*)context;
    M4OSA_ERR err = M4NO_ERROR;

    ALOGV("VideoEditorMp3Reader_open begin");
    /* Check function parameters*/
    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
        "VideoEditorMp3Reader_open: invalid context pointer");
    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
        "VideoEditorMp3Reader_open: invalid pointer pFileDescriptor");

    ALOGV("VideoEditorMp3Reader_open Datasource start %s",
        (char*)pFileDescriptor);
    /* pFileDescriptor is interpreted as a plain file path. */
    pReaderContext->mDataSource = new FileSource ((char*)pFileDescriptor);
    ALOGV("VideoEditorMp3Reader_open Datasource end");

    if (pReaderContext->mDataSource == NULL) {
        ALOGV("VideoEditorMp3Reader_open Datasource error");
        return UNKNOWN_ERROR;
    }

    ALOGV("VideoEditorMp3Reader_open extractor start");
    /* Hint the MIME type so the MP3 extractor is selected directly. */
    pReaderContext->mExtractor = MediaExtractor::Create(
        pReaderContext->mDataSource,MEDIA_MIMETYPE_AUDIO_MPEG);
    ALOGV("VideoEditorMp3Reader_open extractor end");

    if (pReaderContext->mExtractor == NULL) {
        ALOGV("VideoEditorMp3Reader_open extractor error");
        return UNKNOWN_ERROR;
    }
    pReaderContext->mStreamNumber = 0;

    /* Refuse DRM-protected media: this shell cannot decrypt it. */
    int32_t isDRMProtected = 0;
    sp<MetaData> meta = pReaderContext->mExtractor->getMetaData();
    meta->findInt32(kKeyIsDRM, &isDRMProtected);
    if (isDRMProtected) {
        ALOGV("VideoEditorMp3Reader_open error - DRM Protected");
        return M4ERR_UNSUPPORTED_MEDIA_TYPE;
    }

    ALOGV("VideoEditorMp3Reader_open end");
    return err;
}
+/**
+ **************************************************************************
+ * @brief close the reader
+ * @note this function closes the MP3 reader
+ * @param context: (IN) Context of the reader
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER the context is NULL
+ **************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_close(M4OSA_Context context) {
+ VideoEditorMp3Reader_Context *pReaderContext =
+ (VideoEditorMp3Reader_Context*)context;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ ALOGV("VideoEditorMp3Reader_close begin");
+ /* Check function parameters */
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_close: invalid context pointer");
+
+ if (pReaderContext->mAudioStreamHandler != NULL) {
+ if (M4OSA_NULL != pReaderContext->mAudioStreamHandler->\
+ m_basicProperties.m_pDecoderSpecificInfo) {
+ free(pReaderContext->mAudioStreamHandler->\
+ m_basicProperties.m_pDecoderSpecificInfo);
+ pReaderContext->mAudioStreamHandler->m_basicProperties.\
+ m_decoderSpecificInfoSize = 0;
+ pReaderContext->mAudioStreamHandler->m_basicProperties.\
+ m_pDecoderSpecificInfo = M4OSA_NULL;
+ }
+
+ /* Finally destroy the stream handler */
+ free(pReaderContext->mAudioStreamHandler);
+ pReaderContext->mAudioStreamHandler = M4OSA_NULL;
+
+ if (pReaderContext->mAudioAu.dataAddress != NULL) {
+ free(pReaderContext->mAudioAu.dataAddress);
+ pReaderContext->mAudioAu.dataAddress = NULL;
+ }
+ }
+
+ pReaderContext->mMediaSource->stop();
+ pReaderContext->mMediaSource.clear();
+ pReaderContext->mExtractor.clear();
+ pReaderContext->mDataSource.clear();
+
+ ALOGV("VideoEditorMp3Reader_close end ");
+ return err;
+}
+/**
+ ******************************************************************************
+ * @brief get an option value from the reader
+ * @note
+ * it allows the caller to retrieve a property value:
+ *
+ * @param context: (IN) Context of the reader
+ * @param optionId: (IN) indicates the option to get
+ * @param pValue: (OUT) pointer to structure or value (allocated
+ * by user) where option is stored
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @return M4ERR_BAD_OPTION_ID when the option ID is not a valid one
+ ******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_getOption(M4OSA_Context context,
+ M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+ VideoEditorMp3Reader_Context *pReaderContext =
+ (VideoEditorMp3Reader_Context*)context;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ ALOGV("VideoEditorMp3Reader_getOption begin: optionId= %d ",(int)optionId);
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+ "invalid value pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+ "invalid value pointer");
+
+ switch(optionId) {
+ case M4READER_kOptionID_Duration:
+ {
+ ALOGV("Mp3Reader duration=%ld",pReaderContext->mMaxDuration);
+ *(M4OSA_Time*)pValue = pReaderContext->mMaxDuration;
+ }
+ break;
+
+ case M4READER_kOptionID_Bitrate:
+ {
+ M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+ if (M4OSA_NULL != pReaderContext->mAudioStreamHandler) {
+ *pBitrate = pReaderContext->mAudioStreamHandler->\
+ m_basicProperties.m_averageBitRate;
+ } else {
+ pBitrate = 0;
+ err = M4ERR_PARAMETER;
+ }
+ }
+ break;
+
+ case M4READER_kOptionID_Mp3Id3v1Tag:
+ break;
+
+ case M4READER_kOptionID_Mp3Id3v2Tag:
+ break;
+
+ case M4READER_kOptionID_GetMetadata:
+ break;
+
+ default :
+ {
+ ALOGV("VideoEditorMp3Reader_getOption: M4ERR_BAD_OPTION_ID");
+ err = M4ERR_BAD_OPTION_ID;
+ }
+ }
+ ALOGV("VideoEditorMp3Reader_getOption end ");
+ return err;
+}
+/**
+ ******************************************************************************
+ * @brief set an option value of the reader
+ * @note
+ * it allows the caller to set a property value:
+ *
+ * @param context: (IN) Context of the reader
+ * @param optionId: (IN) Identifier indicating the option to set
+ * @param pValue: (IN) Pointer to structure or value (allocated
+ * by user) where option is stored
+ *
+ * @return M4NO_ERROR There is no error
+ * @return M4ERR_BAD_OPTION_ID The option ID is not a valid one
+ * @return M4ERR_STATE State automaton is not applied
+ * @return M4ERR_PARAMETER The option parameter is invalid
+ ******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_setOption(M4OSA_Context context,
+ M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+ VideoEditorMp3Reader_Context *pReaderContext =
+ (VideoEditorMp3Reader_Context*)context;
+ M4OSA_ERR err = M4NO_ERROR;
+
+ ALOGV("VideoEditorMp3Reader_Context begin: optionId: %d Value: %d ",
+ (int)optionId,(int)pValue);
+
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+ "invalid context pointer");
+ M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+ "invalid value pointer");
+
+ switch(optionId) {
+ case M4READER_kOptionID_SetOsaFileReaderFctsPtr:
+ default :
+ {
+ err = M4NO_ERROR;
+ }
+ }
+ ALOGV("VideoEditorMp3Reader_Context end ");
+ return err;
+}
+/**
+ ******************************************************************************
+ * @brief jump into the stream at the specified time
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler(IN) stream description of the stream to make jump
+ * @param pTime (I/O)IN:the time to jump to (in ms)
+ * OUT: the time to which the stream really jumped
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ ******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_jump(M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime) {
+ VideoEditorMp3Reader_Context *pReaderContext =
+ (VideoEditorMp3Reader_Context*)context;
+ M4SYS_StreamID streamIdArray[2];
+ M4OSA_ERR err = M4NO_ERROR;
+ M4SYS_AccessUnit* pAu;
+ M4OSA_Time time64 = (M4OSA_Time)*pTime;
+
+ ALOGV("VideoEditorMp3Reader_jump begin");
+ M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_jump: invalid context");
+ M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_jump: invalid pointer to M4_StreamHandler");
+ M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_jump: invalid time pointer");
+
+ if(pStreamHandler == (M4_StreamHandler*)pReaderContext->\
+ mAudioStreamHandler){
+ pAu = &pReaderContext->mAudioAu;
+ } else {
+ ALOGV("VideoEditorMp3Reader_jump: passed StreamHandler is not known");
+ return M4ERR_PARAMETER;
+ }
+
+ streamIdArray[0] = pStreamHandler->m_streamId;
+ streamIdArray[1] = 0;
+
+ ALOGV("VideoEditorMp3Reader_jump time ms %ld ", time64);
+
+ pAu->CTS = time64;
+ pAu->DTS = time64;
+
+ time64 = time64 * 1000; /* Convert the time into micro sec */
+ ALOGV("VideoEditorMp3Reader_jump time us %ld ", time64);
+
+ pReaderContext->mSeeking = M4OSA_TRUE;
+ pReaderContext->mSeekTime = time64;
+
+ time64 = time64 / 1000; /* Convert the time into milli sec */
+ *pTime = (M4OSA_Int32)time64;
+ ALOGV("VideoEditorMp3Reader_jump end ");
+ return err;
+}
+/**
+ *******************************************************************************
+ * @brief Get the next stream found in the media file
+ *
+ * @param context: (IN) Context of the reader
+ * @param pMediaFamily: (OUT) pointer to a user allocated
+ * M4READER_MediaFamily that will be filled with
+ * the media family of the found stream
+ * @param pStreamHandler: (OUT) pointer to a stream handler that will be
+ * allocated and filled with stream description
+ *
+ * @return M4NO_ERROR there is no error
+ * @return M4WAR_NO_MORE_STREAM no more available stream in the media
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_getNextStream(M4OSA_Context context,
+ M4READER_MediaFamily *pMediaFamily,
+ M4_StreamHandler **pStreamHandlerParam) {
+ VideoEditorMp3Reader_Context *pReaderContext =
+ (VideoEditorMp3Reader_Context*)context;
+ M4OSA_ERR err = M4NO_ERROR;
+ M4SYS_StreamID streamIdArray[2];
+ M4SYS_StreamDescription streamDesc;
+ M4_AudioStreamHandler* pAudioStreamHandler;
+ M4_StreamHandler* pStreamHandler;
+ M4OSA_UInt8 type, temp;
+ M4OSA_Bool haveAudio = M4OSA_FALSE;
+ sp<MetaData> meta = NULL;
+ int64_t Duration;
+
+ ALOGV("VideoEditorMp3Reader_getNextStream begin");
+ M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_getNextStream: invalid context");
+ M4OSA_DEBUG_IF1((pMediaFamily == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_getNextStream: invalid pointer to MediaFamily");
+ M4OSA_DEBUG_IF1((pStreamHandlerParam == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_getNextStream: invalid pointer to StreamHandler");
+
+ ALOGV("VideoEditorMp3Reader_getNextStream stream number = %d",
+ pReaderContext->mStreamNumber);
+ if (pReaderContext->mStreamNumber >= 1) {
+ ALOGV("VideoEditorMp3Reader_getNextStream max number of stream reached");
+ return M4WAR_NO_MORE_STREAM;
+ }
+ pReaderContext->mStreamNumber = pReaderContext->mStreamNumber + 1;
+ ALOGV("VideoEditorMp3Reader_getNextStream number of Tracks%d",
+ pReaderContext->mExtractor->countTracks());
+ for (temp = 0; temp < pReaderContext->mExtractor->countTracks(); temp++) {
+ meta = pReaderContext->mExtractor->getTrackMetaData(temp);
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
+ pReaderContext->mMediaSource =
+ pReaderContext->mExtractor->getTrack(temp);
+ pReaderContext->mMediaSource->start();
+ haveAudio = true;
+ }
+
+ if (haveAudio) {
+ break;
+ }
+ }
+
+ if (!haveAudio) {
+ ALOGV("VideoEditorMp3Reader_getNextStream no more stream ");
+ pReaderContext->mDataSource.clear();
+ return M4WAR_NO_MORE_STREAM;
+ }
+
+ pReaderContext->mExtractorFlags = pReaderContext->mExtractor->flags();
+ *pMediaFamily = M4READER_kMediaFamilyAudio;
+
+ streamDesc.duration = meta->findInt64(kKeyDuration, &Duration);
+ streamDesc.duration = (M4OSA_Time)Duration/1000;
+
+ meta->findInt32(kKeyBitRate, (int32_t*)&streamDesc.averageBitrate);
+ meta->findInt32(kKeySampleRate, (int32_t*)&streamDesc.timeScale);
+ ALOGV("Bitrate = %d, SampleRate = %d duration = %lld",
+ streamDesc.averageBitrate,streamDesc.timeScale,Duration/1000);
+
+ streamDesc.streamType = M4SYS_kMP3;
+ streamDesc.profileLevel = 0xFF ;
+ streamDesc.streamID = pReaderContext->mStreamNumber;
+ streamDesc.decoderSpecificInfo = M4OSA_NULL;
+ streamDesc.decoderSpecificInfoSize = 0;
+ streamDesc.maxBitrate = streamDesc.averageBitrate;
+
+ /* Allocate the audio stream handler and set its parameters */
+ pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4_AudioStreamHandler), M4READER_MP3,
+ (M4OSA_Char*)"M4_AudioStreamHandler");
+
+ if (pAudioStreamHandler == M4OSA_NULL) {
+ ALOGV("VideoEditorMp3Reader_getNextStream malloc failed");
+ pReaderContext->mMediaSource->stop();
+ pReaderContext->mMediaSource.clear();
+ pReaderContext->mDataSource.clear();
+
+ return M4ERR_ALLOC;
+ }
+ pStreamHandler =(M4_StreamHandler*)(pAudioStreamHandler);
+ *pStreamHandlerParam = pStreamHandler;
+ pReaderContext->mAudioStreamHandler = pAudioStreamHandler;
+
+ pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
+
+ if (meta == NULL) {
+ ALOGV("VideoEditorMp3Reader_getNextStream meta is NULL");
+ }
+
+ pAudioStreamHandler->m_samplingFrequency = streamDesc.timeScale;
+ pStreamHandler->m_pDecoderSpecificInfo =
+ (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
+ pStreamHandler->m_decoderSpecificInfoSize =
+ streamDesc.decoderSpecificInfoSize;
+
+ meta->findInt32(kKeyChannelCount,
+ (int32_t*)&pAudioStreamHandler->m_nbChannels);
+ pAudioStreamHandler->m_byteFrameLength = 1152;
+ pAudioStreamHandler->m_byteSampleSize = 2;
+
+ pStreamHandler->m_pUserData = NULL;
+ pStreamHandler->m_streamId = streamDesc.streamID;
+ pStreamHandler->m_duration = streamDesc.duration;
+ pReaderContext->mMaxDuration = streamDesc.duration;
+ pStreamHandler->m_averageBitRate = streamDesc.averageBitrate;
+
+ pStreamHandler->m_maxAUSize = 0;
+ pStreamHandler->m_streamType = M4DA_StreamTypeAudioMp3;
+
+ ALOGV("VideoEditorMp3Reader_getNextStream end ");
+ return err;
+}
+
+/**
+ *******************************************************************************
+ * @brief fill the access unit structure with initialization values
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler: (IN) pointer to the stream handler to which
+ * the access unit will be associated
+ * @param pAccessUnit: (IN/OUT) pointer to the access unit (allocated by
+ * the caller) to initialize
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_fillAuStruct(M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
+ VideoEditorMp3Reader_Context *pReaderContext =
+ (VideoEditorMp3Reader_Context*)context;
+ M4SYS_AccessUnit *pAu;
+
+ M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_fillAuStruct: invalid context");
+ M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_fillAuStruct invalid pointer to StreamHandler");
+ M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+ ALOGV("VideoEditorMp3Reader_fillAuStruct start ");
+ if(pStreamHandler == (M4_StreamHandler*)pReaderContext->\
+ mAudioStreamHandler){
+ pAu = &pReaderContext->mAudioAu;
+ } else {
+ ALOGV("VideoEditorMp3Reader_fillAuStruct StreamHandler is not known");
+ return M4ERR_PARAMETER;
+ }
+
+ /* Initialize pAu structure */
+ pAu->dataAddress = M4OSA_NULL;
+ pAu->size = 0;
+ pAu->CTS = 0;
+ pAu->DTS = 0;
+ pAu->attribute = 0;
+ pAu->nbFrag = 0;
+
+ /* Initialize pAccessUnit structure */
+ pAccessUnit->m_size = 0;
+ pAccessUnit->m_CTS = 0;
+ pAccessUnit->m_DTS = 0;
+ pAccessUnit->m_attribute = 0;
+ pAccessUnit->m_dataAddress = M4OSA_NULL;
+ pAccessUnit->m_maxsize = pStreamHandler->m_maxAUSize;
+ pAccessUnit->m_streamID = pStreamHandler->m_streamId;
+ pAccessUnit->m_structSize = sizeof(M4_AccessUnit);
+
+ ALOGV("VideoEditorMp3Reader_fillAuStruct end");
+ return M4NO_ERROR;
+}
+
+/**
+ *******************************************************************************
+ * @brief reset the stream, i.e seek it to the beginning
+ * @note
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) The stream handler of the stream to reset
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_reset(M4OSA_Context context,
+ M4_StreamHandler *pStreamHandler) {
+ VideoEditorMp3Reader_Context *pReaderContext =
+ (VideoEditorMp3Reader_Context*)context;
+
+ M4OSA_ERR err = M4NO_ERROR;
+ M4SYS_StreamID streamIdArray[2];
+ M4SYS_AccessUnit* pAu;
+ M4OSA_Time time64 = 0;
+
+ ALOGV("VideoEditorMp3Reader_reset start");
+ M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_reset: invalid context");
+ M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+ "VideoEditorMp3Reader_reset: invalid pointer to M4_StreamHandler");
+
+ if (pStreamHandler == (M4_StreamHandler*)pReaderContext->\
+ mAudioStreamHandler) {
+ pAu = &pReaderContext->mAudioAu;
+ } else {
+ ALOGV("VideoEditorMp3Reader_reset StreamHandler is not known");
+ return M4ERR_PARAMETER;
+ }
+ streamIdArray[0] = pStreamHandler->m_streamId;
+ streamIdArray[1] = 0;
+ pAu->CTS = time64;
+ pAu->DTS = time64;
+
+ pReaderContext->mSeeking = M4OSA_TRUE;
+ pReaderContext->mSeekTime = time64;
+
+ ALOGV("VideoEditorMp3Reader_reset end");
+ return err;
+}
+/**
+ *******************************************************************************
+ * @brief Gets an access unit (AU) from the stream handler source.
+ * @note AU is the smallest possible amount of data to be decoded by decoder
+ *
+ * @param context: (IN) Context of the reader
+ * @param pStreamHandler (IN) The stream handler of the stream to make jump
+ * @param pAccessUnit (I/O)Pointer to an access unit to fill with read data
+ * @return M4NO_ERROR there is no error
+ * @return M4ERR_PARAMETER at least one parameter is not properly set
+ * @returns M4ERR_ALLOC memory allocation failed
+ * @returns M4WAR_NO_MORE_AU there are no more access unit in the stream
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_getNextAu(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_AccessUnit* pAu;
+    /* Must be NULL-initialized: MediaSource::read() is not guaranteed to
+     * set the out-pointer on failure, and the EOS test below reads it. */
+    MediaBuffer *mAudioBuffer = NULL;
+    MediaSource::ReadOptions options;
+
+    ALOGV("VideoEditorMp3Reader_getNextAu start");
+    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextAu: invalid pointer to M4_AccessUnit");
+
+    /* Only the single audio stream of the MP3 reader is supported. */
+    if (pStreamHandler == (M4_StreamHandler*)pReaderContext->\
+        mAudioStreamHandler) {
+        pAu = &pReaderContext->mAudioAu;
+    } else {
+        ALOGV("VideoEditorMp3Reader_getNextAu: StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    /* A pending jump/reset is honored lazily, on the next read. */
+    if (pReaderContext->mSeeking) {
+        options.setSeekTo(pReaderContext->mSeekTime);
+    }
+
+    pReaderContext->mMediaSource->read(&mAudioBuffer, &options);
+
+    if (mAudioBuffer != NULL) {
+        /* (Re)allocate the AU buffer if absent or too small; size is
+         * rounded up to a multiple of 4 for the 32-bit-aligned allocator. */
+        if ((pAu->dataAddress == NULL) ||
+            (pAu->size < mAudioBuffer->range_length())) {
+            if (pAu->dataAddress != NULL) {
+                free((M4OSA_Int32*)pAu->dataAddress);
+                pAu->dataAddress = NULL;
+            }
+            pAu->dataAddress = (M4OSA_Int32*)M4OSA_32bitAlignedMalloc(
+                (mAudioBuffer->range_length() + 3) & ~0x3,
+                M4READER_MP3, (M4OSA_Char*)"pAccessUnit->m_dataAddress" );
+
+            if (pAu->dataAddress == NULL) {
+                ALOGV("VideoEditorMp3Reader_getNextAu malloc failed");
+                pReaderContext->mMediaSource->stop();
+                pReaderContext->mMediaSource.clear();
+                pReaderContext->mDataSource.clear();
+
+                return M4ERR_ALLOC;
+            }
+        }
+        pAu->size = mAudioBuffer->range_length();
+        memcpy((M4OSA_MemAddr8)pAu->dataAddress,
+            (const char *)mAudioBuffer->data() + mAudioBuffer->range_offset(),
+            mAudioBuffer->range_length());
+
+        mAudioBuffer->meta_data()->findInt64(kKeyTime, (int64_t*)&pAu->CTS);
+
+        pAu->CTS = pAu->CTS / 1000; /*converting the microsec to millisec */
+        pAu->DTS = pAu->CTS;
+        pAu->attribute = M4SYS_kFragAttrOk;
+        mAudioBuffer->release();
+
+        ALOGV("VideoEditorMp3Reader_getNextAu AU CTS = %ld",pAu->CTS);
+
+        /* Mirror the internal AU into the caller-visible access unit. */
+        pAccessUnit->m_dataAddress = (M4OSA_Int8*) pAu->dataAddress;
+        pAccessUnit->m_size = pAu->size;
+        pAccessUnit->m_CTS = pAu->CTS;
+        pAccessUnit->m_DTS = pAu->DTS;
+        pAccessUnit->m_attribute = pAu->attribute;
+    } else {
+        /* Read failure and end of stream are both reported as "no more AU". */
+        ALOGV("VideoEditorMp3Reader_getNextAu EOS reached.");
+        pAccessUnit->m_size=0;
+        err = M4WAR_NO_MORE_AU;
+    }
+    pAu->nbFrag = 0;
+
+    options.clearSeekTo();
+    pReaderContext->mSeeking = M4OSA_FALSE;
+    ALOGV("VideoEditorMp3Reader_getNextAu end");
+
+    return err;
+}
+
+extern "C" {
+
+/**
+ * Allocates and populates the reader global/data interface tables for the
+ * MP3 reader shell. On error, any partially allocated table is released and
+ * the outputs are left NULL.
+ */
+M4OSA_ERR VideoEditorMp3Reader_getInterface(
+        M4READER_MediaType *pMediaType,
+        M4READER_GlobalInterface **pRdrGlobalInterface,
+        M4READER_DataInterface **pRdrDataInterface) {
+    M4OSA_ERR err = M4NO_ERROR;
+
+    ALOGV("VideoEditorMp3Reader_getInterface: begin");
+    /* Input parameters check: validate the output pointers first so that
+     * they can be safely reset before any other check may jump to cleanUp. */
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrGlobalInterface, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrDataInterface, M4ERR_PARAMETER);
+    /* Reset the outputs so the cleanup path never frees an uninitialized
+     * pointer when only the first allocation below succeeds. */
+    *pRdrGlobalInterface = M4OSA_NULL;
+    *pRdrDataInterface = M4OSA_NULL;
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pMediaType, M4ERR_PARAMETER);
+
+    SAFE_MALLOC(*pRdrGlobalInterface, M4READER_GlobalInterface, 1,
+        "VideoEditorMp3Reader_getInterface");
+    SAFE_MALLOC(*pRdrDataInterface, M4READER_DataInterface, 1,
+        "VideoEditorMp3Reader_getInterface");
+
+    *pMediaType = M4READER_kMediaTypeMP3;
+
+    (*pRdrGlobalInterface)->m_pFctCreate = VideoEditorMp3Reader_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy = VideoEditorMp3Reader_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen = VideoEditorMp3Reader_open;
+    (*pRdrGlobalInterface)->m_pFctClose = VideoEditorMp3Reader_close;
+    (*pRdrGlobalInterface)->m_pFctGetOption = VideoEditorMp3Reader_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption = VideoEditorMp3Reader_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream =
+        VideoEditorMp3Reader_getNextStream;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct =
+        VideoEditorMp3Reader_fillAuStruct;
+    (*pRdrGlobalInterface)->m_pFctStart = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctJump = VideoEditorMp3Reader_jump;
+    (*pRdrGlobalInterface)->m_pFctReset = VideoEditorMp3Reader_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime = M4OSA_NULL;
+
+    (*pRdrDataInterface)->m_pFctGetNextAu = VideoEditorMp3Reader_getNextAu;
+    (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
+
+cleanUp:
+    if( M4NO_ERROR == err )
+    {
+        ALOGV("VideoEditorMp3Reader_getInterface no error");
+    }
+    else
+    {
+        /* Guard against the parameter-check failure path, where the output
+         * pointers themselves may be NULL. */
+        if (M4OSA_NULL != pRdrGlobalInterface) {
+            SAFE_FREE(*pRdrGlobalInterface);
+        }
+        if (M4OSA_NULL != pRdrDataInterface) {
+            SAFE_FREE(*pRdrDataInterface);
+        }
+
+        ALOGV("VideoEditorMp3Reader_getInterface ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorMp3Reader_getInterface: end");
+    return err;
+}
+} /* extern "C" */
+} /* namespace android */
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp
new file mode 100755
index 0000000..5309bd4
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+*************************************************************************
+* @file VideoEditorUtils.cpp
+* @brief StageFright shell Utilities
+*************************************************************************
+*/
+#define LOG_NDEBUG 0
+#define LOG_TAG "SF_utils"
+#include "utils/Log.h"
+
+#include "VideoEditorUtils.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXCodec.h>
+
+/* Android includes*/
+#include <utils/Log.h>
+#include <memory.h>
+
+/*---------------------*/
+/* DEBUG LEVEL SETUP */
+/*---------------------*/
+#define LOG1 ALOGE /*ERRORS Logging*/
+#define LOG2 ALOGI /*WARNING Logging*/
+#define LOG3 //ALOGV /*COMMENTS Logging*/
+
+namespace android {
+
+/**
+ * Debug helper: dumps every known key present in @p meta via LOG1 (ALOGE).
+ * Keys absent from the MetaData are silently skipped.
+ *
+ * Format specifiers match the argument types: %zu for size_t sizes,
+ * %u for the uint32_t data type tag, %p for pointers, %lld for int64_t.
+ */
+void displayMetaData(const sp<MetaData> meta) {
+
+    const char* charData;
+    int32_t int32Data;
+    int64_t int64Data;
+    uint32_t type;
+    const void* data;
+    void* ptr;
+    size_t size;
+
+    if (meta->findCString(kKeyMIMEType, &charData)) {
+        LOG1("displayMetaData kKeyMIMEType %s", charData);
+    }
+    if (meta->findInt32(kKeyWidth, &int32Data)) {
+        LOG1("displayMetaData kKeyWidth %d", int32Data);
+    }
+    if (meta->findInt32(kKeyHeight, &int32Data)) {
+        LOG1("displayMetaData kKeyHeight %d", int32Data);
+    }
+    if (meta->findInt32(kKeyIFramesInterval, &int32Data)) {
+        LOG1("displayMetaData kKeyIFramesInterval %d", int32Data);
+    }
+    if (meta->findInt32(kKeyStride, &int32Data)) {
+        LOG1("displayMetaData kKeyStride %d", int32Data);
+    }
+    if (meta->findInt32(kKeySliceHeight, &int32Data)) {
+        LOG1("displayMetaData kKeySliceHeight %d", int32Data);
+    }
+    if (meta->findInt32(kKeyChannelCount, &int32Data)) {
+        LOG1("displayMetaData kKeyChannelCount %d", int32Data);
+    }
+    if (meta->findInt32(kKeySampleRate, &int32Data)) {
+        LOG1("displayMetaData kKeySampleRate %d", int32Data);
+    }
+    if (meta->findInt32(kKeyBitRate, &int32Data)) {
+        LOG1("displayMetaData kKeyBitRate %d", int32Data);
+    }
+    if (meta->findData(kKeyESDS, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyESDS type=%u size=%zu", type, size);
+    }
+    if (meta->findData(kKeyAVCC, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyAVCC data=0x%X type=%u size=%zu",
+            *((unsigned int*)data), type, size);
+    }
+    if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyVorbisInfo type=%u size=%zu", type, size);
+    }
+    if (meta->findData(kKeyVorbisBooks, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyVorbisBooks type=%u size=%zu", type, size);
+    }
+    if (meta->findInt32(kKeyWantsNALFragments, &int32Data)) {
+        LOG1("displayMetaData kKeyWantsNALFragments %d", int32Data);
+    }
+    if (meta->findInt32(kKeyIsSyncFrame, &int32Data)) {
+        LOG1("displayMetaData kKeyIsSyncFrame %d", int32Data);
+    }
+    if (meta->findInt32(kKeyIsCodecConfig, &int32Data)) {
+        LOG1("displayMetaData kKeyIsCodecConfig %d", int32Data);
+    }
+    if (meta->findInt64(kKeyTime, &int64Data)) {
+        LOG1("displayMetaData kKeyTime %lld", int64Data);
+    }
+    if (meta->findInt32(kKeyDuration, &int32Data)) {
+        LOG1("displayMetaData kKeyDuration %d", int32Data);
+    }
+    if (meta->findInt32(kKeyColorFormat, &int32Data)) {
+        LOG1("displayMetaData kKeyColorFormat %d", int32Data);
+    }
+    if (meta->findPointer(kKeyPlatformPrivate, &ptr)) {
+        /* %p avoids truncating the pointer on 64-bit builds. */
+        LOG1("displayMetaData kKeyPlatformPrivate pointer=%p", ptr);
+    }
+    if (meta->findCString(kKeyDecoderComponent, &charData)) {
+        LOG1("displayMetaData kKeyDecoderComponent %s", charData);
+    }
+    if (meta->findInt32(kKeyBufferID, &int32Data)) {
+        LOG1("displayMetaData kKeyBufferID %d", int32Data);
+    }
+    if (meta->findInt32(kKeyMaxInputSize, &int32Data)) {
+        LOG1("displayMetaData kKeyMaxInputSize %d", int32Data);
+    }
+    if (meta->findInt64(kKeyThumbnailTime, &int64Data)) {
+        LOG1("displayMetaData kKeyThumbnailTime %lld", int64Data);
+    }
+    if (meta->findCString(kKeyAlbum, &charData)) {
+        LOG1("displayMetaData kKeyAlbum %s", charData);
+    }
+    if (meta->findCString(kKeyArtist, &charData)) {
+        LOG1("displayMetaData kKeyArtist %s", charData);
+    }
+    if (meta->findCString(kKeyAlbumArtist, &charData)) {
+        LOG1("displayMetaData kKeyAlbumArtist %s", charData);
+    }
+    if (meta->findCString(kKeyComposer, &charData)) {
+        LOG1("displayMetaData kKeyComposer %s", charData);
+    }
+    if (meta->findCString(kKeyGenre, &charData)) {
+        LOG1("displayMetaData kKeyGenre %s", charData);
+    }
+    if (meta->findCString(kKeyTitle, &charData)) {
+        LOG1("displayMetaData kKeyTitle %s", charData);
+    }
+    if (meta->findCString(kKeyYear, &charData)) {
+        LOG1("displayMetaData kKeyYear %s", charData);
+    }
+    if (meta->findData(kKeyAlbumArt, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyAlbumArt type=%u size=%zu", type, size);
+    }
+    if (meta->findCString(kKeyAlbumArtMIME, &charData)) {
+        LOG1("displayMetaData kKeyAlbumArtMIME %s", charData);
+    }
+    if (meta->findCString(kKeyAuthor, &charData)) {
+        LOG1("displayMetaData kKeyAuthor %s", charData);
+    }
+    if (meta->findCString(kKeyCDTrackNumber, &charData)) {
+        LOG1("displayMetaData kKeyCDTrackNumber %s", charData);
+    }
+    if (meta->findCString(kKeyDiscNumber, &charData)) {
+        LOG1("displayMetaData kKeyDiscNumber %s", charData);
+    }
+    if (meta->findCString(kKeyDate, &charData)) {
+        LOG1("displayMetaData kKeyDate %s", charData);
+    }
+    if (meta->findCString(kKeyWriter, &charData)) {
+        LOG1("displayMetaData kKeyWriter %s", charData);
+    }
+    if (meta->findInt32(kKeyTimeScale, &int32Data)) {
+        LOG1("displayMetaData kKeyTimeScale %d", int32Data);
+    }
+    if (meta->findInt32(kKeyVideoProfile, &int32Data)) {
+        LOG1("displayMetaData kKeyVideoProfile %d", int32Data);
+    }
+    if (meta->findInt32(kKeyVideoLevel, &int32Data)) {
+        LOG1("displayMetaData kKeyVideoLevel %d", int32Data);
+    }
+    if (meta->findInt32(kKey64BitFileOffset, &int32Data)) {
+        LOG1("displayMetaData kKey64BitFileOffset %d", int32Data);
+    }
+    if (meta->findInt32(kKeyFileType, &int32Data)) {
+        LOG1("displayMetaData kKeyFileType %d", int32Data);
+    }
+    if (meta->findInt64(kKeyTrackTimeStatus, &int64Data)) {
+        LOG1("displayMetaData kKeyTrackTimeStatus %lld", int64Data);
+    }
+    if (meta->findInt32(kKeyNotRealTime, &int32Data)) {
+        LOG1("displayMetaData kKeyNotRealTime %d", int32Data);
+    }
+}
+
+/**
+ * This code was extracted from StageFright MPEG4 writer
+ * Is is used to parse and format the AVC codec specific info received
+ * from StageFright encoders
+ */
+/* NAL unit type codes for H.264 parameter sets (ISO 14496-10, Table 7-1). */
+static const uint8_t kNalUnitTypeSeqParamSet = 0x07;
+static const uint8_t kNalUnitTypePicParamSet = 0x08;
+/* One SPS or PPS payload: a non-owning view into the caller's buffer. */
+struct AVCParamSet {
+    AVCParamSet(uint16_t length, const uint8_t *data)
+        : mLength(length), mData(data) {}
+
+    uint16_t mLength;
+    const uint8_t *mData;
+};
+/* Accumulates all SPS/PPS found while scanning codec-specific data, plus
+ * the profile/level bytes taken from the first SPS. */
+struct AVCCodecSpecificContext {
+    List<AVCParamSet> mSeqParamSets;
+    List<AVCParamSet> mPicParamSets;
+    uint8_t mProfileIdc;
+    uint8_t mProfileCompatible;
+    uint8_t mLevelIdc;
+};
+
+/**
+ * Parses one SPS or PPS NAL unit starting at @p data and records it in @p pC.
+ *
+ * @param pC          accumulator for parsed parameter sets
+ * @param data        first byte of the NAL unit payload (past the start code)
+ * @param length      bytes available from @p data
+ * @param type        kNalUnitTypeSeqParamSet or kNalUnitTypePicParamSet
+ * @param paramSetLen out: length of this parameter set in bytes
+ * @return pointer to the next 0x00000001 start code (or end of buffer when
+ *         this was the last parameter set); NULL on malformed input.
+ */
+const uint8_t *parseParamSet(AVCCodecSpecificContext* pC,
+        const uint8_t *data, size_t length, int type, size_t *paramSetLen) {
+    CHECK(type == kNalUnitTypeSeqParamSet ||
+          type == kNalUnitTypePicParamSet);
+
+    /* Scan forward for the next 4-byte start code; the bytes before it are
+     * this parameter set. */
+    size_t bytesLeft = length;
+    while (bytesLeft > 4 &&
+            memcmp("\x00\x00\x00\x01", &data[length - bytesLeft], 4)) {
+        --bytesLeft;
+    }
+    if (bytesLeft <= 4) {
+        bytesLeft = 0; // Last parameter set
+    }
+    const uint8_t *nextStartCode = &data[length - bytesLeft];
+    *paramSetLen = nextStartCode - data;
+    if (*paramSetLen == 0) {
+        ALOGE("Param set is malformed, since its length is 0");
+        return NULL;
+    }
+
+    AVCParamSet paramSet(*paramSetLen, data);
+    if (type == kNalUnitTypeSeqParamSet) {
+        /* SPS carries profile_idc / constraint flags / level_idc in
+         * bytes 1..3; every SPS in the stream must agree on them. */
+        if (*paramSetLen < 4) {
+            ALOGE("Seq parameter set malformed");
+            return NULL;
+        }
+        if (pC->mSeqParamSets.empty()) {
+            pC->mProfileIdc = data[1];
+            pC->mProfileCompatible = data[2];
+            pC->mLevelIdc = data[3];
+        } else {
+            if (pC->mProfileIdc != data[1] ||
+                pC->mProfileCompatible != data[2] ||
+                pC->mLevelIdc != data[3]) {
+                ALOGV("Inconsistent profile/level found in seq parameter sets");
+                return NULL;
+            }
+        }
+        pC->mSeqParamSets.push_back(paramSet);
+    } else {
+        pC->mPicParamSets.push_back(paramSet);
+    }
+    return nextStartCode;
+}
+
+/**
+ * Converts H.264 codec-specific data (either Annex-B start-code format or
+ * an already-built AVCDecoderConfigurationRecord) into an allocated
+ * AVCDecoderConfigurationRecord per ISO 14496-15.
+ *
+ * @param pOutputData out: newly malloc'd record; caller owns and frees it.
+ *                    Must point to a NULL pointer on entry.
+ * @param pOutputSize out: size of the record in bytes
+ * @param data        input codec-specific data
+ * @param size        input size in bytes
+ * @param param       optional; kKey2ByteNalLength selects 2-byte NAL lengths
+ * @return OK on success, NO_MEMORY on allocation failure,
+ *         ERROR_MALFORMED on invalid input.
+ */
+status_t buildAVCCodecSpecificData(uint8_t **pOutputData, size_t *pOutputSize,
+        const uint8_t *data, size_t size, MetaData *param)
+{
+    //ALOGV("buildAVCCodecSpecificData");
+
+    if ( (pOutputData == NULL) || (pOutputSize == NULL) ) {
+        ALOGE("output is invalid");
+        return ERROR_MALFORMED;
+    }
+
+    if (*pOutputData != NULL) {
+        ALOGE("Already have codec specific data");
+        return ERROR_MALFORMED;
+    }
+
+    if (size < 4) {
+        ALOGE("Codec specific data length too short: %zu", size);
+        return ERROR_MALFORMED;
+    }
+
+    // No leading start code: data is already an AVCCodecSpecificData
+    // record, so just copy it through unchanged.
+    if (memcmp("\x00\x00\x00\x01", data, 4)) {
+        // 2 bytes for each of the parameter set length field
+        // plus the 7 bytes for the header
+        if (size < 4 + 7) {
+            ALOGE("Codec specific data length too short: %zu", size);
+            return ERROR_MALFORMED;
+        }
+
+        uint8_t *copy = (uint8_t*)malloc(size);
+        if (copy == NULL) {
+            ALOGE("Failed to allocate %zu bytes", size);
+            return NO_MEMORY;
+        }
+        memcpy(copy, data, size);
+        *pOutputData = copy;
+        *pOutputSize = size;
+        return OK;
+    }
+
+    AVCCodecSpecificContext ctx;
+    uint8_t *outputData = NULL;
+    size_t outputSize = 0;
+
+    // Walk the Annex-B stream: each iteration consumes one start code
+    // plus one SPS/PPS NAL unit. SPS must all precede PPS.
+    uint8_t type = kNalUnitTypeSeqParamSet;
+    bool gotSps = false;
+    bool gotPps = false;
+    const uint8_t *tmp = data;
+    const uint8_t *nextStartCode = data;
+    size_t bytesLeft = size;
+    size_t paramSetLen = 0;
+    outputSize = 0;
+    while (bytesLeft > 4 && !memcmp("\x00\x00\x00\x01", tmp, 4)) {
+        type = (*(tmp + 4)) & 0x1F;
+        if (type == kNalUnitTypeSeqParamSet) {
+            if (gotPps) {
+                ALOGE("SPS must come before PPS");
+                return ERROR_MALFORMED;
+            }
+            if (!gotSps) {
+                gotSps = true;
+            }
+            nextStartCode = parseParamSet(&ctx, tmp + 4, bytesLeft - 4, type,
+                &paramSetLen);
+        } else if (type == kNalUnitTypePicParamSet) {
+            if (!gotSps) {
+                ALOGE("SPS must come before PPS");
+                return ERROR_MALFORMED;
+            }
+            if (!gotPps) {
+                gotPps = true;
+            }
+            nextStartCode = parseParamSet(&ctx, tmp + 4, bytesLeft - 4, type,
+                &paramSetLen);
+        } else {
+            ALOGE("Only SPS and PPS Nal units are expected");
+            return ERROR_MALFORMED;
+        }
+
+        if (nextStartCode == NULL) {
+            return ERROR_MALFORMED;
+        }
+
+        // Move on to find the next parameter set
+        bytesLeft -= nextStartCode - tmp;
+        tmp = nextStartCode;
+        // Each parameter set contributes a 16-bit length prefix + payload.
+        outputSize += (2 + paramSetLen);
+    }
+
+    {
+        // Check on the number of seq parameter sets
+        size_t nSeqParamSets = ctx.mSeqParamSets.size();
+        if (nSeqParamSets == 0) {
+            ALOGE("Could not find sequence parameter set");
+            return ERROR_MALFORMED;
+        }
+
+        // numSequenceParameterSets is a 5-bit field in the record header.
+        if (nSeqParamSets > 0x1F) {
+            ALOGE("Too many seq parameter sets (%zu) found", nSeqParamSets);
+            return ERROR_MALFORMED;
+        }
+    }
+
+    {
+        // Check on the number of pic parameter sets
+        size_t nPicParamSets = ctx.mPicParamSets.size();
+        if (nPicParamSets == 0) {
+            ALOGE("Could not find picture parameter set");
+            return ERROR_MALFORMED;
+        }
+        // numOfPictureParameterSets is an 8-bit field in the record header.
+        if (nPicParamSets > 0xFF) {
+            ALOGE("Too many pic parameter sets (%zu) found", nPicParamSets);
+            return ERROR_MALFORMED;
+        }
+    }
+
+    // ISO 14496-15: AVC file format
+    outputSize += 7; // 7 more bytes in the header
+    outputData = (uint8_t *)malloc(outputSize);
+    if (outputData == NULL) {
+        ALOGE("Failed to allocate %zu bytes", outputSize);
+        return NO_MEMORY;
+    }
+    uint8_t *header = outputData;
+    header[0] = 1; // version
+    header[1] = ctx.mProfileIdc; // profile indication
+    header[2] = ctx.mProfileCompatible; // profile compatibility
+    header[3] = ctx.mLevelIdc;
+
+    // 6-bit '111111' followed by 2-bit lengthSizeMinusOne
+    int32_t use2ByteNalLength = 0;
+    if (param &&
+        param->findInt32(kKey2ByteNalLength, &use2ByteNalLength) &&
+        use2ByteNalLength) {
+        header[4] = 0xfc | 1; // length size == 2 bytes
+    } else {
+        header[4] = 0xfc | 3; // length size == 4 bytes
+    }
+
+    // 3-bit '111' followed by 5-bit numSequenceParameterSets
+    int nSequenceParamSets = ctx.mSeqParamSets.size();
+    header[5] = 0xe0 | nSequenceParamSets;
+    header += 6;
+    for (List<AVCParamSet>::iterator it = ctx.mSeqParamSets.begin();
+         it != ctx.mSeqParamSets.end(); ++it) {
+        // 16-bit sequence parameter set length (big-endian)
+        uint16_t seqParamSetLength = it->mLength;
+        header[0] = seqParamSetLength >> 8;
+        header[1] = seqParamSetLength & 0xff;
+
+        // SPS NAL unit (sequence parameter length bytes)
+        memcpy(&header[2], it->mData, seqParamSetLength);
+        header += (2 + seqParamSetLength);
+    }
+
+    // 8-bit nPictureParameterSets
+    int nPictureParamSets = ctx.mPicParamSets.size();
+    header[0] = nPictureParamSets;
+    header += 1;
+    for (List<AVCParamSet>::iterator it = ctx.mPicParamSets.begin();
+         it != ctx.mPicParamSets.end(); ++it) {
+        // 16-bit picture parameter set length (big-endian)
+        uint16_t picParamSetLength = it->mLength;
+        header[0] = picParamSetLength >> 8;
+        header[1] = picParamSetLength & 0xff;
+
+        // PPS Nal unit (picture parameter set length bytes)
+        memcpy(&header[2], it->mData, picParamSetLength);
+        header += (2 + picParamSetLength);
+    }
+
+    *pOutputSize = outputSize;
+    *pOutputData = outputData;
+    return OK;
+}
+}// namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
new file mode 100755
index 0000000..aa26252
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
@@ -0,0 +1,1764 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorVideoDecoder.cpp
+* @brief StageFright shell video decoder
+*************************************************************************
+*/
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_VIDEODECODER"
+/*******************
+ * HEADERS *
+ *******************/
+
+#include "VideoEditorVideoDecoder_internal.h"
+#include "VideoEditorUtils.h"
+#include "M4VD_Tools.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+/********************
+ * DEFINITIONS *
+ ********************/
+#define MAX_DEC_BUFFERS 10
+
+/********************
+ * SOURCE CLASS *
+ ********************/
+using namespace android;
+static M4OSA_ERR copyBufferToQueue(
+ VideoEditorVideoDecoder_Context* pDecShellContext,
+ MediaBuffer* pDecodedBuffer);
+
+/**
+ * MediaSource adapter that feeds compressed access units from the video
+ * editor's reader shell into a stagefright decoder. read() pulls one AU per
+ * call from the reader context supplied at construction.
+ */
+class VideoEditorVideoDecoderSource : public MediaSource {
+    public:
+
+        VideoEditorVideoDecoderSource(
+            const sp<MetaData> &format,
+            VIDEOEDITOR_CodecType codecType,
+            void *decoderShellContext);
+
+        virtual status_t start(MetaData *params = NULL);
+        virtual status_t stop();
+        virtual sp<MetaData> getFormat();
+        virtual status_t read(
+            MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+    protected :
+        virtual ~VideoEditorVideoDecoderSource();
+
+    private:
+        sp<MetaData> mFormat;           // format advertised to the decoder
+        MediaBuffer* mBuffer;           // currently acquired input buffer
+        MediaBufferGroup* mGroup;       // owns the single input buffer
+        Mutex mLock;                    // guards getFormat()/read()
+        VideoEditorVideoDecoder_Context* mpDecShellContext;
+        int32_t mMaxAUSize;             // from kKeyMaxInputSize at start()
+        bool mStarted;
+        VIDEOEDITOR_CodecType mCodecType;
+
+        // Don't call me
+        VideoEditorVideoDecoderSource(const VideoEditorVideoDecoderSource &);
+        VideoEditorVideoDecoderSource &operator=(
+            const VideoEditorVideoDecoderSource &);
+};
+
+/**
+ * Wraps a decoder shell context as a MediaSource. All members are
+ * initialized here; mMaxAUSize gets its real value from kKeyMaxInputSize
+ * in start(), so it is zero-initialized to avoid an indeterminate read.
+ */
+VideoEditorVideoDecoderSource::VideoEditorVideoDecoderSource(
+        const sp<MetaData> &format, VIDEOEDITOR_CodecType codecType,
+        void *decoderShellContext) :
+        mFormat(format),
+        mBuffer(NULL),
+        mGroup(NULL),
+        mpDecShellContext(
+            (VideoEditorVideoDecoder_Context*) decoderShellContext),
+        mMaxAUSize(0),
+        mStarted(false),
+        mCodecType(codecType) {
+}
+
+VideoEditorVideoDecoderSource::~VideoEditorVideoDecoderSource() {
+    // Tear down the buffer group if the client never called stop().
+    if (mStarted) {
+        stop();
+    }
+}
+
+/**
+ * Prepares the source: reads the maximum AU size from the format and
+ * allocates a single-buffer MediaBufferGroup of that size.
+ * Idempotent: calling start() twice is a no-op.
+ */
+status_t VideoEditorVideoDecoderSource::start(
+        MetaData *params) {
+
+    if (mStarted) {
+        return OK;
+    }
+
+    // The maximum access-unit size bounds the one input buffer we allocate.
+    if (!mFormat->findInt32(kKeyMaxInputSize, &mMaxAUSize)) {
+        ALOGE("Could not find kKeyMaxInputSize");
+        return ERROR_MALFORMED;
+    }
+
+    mGroup = new MediaBufferGroup;
+    if (mGroup == NULL) {
+        ALOGE("FATAL: memory limitation ! ");
+        return NO_MEMORY;
+    }
+
+    mGroup->add_buffer(new MediaBuffer(mMaxAUSize));
+    mStarted = true;
+    return OK;
+}
+
+/**
+ * Releases the in-flight buffer (dropping every outstanding reference) and
+ * destroys the buffer group. Safe to call when not started.
+ */
+status_t VideoEditorVideoDecoderSource::stop() {
+    if (mStarted) {
+        if (mBuffer != NULL) {
+
+            // FIXME:
+            // Why do we need to check on the ref count?
+            // Draining every reference returns the buffer to the group so
+            // the group can be deleted below without asserting.
+            int ref_count = mBuffer->refcount();
+            ALOGV("MediaBuffer refcount is %d",ref_count);
+            for (int i = 0; i < ref_count; ++i) {
+                mBuffer->release();
+            }
+
+            mBuffer = NULL;
+        }
+        delete mGroup;
+        mGroup = NULL;
+        mStarted = false;
+    }
+    return OK;
+}
+
+sp<MetaData> VideoEditorVideoDecoderSource::getFormat() {
+    // Serialize with read(), which shares mLock.
+    Mutex::Autolock lock(mLock);
+    return mFormat;
+}
+
+/**
+ * Delivers the next access unit to the decoder. If seek options are given,
+ * jumps the reader to the previous random access point first. The returned
+ * buffer is owned by this source's single-buffer group.
+ */
+status_t VideoEditorVideoDecoderSource::read(MediaBuffer** buffer_out,
+        const ReadOptions *options) {
+
+    Mutex::Autolock autolock(mLock);
+    if (options != NULL) {
+        int64_t time_us;
+        MediaSource::ReadOptions::SeekMode mode;
+        options->getSeekTo(&time_us, &mode);
+        if (mode != MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC) {
+            ALOGE("Unexpected read options");
+            return BAD_VALUE;
+        }
+
+        M4OSA_ERR err;
+        M4OSA_Int32 rapTime = time_us / 1000;
+
+        /*--- Retrieve the previous RAP time ---*/
+        err = mpDecShellContext->m_pReaderGlobal->m_pFctGetPrevRapTime(
+            mpDecShellContext->m_pReader->m_readerContext,
+            (M4_StreamHandler*)mpDecShellContext->m_pVideoStreamhandler,
+            &rapTime);
+
+        if (err == M4WAR_READER_INFORMATION_NOT_PRESENT) {
+            /* No RAP table, jump backward and predecode */
+            rapTime -= 40000;
+            if(rapTime < 0) rapTime = 0;
+        } else if (err != OK) {
+            ALOGE("get rap time error = 0x%x\n", (uint32_t)err);
+            return UNKNOWN_ERROR;
+        }
+
+        err = mpDecShellContext->m_pReaderGlobal->m_pFctJump(
+            mpDecShellContext->m_pReader->m_readerContext,
+            (M4_StreamHandler*)mpDecShellContext->m_pVideoStreamhandler,
+            &rapTime);
+
+        if (err != OK) {
+            ALOGE("jump err = 0x%x\n", (uint32_t)err);
+            return BAD_VALUE;
+        }
+    }
+
+    *buffer_out = NULL;
+
+    M4OSA_ERR lerr = mGroup->acquire_buffer(&mBuffer);
+    if (lerr != OK) {
+        return lerr;
+    }
+    mBuffer->meta_data()->clear(); // clear all the meta data
+
+    if (mStarted) {
+        //getNext AU from reader.
+        M4_AccessUnit* pAccessUnit = mpDecShellContext->m_pNextAccessUnitToDecode;
+        lerr = mpDecShellContext->m_pReader->m_pFctGetNextAu(
+            mpDecShellContext->m_pReader->m_readerContext,
+            (M4_StreamHandler*)mpDecShellContext->m_pVideoStreamhandler,
+            pAccessUnit);
+        if (lerr == M4WAR_NO_DATA_YET || lerr == M4WAR_NO_MORE_AU) {
+            // Return the acquired buffer to the group: it is the group's
+            // only buffer, and keeping it acquired would make the next
+            // acquire_buffer() block forever.
+            mBuffer->release();
+            mBuffer = NULL;
+            *buffer_out = NULL;
+            return ERROR_END_OF_STREAM;
+        }
+
+        // Copy the reader AU into mBuffer, clamped to the buffer capacity.
+        M4OSA_UInt32 lSize = (pAccessUnit->m_size > (M4OSA_UInt32)mMaxAUSize)\
+            ? (M4OSA_UInt32)mMaxAUSize : pAccessUnit->m_size;
+        memcpy((void *)mBuffer->data(),(void *)pAccessUnit->m_dataAddress,
+            lSize);
+
+        mBuffer->set_range(0, lSize);
+        int64_t frameTimeUs = (int64_t) (pAccessUnit->m_CTS * 1000);
+        mBuffer->meta_data()->setInt64(kKeyTime, frameTimeUs);
+
+        // Replace the AU start code for H264
+        if (VIDEOEDITOR_kH264VideoDec == mCodecType) {
+            uint8_t *data =(uint8_t *)mBuffer->data() + mBuffer->range_offset();
+            data[0]=0;
+            data[1]=0;
+            data[2]=0;
+            data[3]=1;
+        }
+        // Attribute 0x04 marks a random access point in the reader AU.
+        mBuffer->meta_data()->setInt32(kKeyIsSyncFrame,
+            (pAccessUnit->m_attribute == 0x04)? 1 : 0);
+        *buffer_out = mBuffer;
+    }
+    return OK;
+}
+
+// Thin forwarding wrapper around the shared M4VD bitstream helper.
+static M4OSA_UInt32 VideoEditorVideoDecoder_GetBitsFromMemory(
+        VIDEOEDITOR_VIDEO_Bitstream_ctxt* parsingCtxt, M4OSA_UInt32 nb_bits) {
+    return M4VD_Tools_GetBitsFromMemory(
+            (M4VS_Bitstream_ctxt*) parsingCtxt, nb_bits);
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_internalParseVideoDSI(M4OSA_UInt8* pVol,
+ M4OSA_Int32 aVolSize, M4DECODER_MPEG4_DecoderConfigInfo* pDci,
+ M4DECODER_VideoSize* pVideoSize) {
+
+ VIDEOEDITOR_VIDEO_Bitstream_ctxt parsingCtxt;
+ M4OSA_UInt32 code, j;
+ M4OSA_MemAddr8 start;
+ M4OSA_UInt8 i;
+ M4OSA_UInt32 time_incr_length;
+ M4OSA_UInt8 vol_verid=0, b_hierarchy_type;
+
+ /* Parsing variables */
+ M4OSA_UInt8 video_object_layer_shape = 0;
+ M4OSA_UInt8 sprite_enable = 0;
+ M4OSA_UInt8 reduced_resolution_vop_enable = 0;
+ M4OSA_UInt8 scalability = 0;
+ M4OSA_UInt8 enhancement_type = 0;
+ M4OSA_UInt8 complexity_estimation_disable = 0;
+ M4OSA_UInt8 interlaced = 0;
+ M4OSA_UInt8 sprite_warping_points = 0;
+ M4OSA_UInt8 sprite_brightness_change = 0;
+ M4OSA_UInt8 quant_precision = 0;
+
+ /* Fill the structure with default parameters */
+ pVideoSize->m_uiWidth = 0;
+ pVideoSize->m_uiHeight = 0;
+
+ pDci->uiTimeScale = 0;
+ pDci->uiProfile = 0;
+ pDci->uiUseOfResynchMarker = 0;
+ pDci->bDataPartition = M4OSA_FALSE;
+ pDci->bUseOfRVLC = M4OSA_FALSE;
+
+ /* Reset the bitstream context */
+ parsingCtxt.stream_byte = 0;
+ parsingCtxt.stream_index = 8;
+ parsingCtxt.in = (M4OSA_MemAddr8) pVol;
+
+ start = (M4OSA_MemAddr8) pVol;
+
+ /* Start parsing */
+ while (parsingCtxt.in - start < aVolSize) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt, 8);
+ if (code == 0) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt, 8);
+ if (code == 0) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt,8);
+ if (code == 1) {
+ /* start code found */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 8);
+
+ /* ----- 0x20..0x2F : video_object_layer_start_code ----- */
+
+ if ((code > 0x1F) && (code < 0x30)) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 8);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ if (code == 1) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 4);
+ vol_verid = (M4OSA_UInt8)code;
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 3);
+ }
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 4);
+ if (code == 15) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 16);
+ }
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ if (code == 1) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 3);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ if (code == 1) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 32);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 31);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 16);
+ }
+ }
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 2);
+ /* Need to save it for vop parsing */
+ video_object_layer_shape = (M4OSA_UInt8)code;
+
+ if (code != 0) {
+ return 0; /* only rectangular case supported */
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 16);
+ pDci->uiTimeScale = code;
+
+ /* Computes time increment length */
+ j = code - 1;
+ for (i = 0; (i < 32) && (j != 0); j >>=1) {
+ i++;
+ }
+ time_incr_length = (i == 0) ? 1 : i;
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ if (code == 1) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, time_incr_length);
+ }
+
+ if(video_object_layer_shape != 1) { /* 1 = Binary */
+ if(video_object_layer_shape == 0) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* Marker bit */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 13);/* Width */
+ pVideoSize->m_uiWidth = code;
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* Marker bit */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 13);/* Height */
+ pVideoSize->m_uiHeight = code;
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* Marker bit */
+ }
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* interlaced */
+ interlaced = (M4OSA_UInt8)code;
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* OBMC disable */
+
+ if(vol_verid == 1) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* sprite enable */
+ sprite_enable = (M4OSA_UInt8)code;
+ } else {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 2);/* sprite enable */
+ sprite_enable = (M4OSA_UInt8)code;
+ }
+ if ((sprite_enable == 1) || (sprite_enable == 2)) {
+ if (sprite_enable != 2) {
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 13);/* sprite width */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* Marker bit */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 13);/* sprite height */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* Marker bit */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 13);/* sprite l coordinate */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* Marker bit */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 13);/* sprite top coordinate */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* Marker bit */
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 6);/* sprite warping points */
+ sprite_warping_points = (M4OSA_UInt8)code;
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 2);/* sprite warping accuracy */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* sprite brightness change */
+ sprite_brightness_change = (M4OSA_UInt8)code;
+ if (sprite_enable != 2) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ }
+ }
+ if ((vol_verid != 1) && (video_object_layer_shape != 0)){
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* sadct disable */
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1); /* not 8 bits */
+ if (code) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 4);/* quant precision */
+ quant_precision = (M4OSA_UInt8)code;
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 4);/* bits per pixel */
+ }
+
+ /* greyscale not supported */
+ if(video_object_layer_shape == 3) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 3);
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* quant type */
+ if (code) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* load intra quant mat */
+ if (code) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 8);/* */
+ i = 1;
+ while (i < 64) {
+ code =
+ VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 8);
+ if (code == 0) {
+ break;
+ }
+ i++;
+ }
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* load non intra quant mat */
+ if (code) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 8);/* */
+ i = 1;
+ while (i < 64) {
+ code =
+ VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 8);
+ if (code == 0) {
+ break;
+ }
+ i++;
+ }
+ }
+ }
+
+ if (vol_verid != 1) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* quarter sample */
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* complexity estimation disable */
+ complexity_estimation_disable = (M4OSA_UInt8)code;
+ if (!code) {
+ //return M4ERR_NOT_IMPLEMENTED;
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* resync marker disable */
+ pDci->uiUseOfResynchMarker = (code) ? 0 : 1;
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* data partitionned */
+ pDci->bDataPartition = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+ if (code) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* reversible VLC */
+ pDci->bUseOfRVLC = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+ }
+
+ if (vol_verid != 1) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* newpred */
+ if (code) {
+ //return M4ERR_PARAMETER;
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);
+ reduced_resolution_vop_enable = (M4OSA_UInt8)code;
+ }
+
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* scalability */
+ scalability = (M4OSA_UInt8)code;
+ if (code) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* hierarchy type */
+ b_hierarchy_type = (M4OSA_UInt8)code;
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 4);/* ref layer id */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* ref sampling direct */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 5);/* hor sampling factor N */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 5);/* hor sampling factor M */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 5);/* vert sampling factor N */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 5);/* vert sampling factor M */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* enhancement type */
+ enhancement_type = (M4OSA_UInt8)code;
+ if ((!b_hierarchy_type) &&
+ (video_object_layer_shape == 1)) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* use ref shape */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* use ref texture */
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 5);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 5);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 5);
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 5);
+ }
+ }
+ break;
+ }
+
+ /* ----- 0xB0 : visual_object_sequence_start_code ----- */
+
+ else if(code == 0xB0) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 8);/* profile_and_level_indication */
+ pDci->uiProfile = (M4OSA_UInt8)code;
+ }
+
+ /* ----- 0xB5 : visual_object_start_code ----- */
+
+ else if(code == 0xB5) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 1);/* is object layer identifier */
+ if (code == 1) {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 4); /* visual object verid */
+ vol_verid = (M4OSA_UInt8)code;
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 3);
+ } else {
+ code = VideoEditorVideoDecoder_GetBitsFromMemory(
+ &parsingCtxt, 7); /* Realign on byte */
+ vol_verid = 1;
+ }
+ }
+
+ /* ----- end ----- */
+ } else {
+ if ((code >> 2) == 0x20) {
+ /* H263 ...-> wrong*/
+ break;
+ }
+ }
+ }
+ }
+ }
+ return M4NO_ERROR;
+}
+
+M4VIFI_UInt8 M4VIFI_SemiplanarYVU420toYUV420(void *user_data,
+ M4VIFI_UInt8 *inyuv, M4VIFI_ImagePlane *PlaneOut ) {
+ M4VIFI_UInt8 return_code = M4VIFI_OK;
+ M4VIFI_UInt8 *outyuv =
+ ((M4VIFI_UInt8*)&(PlaneOut[0].pac_data[PlaneOut[0].u_topleft]));
+ int32_t width = PlaneOut[0].u_width;
+ int32_t height = PlaneOut[0].u_height;
+
+ int32_t outYsize = width * height;
+ uint32_t *outy = (uint32_t *) outyuv;
+ uint16_t *outcb =
+ (uint16_t *) &(PlaneOut[1].pac_data[PlaneOut[1].u_topleft]);
+ uint16_t *outcr =
+ (uint16_t *) &(PlaneOut[2].pac_data[PlaneOut[2].u_topleft]);
+
+ /* Y copying */
+ memcpy((void *)outy, (void *)inyuv, outYsize);
+
+ /* U & V copying */
+ uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
+ for (int32_t i = height >> 1; i > 0; --i) {
+ for (int32_t j = width >> 2; j > 0; --j) {
+ uint32_t temp = *inyuv_4++;
+ uint32_t tempU = temp & 0xFF;
+ tempU = tempU | ((temp >> 8) & 0xFF00);
+
+ uint32_t tempV = (temp >> 8) & 0xFF;
+ tempV = tempV | ((temp >> 16) & 0xFF00);
+
+ // Flip U and V
+ *outcb++ = tempV;
+ *outcr++ = tempU;
+ }
+ }
+ return return_code;
+}
+void logSupportDecodersAndCapabilities(M4DECODER_VideoDecoders* decoders) {
+ VideoDecoder *pDecoder;
+ VideoComponentCapabilities *pOmxComponents = NULL;
+ VideoProfileLevel *pProfileLevel = NULL;
+ pDecoder = decoders->decoder;
+ for (size_t i = 0; i< decoders->decoderNumber; i++) {
+ ALOGV("Supported Codec[%d] :%d", i, pDecoder->codec);
+ pOmxComponents = pDecoder->component;
+ for(size_t j = 0; j < pDecoder->componentNumber; j++) {
+ pProfileLevel = pOmxComponents->profileLevel;
+ ALOGV("-->component %d", j);
+ for(size_t k = 0; k < pOmxComponents->profileNumber; k++) {
+ ALOGV("-->profile:%ld maxLevel:%ld", pProfileLevel->mProfile,
+ pProfileLevel->mLevel);
+ pProfileLevel++;
+ }
+ pOmxComponents++;
+ }
+ pDecoder++;
+ }
+}
+
+M4OSA_ERR queryVideoDecoderCapabilities
+ (M4DECODER_VideoDecoders** decoders) {
+ M4OSA_ERR err = M4NO_ERROR;
+ const char *kMimeTypes[] = {
+ MEDIA_MIMETYPE_VIDEO_AVC, MEDIA_MIMETYPE_VIDEO_MPEG4,
+ MEDIA_MIMETYPE_VIDEO_H263
+ };
+
+ int32_t supportFormats = sizeof(kMimeTypes) / sizeof(kMimeTypes[0]);
+ M4DECODER_VideoDecoders *pDecoders;
+ VideoDecoder *pDecoder;
+ VideoComponentCapabilities *pOmxComponents = NULL;
+ VideoProfileLevel *pProfileLevel = NULL;
+ OMXClient client;
+ status_t status = OK;
+ SAFE_MALLOC(pDecoders, M4DECODER_VideoDecoders, 1, "VideoDecoders");
+ SAFE_MALLOC(pDecoder, VideoDecoder, supportFormats,
+ "VideoDecoder");
+ pDecoders->decoder = pDecoder;
+
+ pDecoders->decoderNumber= supportFormats;
+ status = client.connect();
+ CHECK(status == OK);
+ for (size_t k = 0; k < sizeof(kMimeTypes) / sizeof(kMimeTypes[0]);
+ ++k) {
+ Vector<CodecCapabilities> results;
+ CHECK_EQ(QueryCodecs(client.interface(), kMimeTypes[k],
+ true, // queryDecoders
+ &results), (status_t)OK);
+
+ if (results.size()) {
+ SAFE_MALLOC(pOmxComponents, VideoComponentCapabilities,
+ results.size(), "VideoComponentCapabilities");
+ ALOGV("K=%d",k);
+ pDecoder->component = pOmxComponents;
+ pDecoder->componentNumber = results.size();
+ }
+
+ for (size_t i = 0; i < results.size(); ++i) {
+ ALOGV(" decoder '%s' supports ",
+ results[i].mComponentName.string());
+
+ if (results[i].mProfileLevels.size() == 0) {
+ ALOGV("NOTHING.\n");
+ continue;
+ }
+
+#if 0
+ // FIXME:
+ // We should ignore the software codecs and make IsSoftwareCodec()
+ // part of pubic API from OMXCodec.cpp
+ if (IsSoftwareCodec(results[i].mComponentName.string())) {
+ ALOGV("Ignore software codec %s", results[i].mComponentName.string());
+ continue;
+ }
+#endif
+
+ // Count the supported profiles
+ int32_t profileNumber = 0;
+ int32_t profile = -1;
+ for (size_t j = 0; j < results[i].mProfileLevels.size(); ++j) {
+ const CodecProfileLevel &profileLevel =
+ results[i].mProfileLevels[j];
+ // FIXME: assume that the profiles are ordered
+ if (profileLevel.mProfile != profile) {
+ profile = profileLevel.mProfile;
+ profileNumber++;
+ }
+ }
+ SAFE_MALLOC(pProfileLevel, VideoProfileLevel,
+ profileNumber, "VideoProfileLevel");
+ pOmxComponents->profileLevel = pProfileLevel;
+ pOmxComponents->profileNumber = profileNumber;
+
+ // Get the max Level for each profile.
+ int32_t maxLevel = -1;
+ profile = -1;
+ profileNumber = 0;
+ for (size_t j = 0; j < results[i].mProfileLevels.size(); ++j) {
+ const CodecProfileLevel &profileLevel =
+ results[i].mProfileLevels[j];
+ if (profile == -1 && maxLevel == -1) {
+ profile = profileLevel.mProfile;
+ maxLevel = profileLevel.mLevel;
+ pProfileLevel->mProfile = profile;
+ pProfileLevel->mLevel = maxLevel;
+ ALOGV("%d profile: %ld, max level: %ld",
+ __LINE__, pProfileLevel->mProfile, pProfileLevel->mLevel);
+ }
+ if (profileLevel.mProfile != profile) {
+ profile = profileLevel.mProfile;
+ maxLevel = profileLevel.mLevel;
+ profileNumber++;
+ pProfileLevel++;
+ pProfileLevel->mProfile = profile;
+ pProfileLevel->mLevel = maxLevel;
+ ALOGV("%d profile: %ld, max level: %ld",
+ __LINE__, pProfileLevel->mProfile, pProfileLevel->mLevel);
+ } else if (profileLevel.mLevel > maxLevel) {
+ maxLevel = profileLevel.mLevel;
+ pProfileLevel->mLevel = maxLevel;
+ ALOGV("%d profile: %ld, max level: %ld",
+ __LINE__, pProfileLevel->mProfile, pProfileLevel->mLevel);
+ }
+
+ }
+ pOmxComponents++;
+ }
+ if (!strcmp(MEDIA_MIMETYPE_VIDEO_AVC, kMimeTypes[k]))
+ pDecoder->codec = M4DA_StreamTypeVideoMpeg4Avc;
+ if (!strcmp(MEDIA_MIMETYPE_VIDEO_MPEG4, kMimeTypes[k]))
+ pDecoder->codec = M4DA_StreamTypeVideoMpeg4;
+ if (!strcmp(MEDIA_MIMETYPE_VIDEO_H263, kMimeTypes[k]))
+ pDecoder->codec = M4DA_StreamTypeVideoH263;
+
+ pDecoder++;
+ }
+
+ logSupportDecodersAndCapabilities(pDecoders);
+ *decoders = pDecoders;
+cleanUp:
+ return err;
+}
+/********************
+ * ENGINE INTERFACE *
+ ********************/
+M4OSA_ERR VideoEditorVideoDecoder_configureFromMetadata(M4OSA_Context pContext,
+ MetaData* meta) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoDecoder_Context* pDecShellContext = M4OSA_NULL;
+ bool success = OK;
+ int32_t width = 0;
+ int32_t height = 0;
+ int32_t frameSize = 0;
+ int32_t vWidth, vHeight;
+ int32_t cropLeft, cropTop, cropRight, cropBottom;
+
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != meta, M4ERR_PARAMETER);
+
+ ALOGV("VideoEditorVideoDecoder_configureFromMetadata begin");
+
+ pDecShellContext = (VideoEditorVideoDecoder_Context*)pContext;
+
+ success = meta->findInt32(kKeyWidth, &vWidth);
+ VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
+ success = meta->findInt32(kKeyHeight, &vHeight);
+ VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
+
+ ALOGV("vWidth = %d, vHeight = %d", vWidth, vHeight);
+
+ pDecShellContext->mGivenWidth = vWidth;
+ pDecShellContext->mGivenHeight = vHeight;
+
+ if (!meta->findRect(
+ kKeyCropRect, &cropLeft, &cropTop, &cropRight, &cropBottom)) {
+
+ cropLeft = cropTop = 0;
+ cropRight = vWidth - 1;
+ cropBottom = vHeight - 1;
+
+ ALOGV("got dimensions only %d x %d", width, height);
+ } else {
+ ALOGV("got crop rect %d, %d, %d, %d",
+ cropLeft, cropTop, cropRight, cropBottom);
+ }
+
+ pDecShellContext->mCropRect.left = cropLeft;
+ pDecShellContext->mCropRect.right = cropRight;
+ pDecShellContext->mCropRect.top = cropTop;
+ pDecShellContext->mCropRect.bottom = cropBottom;
+
+ width = cropRight - cropLeft + 1;
+ height = cropBottom - cropTop + 1;
+
+ ALOGV("VideoDecoder_configureFromMetadata : W=%d H=%d", width, height);
+ VIDEOEDITOR_CHECK((0 != width) && (0 != height), M4ERR_PARAMETER);
+
+ if( (M4OSA_NULL != pDecShellContext->m_pDecBufferPool) &&
+ (pDecShellContext->m_pVideoStreamhandler->m_videoWidth == \
+ (uint32_t)width) &&
+ (pDecShellContext->m_pVideoStreamhandler->m_videoHeight == \
+ (uint32_t)height) ) {
+ // No need to reconfigure
+ goto cleanUp;
+ }
+ ALOGV("VideoDecoder_configureFromMetadata reset: W=%d H=%d", width, height);
+ // Update the stream handler parameters
+ pDecShellContext->m_pVideoStreamhandler->m_videoWidth = width;
+ pDecShellContext->m_pVideoStreamhandler->m_videoHeight = height;
+ frameSize = (width * height * 3) / 2;
+
+ // Configure the buffer pool
+ if( M4OSA_NULL != pDecShellContext->m_pDecBufferPool ) {
+ ALOGV("VideoDecoder_configureFromMetadata : reset the buffer pool");
+ VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
+ pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+ }
+ err = VIDEOEDITOR_BUFFER_allocatePool(&pDecShellContext->m_pDecBufferPool,
+ MAX_DEC_BUFFERS, (M4OSA_Char*)"VIDEOEDITOR_DecodedBufferPool");
+ VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+ err = VIDEOEDITOR_BUFFER_initPoolBuffers(pDecShellContext->m_pDecBufferPool,
+ frameSize + pDecShellContext->mGivenWidth * 2);
+
+ VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+cleanUp:
+ if( M4NO_ERROR == err ) {
+ ALOGV("VideoEditorVideoDecoder_configureFromMetadata no error");
+ } else {
+ if( M4OSA_NULL != pDecShellContext->m_pDecBufferPool ) {
+ VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
+ pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+ }
+ ALOGV("VideoEditorVideoDecoder_configureFromMetadata ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoDecoder_configureFromMetadata end");
+ return err;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_destroy(M4OSA_Context pContext) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoDecoder_Context* pDecShellContext =
+ (VideoEditorVideoDecoder_Context*)pContext;
+
+ // Input parameters check
+ ALOGV("VideoEditorVideoDecoder_destroy begin");
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+ // Release the color converter
+ delete pDecShellContext->mI420ColorConverter;
+
+ // Destroy the graph
+ if( pDecShellContext->mVideoDecoder != NULL ) {
+ ALOGV("### VideoEditorVideoDecoder_destroy : releasing decoder");
+ pDecShellContext->mVideoDecoder->stop();
+ pDecShellContext->mVideoDecoder.clear();
+ }
+ pDecShellContext->mClient.disconnect();
+ pDecShellContext->mReaderSource.clear();
+
+ // Release memory
+ if( pDecShellContext->m_pDecBufferPool != M4OSA_NULL ) {
+ VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
+ pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+ }
+ SAFE_FREE(pDecShellContext);
+ pContext = NULL;
+
+cleanUp:
+ if( M4NO_ERROR == err ) {
+ ALOGV("VideoEditorVideoDecoder_destroy no error");
+ } else {
+ ALOGV("VideoEditorVideoDecoder_destroy ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoDecoder_destroy end");
+ return err;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_create(M4OSA_Context *pContext,
+ M4_StreamHandler *pStreamHandler,
+ M4READER_GlobalInterface *pReaderGlobalInterface,
+ M4READER_DataInterface *pReaderDataInterface,
+ M4_AccessUnit *pAccessUnit, M4OSA_Void *pUserData) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoDecoder_Context* pDecShellContext = M4OSA_NULL;
+ status_t status = OK;
+ bool success = TRUE;
+ int32_t colorFormat = 0;
+ M4OSA_UInt32 size = 0;
+ sp<MetaData> decoderMetadata = NULL;
+ int decoderOutput = OMX_COLOR_FormatYUV420Planar;
+
+ ALOGV("VideoEditorVideoDecoder_create begin");
+ // Input parameters check
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pStreamHandler, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pReaderDataInterface, M4ERR_PARAMETER);
+
+ // Context allocation & initialization
+ SAFE_MALLOC(pDecShellContext, VideoEditorVideoDecoder_Context, 1,
+ "VideoEditorVideoDecoder");
+ pDecShellContext->m_pVideoStreamhandler =
+ (M4_VideoStreamHandler*)pStreamHandler;
+ pDecShellContext->m_pNextAccessUnitToDecode = pAccessUnit;
+ pDecShellContext->m_pReaderGlobal = pReaderGlobalInterface;
+ pDecShellContext->m_pReader = pReaderDataInterface;
+ pDecShellContext->m_lastDecodedCTS = -1;
+ pDecShellContext->m_lastRenderCts = -1;
+ switch( pStreamHandler->m_streamType ) {
+ case M4DA_StreamTypeVideoH263:
+ pDecShellContext->mDecoderType = VIDEOEDITOR_kH263VideoDec;
+ break;
+ case M4DA_StreamTypeVideoMpeg4:
+ pDecShellContext->mDecoderType = VIDEOEDITOR_kMpeg4VideoDec;
+ // Parse the VOL header
+ err = VideoEditorVideoDecoder_internalParseVideoDSI(
+ (M4OSA_UInt8*)pDecShellContext->m_pVideoStreamhandler->\
+ m_basicProperties.m_pDecoderSpecificInfo,
+ pDecShellContext->m_pVideoStreamhandler->\
+ m_basicProperties.m_decoderSpecificInfoSize,
+ &pDecShellContext->m_Dci, &pDecShellContext->m_VideoSize);
+ VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+ break;
+ case M4DA_StreamTypeVideoMpeg4Avc:
+ pDecShellContext->mDecoderType = VIDEOEDITOR_kH264VideoDec;
+ break;
+ default:
+ VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
+ M4ERR_PARAMETER);
+ break;
+ }
+
+ pDecShellContext->mNbInputFrames = 0;
+ pDecShellContext->mFirstInputCts = -1.0;
+ pDecShellContext->mLastInputCts = -1.0;
+ pDecShellContext->mNbRenderedFrames = 0;
+ pDecShellContext->mFirstRenderedCts = -1.0;
+ pDecShellContext->mLastRenderedCts = -1.0;
+ pDecShellContext->mNbOutputFrames = 0;
+ pDecShellContext->mFirstOutputCts = -1;
+ pDecShellContext->mLastOutputCts = -1;
+ pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+
+ /**
+ * StageFright graph building
+ */
+ decoderMetadata = new MetaData;
+ switch( pDecShellContext->mDecoderType ) {
+ case VIDEOEDITOR_kH263VideoDec:
+ decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
+ break;
+ case VIDEOEDITOR_kMpeg4VideoDec:
+ decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+ decoderMetadata->setData(kKeyESDS, kTypeESDS,
+ pStreamHandler->m_pESDSInfo,
+ pStreamHandler->m_ESDSInfoSize);
+ break;
+ case VIDEOEDITOR_kH264VideoDec:
+ decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+ decoderMetadata->setData(kKeyAVCC, kTypeAVCC,
+ pStreamHandler->m_pH264DecoderSpecificInfo,
+ pStreamHandler->m_H264decoderSpecificInfoSize);
+ break;
+ default:
+ VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
+ M4ERR_PARAMETER);
+ break;
+ }
+
+ decoderMetadata->setInt32(kKeyMaxInputSize, pStreamHandler->m_maxAUSize);
+ decoderMetadata->setInt32(kKeyWidth,
+ pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
+ decoderMetadata->setInt32(kKeyHeight,
+ pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
+
+ // Create the decoder source
+ pDecShellContext->mReaderSource = new VideoEditorVideoDecoderSource(
+ decoderMetadata, pDecShellContext->mDecoderType,
+ (void *)pDecShellContext);
+ VIDEOEDITOR_CHECK(NULL != pDecShellContext->mReaderSource.get(),
+ M4ERR_SF_DECODER_RSRC_FAIL);
+
+ // Connect to the OMX client
+ status = pDecShellContext->mClient.connect();
+ VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
+
+ // Create the decoder
+ pDecShellContext->mVideoDecoder = OMXCodec::Create(
+ pDecShellContext->mClient.interface(),
+ decoderMetadata, false, pDecShellContext->mReaderSource);
+ VIDEOEDITOR_CHECK(NULL != pDecShellContext->mVideoDecoder.get(),
+ M4ERR_SF_DECODER_RSRC_FAIL);
+
+
+ // Get the output color format
+ success = pDecShellContext->mVideoDecoder->getFormat()->findInt32(
+ kKeyColorFormat, &colorFormat);
+ VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
+ pDecShellContext->decOuputColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
+
+ pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyWidth,
+ pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
+ pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyHeight,
+ pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
+
+ // Get the color converter
+ pDecShellContext->mI420ColorConverter = new I420ColorConverter;
+ if (pDecShellContext->mI420ColorConverter->isLoaded()) {
+ decoderOutput = pDecShellContext->mI420ColorConverter->getDecoderOutputFormat();
+ }
+
+ if (decoderOutput == OMX_COLOR_FormatYUV420Planar) {
+ delete pDecShellContext->mI420ColorConverter;
+ pDecShellContext->mI420ColorConverter = NULL;
+ }
+
+ ALOGI("decoder output format = 0x%X\n", decoderOutput);
+
+ // Configure the buffer pool from the metadata
+ err = VideoEditorVideoDecoder_configureFromMetadata(pDecShellContext,
+ pDecShellContext->mVideoDecoder->getFormat().get());
+ VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+ // Start the graph
+ status = pDecShellContext->mVideoDecoder->start();
+ VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
+
+ *pContext = (M4OSA_Context)pDecShellContext;
+
+cleanUp:
+ if( M4NO_ERROR == err ) {
+ ALOGV("VideoEditorVideoDecoder_create no error");
+ } else {
+ VideoEditorVideoDecoder_destroy(pDecShellContext);
+ *pContext = M4OSA_NULL;
+ ALOGV("VideoEditorVideoDecoder_create ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoDecoder_create : DONE");
+ return err;
+}
+
+M4OSA_ERR VideoEditorVideoSoftwareDecoder_create(M4OSA_Context *pContext,
+ M4_StreamHandler *pStreamHandler,
+ M4READER_GlobalInterface *pReaderGlobalInterface,
+ M4READER_DataInterface *pReaderDataInterface,
+ M4_AccessUnit *pAccessUnit, M4OSA_Void *pUserData) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoDecoder_Context* pDecShellContext = M4OSA_NULL;
+ status_t status = OK;
+ bool success = TRUE;
+ int32_t colorFormat = 0;
+ M4OSA_UInt32 size = 0;
+ sp<MetaData> decoderMetadata = NULL;
+
+ ALOGV("VideoEditorVideoDecoder_create begin");
+ // Input parameters check
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pStreamHandler, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pReaderDataInterface, M4ERR_PARAMETER);
+
+ // Context allocation & initialization
+ SAFE_MALLOC(pDecShellContext, VideoEditorVideoDecoder_Context, 1,
+ "VideoEditorVideoDecoder");
+ pDecShellContext->m_pVideoStreamhandler =
+ (M4_VideoStreamHandler*)pStreamHandler;
+ pDecShellContext->m_pNextAccessUnitToDecode = pAccessUnit;
+ pDecShellContext->m_pReaderGlobal = pReaderGlobalInterface;
+ pDecShellContext->m_pReader = pReaderDataInterface;
+ pDecShellContext->m_lastDecodedCTS = -1;
+ pDecShellContext->m_lastRenderCts = -1;
+ switch( pStreamHandler->m_streamType ) {
+ case M4DA_StreamTypeVideoH263:
+ pDecShellContext->mDecoderType = VIDEOEDITOR_kH263VideoDec;
+ break;
+ case M4DA_StreamTypeVideoMpeg4:
+ pDecShellContext->mDecoderType = VIDEOEDITOR_kMpeg4VideoDec;
+ // Parse the VOL header
+ err = VideoEditorVideoDecoder_internalParseVideoDSI(
+ (M4OSA_UInt8*)pDecShellContext->m_pVideoStreamhandler->\
+ m_basicProperties.m_pDecoderSpecificInfo,
+ pDecShellContext->m_pVideoStreamhandler->\
+ m_basicProperties.m_decoderSpecificInfoSize,
+ &pDecShellContext->m_Dci, &pDecShellContext->m_VideoSize);
+ VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+ break;
+ case M4DA_StreamTypeVideoMpeg4Avc:
+ pDecShellContext->mDecoderType = VIDEOEDITOR_kH264VideoDec;
+ break;
+ default:
+ VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
+ M4ERR_PARAMETER);
+ break;
+ }
+
+ pDecShellContext->mNbInputFrames = 0;
+ pDecShellContext->mFirstInputCts = -1.0;
+ pDecShellContext->mLastInputCts = -1.0;
+ pDecShellContext->mNbRenderedFrames = 0;
+ pDecShellContext->mFirstRenderedCts = -1.0;
+ pDecShellContext->mLastRenderedCts = -1.0;
+ pDecShellContext->mNbOutputFrames = 0;
+ pDecShellContext->mFirstOutputCts = -1;
+ pDecShellContext->mLastOutputCts = -1;
+ pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+
+ /**
+ * StageFright graph building
+ */
+ decoderMetadata = new MetaData;
+ switch( pDecShellContext->mDecoderType ) {
+ case VIDEOEDITOR_kH263VideoDec:
+ decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
+ break;
+ case VIDEOEDITOR_kMpeg4VideoDec:
+ decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+ decoderMetadata->setData(kKeyESDS, kTypeESDS,
+ pStreamHandler->m_pESDSInfo,
+ pStreamHandler->m_ESDSInfoSize);
+ break;
+ case VIDEOEDITOR_kH264VideoDec:
+ decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+ decoderMetadata->setData(kKeyAVCC, kTypeAVCC,
+ pStreamHandler->m_pH264DecoderSpecificInfo,
+ pStreamHandler->m_H264decoderSpecificInfoSize);
+ break;
+ default:
+ VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
+ M4ERR_PARAMETER);
+ break;
+ }
+
+ decoderMetadata->setInt32(kKeyMaxInputSize, pStreamHandler->m_maxAUSize);
+ decoderMetadata->setInt32(kKeyWidth,
+ pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
+ decoderMetadata->setInt32(kKeyHeight,
+ pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
+
+ // Create the decoder source
+ pDecShellContext->mReaderSource = new VideoEditorVideoDecoderSource(
+ decoderMetadata, pDecShellContext->mDecoderType,
+ (void *)pDecShellContext);
+ VIDEOEDITOR_CHECK(NULL != pDecShellContext->mReaderSource.get(),
+ M4ERR_SF_DECODER_RSRC_FAIL);
+
+ // Connect to the OMX client
+ status = pDecShellContext->mClient.connect();
+ VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
+
+ ALOGI("Using software codecs only");
+ // Create the decoder
+ pDecShellContext->mVideoDecoder = OMXCodec::Create(
+ pDecShellContext->mClient.interface(),
+ decoderMetadata, false, pDecShellContext->mReaderSource,NULL,OMXCodec::kSoftwareCodecsOnly);
+ VIDEOEDITOR_CHECK(NULL != pDecShellContext->mVideoDecoder.get(),
+ M4ERR_SF_DECODER_RSRC_FAIL);
+
+ // Get the output color format
+ success = pDecShellContext->mVideoDecoder->getFormat()->findInt32(
+ kKeyColorFormat, &colorFormat);
+ VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
+ pDecShellContext->decOuputColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
+
+ pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyWidth,
+ pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
+ pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyHeight,
+ pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
+
+ // Configure the buffer pool from the metadata
+ err = VideoEditorVideoDecoder_configureFromMetadata(pDecShellContext,
+ pDecShellContext->mVideoDecoder->getFormat().get());
+ VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+ // Start the graph
+ status = pDecShellContext->mVideoDecoder->start();
+ VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
+
+ *pContext = (M4OSA_Context)pDecShellContext;
+
+cleanUp:
+ if( M4NO_ERROR == err ) {
+ ALOGV("VideoEditorVideoDecoder_create no error");
+ } else {
+ VideoEditorVideoDecoder_destroy(pDecShellContext);
+ *pContext = M4OSA_NULL;
+ ALOGV("VideoEditorVideoDecoder_create ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoDecoder_create : DONE");
+ return err;
+}
+
+
+M4OSA_ERR VideoEditorVideoDecoder_getOption(M4OSA_Context context,
+ M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+ M4OSA_ERR lerr = M4NO_ERROR;
+ VideoEditorVideoDecoder_Context* pDecShellContext =
+ (VideoEditorVideoDecoder_Context*) context;
+ M4_VersionInfo* pVersionInfo;
+ M4DECODER_VideoSize* pVideoSize;
+ M4OSA_UInt32* pNextFrameCts;
+ M4OSA_UInt32 *plastDecodedFrameCts;
+ M4DECODER_AVCProfileLevel* profile;
+ M4DECODER_MPEG4_DecoderConfigInfo* pDecConfInfo;
+
+ ALOGV("VideoEditorVideoDecoder_getOption begin");
+
+ switch (optionId) {
+ case M4DECODER_kOptionID_AVCLastDecodedFrameCTS:
+ plastDecodedFrameCts = (M4OSA_UInt32 *) pValue;
+ *plastDecodedFrameCts = pDecShellContext->m_lastDecodedCTS;
+ break;
+
+ case M4DECODER_kOptionID_Version:
+ pVersionInfo = (M4_VersionInfo*)pValue;
+
+ pVersionInfo->m_major = VIDEOEDITOR_VIDEC_SHELL_VER_MAJOR;
+ pVersionInfo->m_minor= VIDEOEDITOR_VIDEC_SHELL_VER_MINOR;
+ pVersionInfo->m_revision = VIDEOEDITOR_VIDEC_SHELL_VER_REVISION;
+ pVersionInfo->m_structSize=sizeof(M4_VersionInfo);
+ break;
+
+ case M4DECODER_kOptionID_VideoSize:
+ /** Only VPS uses this Option ID. */
+ pVideoSize = (M4DECODER_VideoSize*)pValue;
+ pDecShellContext->mVideoDecoder->getFormat()->findInt32(kKeyWidth,
+ (int32_t*)(&pVideoSize->m_uiWidth));
+ pDecShellContext->mVideoDecoder->getFormat()->findInt32(kKeyHeight,
+ (int32_t*)(&pVideoSize->m_uiHeight));
+ ALOGV("VideoEditorVideoDecoder_getOption : W=%d H=%d",
+ pVideoSize->m_uiWidth, pVideoSize->m_uiHeight);
+ break;
+
+ case M4DECODER_kOptionID_NextRenderedFrameCTS:
+ /** How to get this information. SF decoder does not provide this. *
+ ** Let us provide last decoded frame CTS as of now. *
+ ** Only VPS uses this Option ID. */
+ pNextFrameCts = (M4OSA_UInt32 *)pValue;
+ *pNextFrameCts = pDecShellContext->m_lastDecodedCTS;
+ break;
+ case M4DECODER_MPEG4_kOptionID_DecoderConfigInfo:
+ if(pDecShellContext->mDecoderType == VIDEOEDITOR_kMpeg4VideoDec) {
+ (*(M4DECODER_MPEG4_DecoderConfigInfo*)pValue) =
+ pDecShellContext->m_Dci;
+ }
+ break;
+ default:
+ lerr = M4ERR_BAD_OPTION_ID;
+ break;
+
+ }
+
+ ALOGV("VideoEditorVideoDecoder_getOption: end with err = 0x%x", lerr);
+ return lerr;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_setOption(M4OSA_Context context,
+ M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+ M4OSA_ERR lerr = M4NO_ERROR;
+ VideoEditorVideoDecoder_Context *pDecShellContext =
+ (VideoEditorVideoDecoder_Context*) context;
+
+ ALOGV("VideoEditorVideoDecoder_setOption begin");
+
+ switch (optionId) {
+ case M4DECODER_kOptionID_OutputFilter: {
+ M4DECODER_OutputFilter* pOutputFilter =
+ (M4DECODER_OutputFilter*) pValue;
+ pDecShellContext->m_pFilter =
+ (M4VIFI_PlanConverterFunctionType*)pOutputFilter->\
+ m_pFilterFunction;
+ pDecShellContext->m_pFilterUserData =
+ pOutputFilter->m_pFilterUserData;
+ }
+ break;
+ case M4DECODER_kOptionID_DeblockingFilter:
+ break;
+ default:
+ lerr = M4ERR_BAD_CONTEXT;
+ break;
+ }
+
+ ALOGV("VideoEditorVideoDecoder_setOption: end with err = 0x%x", lerr);
+ return lerr;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_decode(M4OSA_Context context,
+ M4_MediaTime* pTime, M4OSA_Bool bJump, M4OSA_UInt32 tolerance) {
+ M4OSA_ERR lerr = M4NO_ERROR;
+ VideoEditorVideoDecoder_Context* pDecShellContext =
+ (VideoEditorVideoDecoder_Context*) context;
+ int64_t lFrameTime;
+ MediaBuffer* pDecoderBuffer = NULL;
+ MediaBuffer* pNextBuffer = NULL;
+ status_t errStatus;
+ bool needSeek = bJump;
+
+ ALOGV("VideoEditorVideoDecoder_decode begin");
+
+ if( M4OSA_TRUE == pDecShellContext->mReachedEOS ) {
+ // Do not call read(), it could lead to a freeze
+ ALOGV("VideoEditorVideoDecoder_decode : EOS already reached");
+ lerr = M4WAR_NO_MORE_AU;
+ goto VIDEOEDITOR_VideoDecode_cleanUP;
+ }
+ if(pDecShellContext->m_lastDecodedCTS >= *pTime) {
+ ALOGV("VideoDecoder_decode: Already decoded up to this time CTS = %lf.",
+ pDecShellContext->m_lastDecodedCTS);
+ goto VIDEOEDITOR_VideoDecode_cleanUP;
+ }
+ if(M4OSA_TRUE == bJump) {
+ ALOGV("VideoEditorVideoDecoder_decode: Jump called");
+ pDecShellContext->m_lastDecodedCTS = -1;
+ pDecShellContext->m_lastRenderCts = -1;
+ }
+
+ pDecShellContext->mNbInputFrames++;
+ if (0 > pDecShellContext->mFirstInputCts){
+ pDecShellContext->mFirstInputCts = *pTime;
+ }
+ pDecShellContext->mLastInputCts = *pTime;
+
+ while (pDecoderBuffer == NULL || pDecShellContext->m_lastDecodedCTS + tolerance < *pTime) {
+ ALOGV("VideoEditorVideoDecoder_decode, frameCTS = %lf, DecodeUpTo = %lf",
+ pDecShellContext->m_lastDecodedCTS, *pTime);
+
+ // Read the buffer from the stagefright decoder
+ if (needSeek) {
+ MediaSource::ReadOptions options;
+ int64_t time_us = *pTime * 1000;
+ options.setSeekTo(time_us, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+ errStatus = pDecShellContext->mVideoDecoder->read(&pNextBuffer, &options);
+ needSeek = false;
+ } else {
+ errStatus = pDecShellContext->mVideoDecoder->read(&pNextBuffer);
+ }
+
+ // Handle EOS and format change
+ if (errStatus == ERROR_END_OF_STREAM) {
+ ALOGV("End of stream reached, returning M4WAR_NO_MORE_AU ");
+ pDecShellContext->mReachedEOS = M4OSA_TRUE;
+ lerr = M4WAR_NO_MORE_AU;
+ // If we decoded a buffer before EOS, we still need to put it
+ // into the queue.
+ if (pDecoderBuffer && bJump) {
+ copyBufferToQueue(pDecShellContext, pDecoderBuffer);
+ }
+ goto VIDEOEDITOR_VideoDecode_cleanUP;
+ } else if (INFO_FORMAT_CHANGED == errStatus) {
+ ALOGV("VideoDecoder_decode : source returns INFO_FORMAT_CHANGED");
+ lerr = VideoEditorVideoDecoder_configureFromMetadata(
+ pDecShellContext,
+ pDecShellContext->mVideoDecoder->getFormat().get());
+ if( M4NO_ERROR != lerr ) {
+ ALOGV("!!! VideoEditorVideoDecoder_decode ERROR : "
+ "VideoDecoder_configureFromMetadata returns 0x%X", lerr);
+ break;
+ }
+ continue;
+ } else if (errStatus != OK) {
+ ALOGE("VideoEditorVideoDecoder_decode ERROR:0x%x(%d)",
+ errStatus,errStatus);
+ lerr = errStatus;
+ goto VIDEOEDITOR_VideoDecode_cleanUP;
+ }
+
+ // The OMXCodec client should expect to receive 0-length buffers
+ // and drop the 0-length buffers.
+ if (pNextBuffer->range_length() == 0) {
+ pNextBuffer->release();
+ continue;
+ }
+
+ // Now we have a good next buffer, release the previous one.
+ if (pDecoderBuffer != NULL) {
+ pDecoderBuffer->release();
+ pDecoderBuffer = NULL;
+ }
+ pDecoderBuffer = pNextBuffer;
+
+ // Record the timestamp of last decoded buffer
+ pDecoderBuffer->meta_data()->findInt64(kKeyTime, &lFrameTime);
+ pDecShellContext->m_lastDecodedCTS = (M4_MediaTime)(lFrameTime/1000);
+ ALOGV("VideoEditorVideoDecoder_decode,decoded frametime = %lf,size = %d",
+ (M4_MediaTime)lFrameTime, pDecoderBuffer->size() );
+
+ // If bJump is false, we need to save every decoded buffer
+ if (!bJump) {
+ lerr = copyBufferToQueue(pDecShellContext, pDecoderBuffer);
+ if (lerr != M4NO_ERROR) {
+ goto VIDEOEDITOR_VideoDecode_cleanUP;
+ }
+ }
+ }
+
+ // If bJump is true, we only need to copy the last buffer
+ if (bJump) {
+ lerr = copyBufferToQueue(pDecShellContext, pDecoderBuffer);
+ if (lerr != M4NO_ERROR) {
+ goto VIDEOEDITOR_VideoDecode_cleanUP;
+ }
+ }
+
+ pDecShellContext->mNbOutputFrames++;
+ if ( 0 > pDecShellContext->mFirstOutputCts ) {
+ pDecShellContext->mFirstOutputCts = *pTime;
+ }
+ pDecShellContext->mLastOutputCts = *pTime;
+
+VIDEOEDITOR_VideoDecode_cleanUP:
+ *pTime = pDecShellContext->m_lastDecodedCTS;
+ if (pDecoderBuffer != NULL) {
+ pDecoderBuffer->release();
+ pDecoderBuffer = NULL;
+ }
+
+ ALOGV("VideoEditorVideoDecoder_decode: end with 0x%x", lerr);
+ return lerr;
+}
+
+static M4OSA_ERR copyBufferToQueue(
+ VideoEditorVideoDecoder_Context* pDecShellContext,
+ MediaBuffer* pDecoderBuffer) {
+
+ M4OSA_ERR lerr = M4NO_ERROR;
+ VIDEOEDITOR_BUFFER_Buffer* tmpDecBuffer;
+
+ // Get a buffer from the queue
+ lerr = VIDEOEDITOR_BUFFER_getBuffer(pDecShellContext->m_pDecBufferPool,
+ VIDEOEDITOR_BUFFER_kEmpty, &tmpDecBuffer);
+ if (lerr == (M4OSA_UInt32)M4ERR_NO_BUFFER_AVAILABLE) {
+ lerr = VIDEOEDITOR_BUFFER_getOldestBuffer(
+ pDecShellContext->m_pDecBufferPool,
+ VIDEOEDITOR_BUFFER_kFilled, &tmpDecBuffer);
+ tmpDecBuffer->state = VIDEOEDITOR_BUFFER_kEmpty;
+ lerr = M4NO_ERROR;
+ }
+
+ if (lerr != M4NO_ERROR) return lerr;
+
+ // Color convert or copy from the given MediaBuffer to our buffer
+ if (pDecShellContext->mI420ColorConverter) {
+ if (pDecShellContext->mI420ColorConverter->convertDecoderOutputToI420(
+ (uint8_t *)pDecoderBuffer->data(),// ?? + pDecoderBuffer->range_offset(), // decoderBits
+ pDecShellContext->mGivenWidth, // decoderWidth
+ pDecShellContext->mGivenHeight, // decoderHeight
+ pDecShellContext->mCropRect, // decoderRect
+ tmpDecBuffer->pData /* dstBits */) < 0) {
+ ALOGE("convertDecoderOutputToI420 failed");
+ lerr = M4ERR_NOT_IMPLEMENTED;
+ }
+ } else if (pDecShellContext->decOuputColorFormat == OMX_COLOR_FormatYUV420Planar) {
+ int32_t width = pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
+ int32_t height = pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
+ int32_t yPlaneSize = width * height;
+ int32_t uvPlaneSize = width * height / 4;
+ int32_t offsetSrc = 0;
+
+ if (( width == pDecShellContext->mGivenWidth ) &&
+ ( height == pDecShellContext->mGivenHeight ))
+ {
+ M4OSA_MemAddr8 pTmpBuff = (M4OSA_MemAddr8)pDecoderBuffer->data() + pDecoderBuffer->range_offset();
+
+ memcpy((void *)tmpDecBuffer->pData, (void *)pTmpBuff, yPlaneSize);
+
+ offsetSrc += pDecShellContext->mGivenWidth * pDecShellContext->mGivenHeight;
+ memcpy((void *)((M4OSA_MemAddr8)tmpDecBuffer->pData + yPlaneSize),
+ (void *)(pTmpBuff + offsetSrc), uvPlaneSize);
+
+ offsetSrc += (pDecShellContext->mGivenWidth >> 1) * (pDecShellContext->mGivenHeight >> 1);
+ memcpy((void *)((M4OSA_MemAddr8)tmpDecBuffer->pData + yPlaneSize + uvPlaneSize),
+ (void *)(pTmpBuff + offsetSrc), uvPlaneSize);
+ }
+ else
+ {
+ M4OSA_MemAddr8 pTmpBuff = (M4OSA_MemAddr8)pDecoderBuffer->data() + pDecoderBuffer->range_offset();
+ M4OSA_MemAddr8 pTmpBuffDst = (M4OSA_MemAddr8)tmpDecBuffer->pData;
+ int32_t index;
+
+ for ( index = 0; index < height; index++)
+ {
+ memcpy((void *)pTmpBuffDst, (void *)pTmpBuff, width);
+ pTmpBuffDst += width;
+ pTmpBuff += pDecShellContext->mGivenWidth;
+ }
+
+ pTmpBuff += (pDecShellContext->mGivenWidth * ( pDecShellContext->mGivenHeight - height));
+ for ( index = 0; index < height >> 1; index++)
+ {
+ memcpy((void *)pTmpBuffDst, (void *)pTmpBuff, width >> 1);
+ pTmpBuffDst += width >> 1;
+ pTmpBuff += pDecShellContext->mGivenWidth >> 1;
+ }
+
+ pTmpBuff += ((pDecShellContext->mGivenWidth * (pDecShellContext->mGivenHeight - height)) / 4);
+ for ( index = 0; index < height >> 1; index++)
+ {
+ memcpy((void *)pTmpBuffDst, (void *)pTmpBuff, width >> 1);
+ pTmpBuffDst += width >> 1;
+ pTmpBuff += pDecShellContext->mGivenWidth >> 1;
+ }
+ }
+ } else {
+ ALOGE("VideoDecoder_decode: unexpected color format 0x%X",
+ pDecShellContext->decOuputColorFormat);
+ lerr = M4ERR_PARAMETER;
+ }
+
+ tmpDecBuffer->buffCTS = pDecShellContext->m_lastDecodedCTS;
+ tmpDecBuffer->state = VIDEOEDITOR_BUFFER_kFilled;
+ tmpDecBuffer->size = pDecoderBuffer->size();
+
+ return lerr;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_render(M4OSA_Context context,
+ M4_MediaTime* pTime, M4VIFI_ImagePlane* pOutputPlane,
+ M4OSA_Bool bForceRender) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoDecoder_Context* pDecShellContext =
+ (VideoEditorVideoDecoder_Context*) context;
+ M4OSA_UInt32 lindex, i;
+ M4OSA_UInt8* p_buf_src, *p_buf_dest;
+ M4VIFI_ImagePlane tmpPlaneIn, tmpPlaneOut;
+ VIDEOEDITOR_BUFFER_Buffer* pTmpVIDEOEDITORBuffer, *pRenderVIDEOEDITORBuffer
+ = M4OSA_NULL;
+ M4_MediaTime candidateTimeStamp = -1;
+ M4OSA_Bool bFound = M4OSA_FALSE;
+
+ ALOGV("VideoEditorVideoDecoder_render begin");
+ // Input parameters check
+ VIDEOEDITOR_CHECK(M4OSA_NULL != context, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pTime, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pOutputPlane, M4ERR_PARAMETER);
+
+ // The output buffer is already allocated, just copy the data
+ if ( (*pTime <= pDecShellContext->m_lastRenderCts) &&
+ (M4OSA_FALSE == bForceRender) ) {
+ ALOGV("VIDEOEDITOR_VIDEO_render Frame in the past");
+ err = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+ goto cleanUp;
+ }
+ ALOGV("VideoDecoder_render: lastRendered time = %lf,requested render time = "
+ "%lf", pDecShellContext->m_lastRenderCts, *pTime);
+
+ /**
+ * Find the buffer appropriate for rendering. */
+ for (i=0; i < pDecShellContext->m_pDecBufferPool->NB; i++) {
+ pTmpVIDEOEDITORBuffer = &pDecShellContext->m_pDecBufferPool\
+ ->pNXPBuffer[i];
+ if (pTmpVIDEOEDITORBuffer->state == VIDEOEDITOR_BUFFER_kFilled) {
+ /** Free all those buffers older than last rendered frame. */
+ if (pTmpVIDEOEDITORBuffer->buffCTS < pDecShellContext->\
+ m_lastRenderCts) {
+ pTmpVIDEOEDITORBuffer->state = VIDEOEDITOR_BUFFER_kEmpty;
+ }
+
+ /** Get the buffer with appropriate timestamp */
+ if ( (pTmpVIDEOEDITORBuffer->buffCTS >= pDecShellContext->\
+ m_lastRenderCts) &&
+ (pTmpVIDEOEDITORBuffer->buffCTS <= *pTime) &&
+ (pTmpVIDEOEDITORBuffer->buffCTS > candidateTimeStamp)) {
+ bFound = M4OSA_TRUE;
+ pRenderVIDEOEDITORBuffer = pTmpVIDEOEDITORBuffer;
+ candidateTimeStamp = pTmpVIDEOEDITORBuffer->buffCTS;
+ ALOGV("VideoDecoder_render: found a buffer with timestamp = %lf",
+ candidateTimeStamp);
+ }
+ }
+ }
+ if (M4OSA_FALSE == bFound) {
+ err = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+ goto cleanUp;
+ }
+
+ ALOGV("VideoEditorVideoDecoder_render 3 ouput %d %d %d %d",
+ pOutputPlane[0].u_width, pOutputPlane[0].u_height,
+ pOutputPlane[0].u_topleft, pOutputPlane[0].u_stride);
+
+ pDecShellContext->m_lastRenderCts = candidateTimeStamp;
+
+ if( M4OSA_NULL != pDecShellContext->m_pFilter ) {
+ // Filtering was requested
+ M4VIFI_ImagePlane tmpPlane[3];
+ // Prepare the output image for conversion
+ tmpPlane[0].u_width =
+ pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
+ tmpPlane[0].u_height =
+ pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
+ tmpPlane[0].u_topleft = 0;
+ tmpPlane[0].u_stride = tmpPlane[0].u_width;
+ tmpPlane[0].pac_data = (M4VIFI_UInt8*)pRenderVIDEOEDITORBuffer->pData;
+ tmpPlane[1].u_width = tmpPlane[0].u_width/2;
+ tmpPlane[1].u_height = tmpPlane[0].u_height/2;
+ tmpPlane[1].u_topleft = 0;
+ tmpPlane[1].u_stride = tmpPlane[0].u_stride/2;
+ tmpPlane[1].pac_data = tmpPlane[0].pac_data +
+ (tmpPlane[0].u_stride * tmpPlane[0].u_height);
+ tmpPlane[2].u_width = tmpPlane[1].u_width;
+ tmpPlane[2].u_height = tmpPlane[1].u_height;
+ tmpPlane[2].u_topleft = 0;
+ tmpPlane[2].u_stride = tmpPlane[1].u_stride;
+ tmpPlane[2].pac_data = tmpPlane[1].pac_data +
+ (tmpPlane[1].u_stride * tmpPlane[1].u_height);
+
+ ALOGV("VideoEditorVideoDecoder_render w = %d H = %d",
+ tmpPlane[0].u_width,tmpPlane[0].u_height);
+ pDecShellContext->m_pFilter(M4OSA_NULL, &tmpPlane[0], pOutputPlane);
+ } else {
+ // Just copy the YUV420P buffer
+ M4OSA_MemAddr8 tempBuffPtr =
+ (M4OSA_MemAddr8)pRenderVIDEOEDITORBuffer->pData;
+ M4OSA_UInt32 tempWidth =
+ pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
+ M4OSA_UInt32 tempHeight =
+ pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
+
+ memcpy((void *) pOutputPlane[0].pac_data, (void *)tempBuffPtr,
+ tempWidth * tempHeight);
+ tempBuffPtr += (tempWidth * tempHeight);
+ memcpy((void *) pOutputPlane[1].pac_data, (void *)tempBuffPtr,
+ (tempWidth/2) * (tempHeight/2));
+ tempBuffPtr += ((tempWidth/2) * (tempHeight/2));
+ memcpy((void *) pOutputPlane[2].pac_data, (void *)tempBuffPtr,
+ (tempWidth/2) * (tempHeight/2));
+ }
+
+ pDecShellContext->mNbRenderedFrames++;
+ if ( 0 > pDecShellContext->mFirstRenderedCts ) {
+ pDecShellContext->mFirstRenderedCts = *pTime;
+ }
+ pDecShellContext->mLastRenderedCts = *pTime;
+
+cleanUp:
+ if( M4NO_ERROR == err ) {
+ *pTime = pDecShellContext->m_lastRenderCts;
+ ALOGV("VideoEditorVideoDecoder_render no error");
+ } else {
+ ALOGV("VideoEditorVideoDecoder_render ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoDecoder_render end");
+ return err;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface(M4DECODER_VideoType decoderType,
+ M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+ M4DECODER_VideoInterface* pDecoderInterface = M4OSA_NULL;
+
+ pDecoderInterface = (M4DECODER_VideoInterface*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4DECODER_VideoInterface), M4DECODER_EXTERNAL,
+ (M4OSA_Char*)"VideoEditorVideoDecoder_getInterface" );
+ if (M4OSA_NULL == pDecoderInterface) {
+ return M4ERR_ALLOC;
+ }
+
+ *pDecoderType = decoderType;
+
+ pDecoderInterface->m_pFctCreate = VideoEditorVideoDecoder_create;
+ pDecoderInterface->m_pFctDestroy = VideoEditorVideoDecoder_destroy;
+ pDecoderInterface->m_pFctGetOption = VideoEditorVideoDecoder_getOption;
+ pDecoderInterface->m_pFctSetOption = VideoEditorVideoDecoder_setOption;
+ pDecoderInterface->m_pFctDecode = VideoEditorVideoDecoder_decode;
+ pDecoderInterface->m_pFctRender = VideoEditorVideoDecoder_render;
+
+ *pDecInterface = (M4OSA_Context)pDecoderInterface;
+ return M4NO_ERROR;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface(M4DECODER_VideoType decoderType,
+ M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+ M4DECODER_VideoInterface* pDecoderInterface = M4OSA_NULL;
+
+ pDecoderInterface = (M4DECODER_VideoInterface*)M4OSA_32bitAlignedMalloc(
+ sizeof(M4DECODER_VideoInterface), M4DECODER_EXTERNAL,
+ (M4OSA_Char*)"VideoEditorVideoDecoder_getInterface" );
+ if (M4OSA_NULL == pDecoderInterface) {
+ return M4ERR_ALLOC;
+ }
+
+ *pDecoderType = decoderType;
+
+ pDecoderInterface->m_pFctCreate = VideoEditorVideoSoftwareDecoder_create;
+ pDecoderInterface->m_pFctDestroy = VideoEditorVideoDecoder_destroy;
+ pDecoderInterface->m_pFctGetOption = VideoEditorVideoDecoder_getOption;
+ pDecoderInterface->m_pFctSetOption = VideoEditorVideoDecoder_setOption;
+ pDecoderInterface->m_pFctDecode = VideoEditorVideoDecoder_decode;
+ pDecoderInterface->m_pFctRender = VideoEditorVideoDecoder_render;
+
+ *pDecInterface = (M4OSA_Context)pDecoderInterface;
+ return M4NO_ERROR;
+}
+extern "C" {
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface_MPEG4(
+ M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+ return VideoEditorVideoDecoder_getInterface(M4DECODER_kVideoTypeMPEG4,
+ pDecoderType, pDecInterface);
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface_H264(
+ M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+ return VideoEditorVideoDecoder_getInterface(M4DECODER_kVideoTypeAVC,
+ pDecoderType, pDecInterface);
+
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface_MPEG4(
+ M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+ return VideoEditorVideoDecoder_getSoftwareInterface(M4DECODER_kVideoTypeMPEG4,
+ pDecoderType, pDecInterface);
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getSoftwareInterface_H264(
+ M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+ return VideoEditorVideoDecoder_getSoftwareInterface(M4DECODER_kVideoTypeAVC,
+ pDecoderType, pDecInterface);
+
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getVideoDecodersAndCapabilities(
+ M4DECODER_VideoDecoders** decoders) {
+ return queryVideoDecoderCapabilities(decoders);
+}
+
+} // extern "C"
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp
new file mode 100755
index 0000000..4787680
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp
@@ -0,0 +1,1304 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file VideoEditorVideoEncoder.cpp
+* @brief StageFright shell video encoder
+*************************************************************************
+*/
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_VIDEOENCODER"
+
+/*******************
+ * HEADERS *
+ *******************/
+#include "M4OSA_Debug.h"
+#include "M4SYS_AccessUnit.h"
+#include "VideoEditorVideoEncoder.h"
+#include "VideoEditorUtils.h"
+#include "MediaBufferPuller.h"
+#include <I420ColorConverter.h>
+
+#include <unistd.h>
+#include "utils/Log.h"
+#include "utils/Vector.h"
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/MediaProfiles.h>
+#include "OMX_Video.h"
+
+/********************
+ * DEFINITIONS *
+ ********************/
+
+// Force using hardware encoder
+#define VIDEOEDITOR_FORCECODEC kHardwareCodecsOnly
+
+#if !defined(VIDEOEDITOR_FORCECODEC)
+ #error "Cannot force DSI retrieval if codec type is not fixed"
+#endif
+
+/********************
+ * SOURCE CLASS *
+ ********************/
+
+namespace android {
+
+struct VideoEditorVideoEncoderSource : public MediaSource {
+ public:
+ static sp<VideoEditorVideoEncoderSource> Create(
+ const sp<MetaData> &format);
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual sp<MetaData> getFormat();
+ virtual status_t read(MediaBuffer **buffer,
+ const ReadOptions *options = NULL);
+ virtual int32_t storeBuffer(MediaBuffer *buffer);
+ virtual int32_t getNumberOfBuffersInQueue();
+
+ protected:
+ virtual ~VideoEditorVideoEncoderSource();
+
+ private:
+ struct MediaBufferChain {
+ MediaBuffer* buffer;
+ MediaBufferChain* nextLink;
+ };
+ enum State {
+ CREATED,
+ STARTED,
+ ERROR
+ };
+ VideoEditorVideoEncoderSource(const sp<MetaData> &format);
+
+ // Don't call me
+ VideoEditorVideoEncoderSource(const VideoEditorVideoEncoderSource &);
+ VideoEditorVideoEncoderSource &operator=(
+ const VideoEditorVideoEncoderSource &);
+
+ MediaBufferChain* mFirstBufferLink;
+ MediaBufferChain* mLastBufferLink;
+ int32_t mNbBuffer;
+ bool mIsEOS;
+ State mState;
+ sp<MetaData> mEncFormat;
+ Mutex mLock;
+ Condition mBufferCond;
+};
+
+sp<VideoEditorVideoEncoderSource> VideoEditorVideoEncoderSource::Create(
+ const sp<MetaData> &format) {
+
+ sp<VideoEditorVideoEncoderSource> aSource =
+ new VideoEditorVideoEncoderSource(format);
+ return aSource;
+}
+
+VideoEditorVideoEncoderSource::VideoEditorVideoEncoderSource(
+ const sp<MetaData> &format):
+ mFirstBufferLink(NULL),
+ mLastBufferLink(NULL),
+ mNbBuffer(0),
+ mIsEOS(false),
+ mState(CREATED),
+ mEncFormat(format) {
+ ALOGV("VideoEditorVideoEncoderSource::VideoEditorVideoEncoderSource");
+}
+
+VideoEditorVideoEncoderSource::~VideoEditorVideoEncoderSource() {
+
+ // Safety clean up
+ if( STARTED == mState ) {
+ stop();
+ }
+}
+
+status_t VideoEditorVideoEncoderSource::start(MetaData *meta) {
+ status_t err = OK;
+
+ ALOGV("VideoEditorVideoEncoderSource::start() begin");
+
+ if( CREATED != mState ) {
+ ALOGV("VideoEditorVideoEncoderSource::start: invalid state %d", mState);
+ return UNKNOWN_ERROR;
+ }
+ mState = STARTED;
+
+ ALOGV("VideoEditorVideoEncoderSource::start() END (0x%x)", err);
+ return err;
+}
+
+status_t VideoEditorVideoEncoderSource::stop() {
+ status_t err = OK;
+
+ ALOGV("VideoEditorVideoEncoderSource::stop() begin");
+
+ if( STARTED != mState ) {
+ ALOGV("VideoEditorVideoEncoderSource::stop: invalid state %d", mState);
+ return UNKNOWN_ERROR;
+ }
+
+ // Release the buffer chain
+ int32_t i = 0;
+ MediaBufferChain* tmpLink = NULL;
+ while( mFirstBufferLink ) {
+ i++;
+ tmpLink = mFirstBufferLink;
+ mFirstBufferLink = mFirstBufferLink->nextLink;
+ delete tmpLink;
+ }
+ ALOGV("VideoEditorVideoEncoderSource::stop : %d buffer remained", i);
+ mFirstBufferLink = NULL;
+ mLastBufferLink = NULL;
+
+ mState = CREATED;
+
+ ALOGV("VideoEditorVideoEncoderSource::stop() END (0x%x)", err);
+ return err;
+}
+
+sp<MetaData> VideoEditorVideoEncoderSource::getFormat() {
+
+ ALOGV("VideoEditorVideoEncoderSource::getFormat");
+ return mEncFormat;
+}
+
+status_t VideoEditorVideoEncoderSource::read(MediaBuffer **buffer,
+ const ReadOptions *options) {
+ Mutex::Autolock autolock(mLock);
+ MediaSource::ReadOptions readOptions;
+ status_t err = OK;
+ MediaBufferChain* tmpLink = NULL;
+
+ ALOGV("VideoEditorVideoEncoderSource::read() begin");
+
+ if ( STARTED != mState ) {
+ ALOGV("VideoEditorVideoEncoderSource::read: invalid state %d", mState);
+ return UNKNOWN_ERROR;
+ }
+
+ while (mFirstBufferLink == NULL && !mIsEOS) {
+ mBufferCond.wait(mLock);
+ }
+
+ // End of stream?
+ if (mFirstBufferLink == NULL) {
+ *buffer = NULL;
+ ALOGV("VideoEditorVideoEncoderSource::read : EOS");
+ return ERROR_END_OF_STREAM;
+ }
+
+ // Get a buffer from the chain
+ *buffer = mFirstBufferLink->buffer;
+ tmpLink = mFirstBufferLink;
+ mFirstBufferLink = mFirstBufferLink->nextLink;
+
+ if ( NULL == mFirstBufferLink ) {
+ mLastBufferLink = NULL;
+ }
+ delete tmpLink;
+ mNbBuffer--;
+
+ ALOGV("VideoEditorVideoEncoderSource::read() END (0x%x)", err);
+ return err;
+}
+
+int32_t VideoEditorVideoEncoderSource::storeBuffer(MediaBuffer *buffer) {
+ Mutex::Autolock autolock(mLock);
+ status_t err = OK;
+
+ ALOGV("VideoEditorVideoEncoderSource::storeBuffer() begin");
+
+ if( NULL == buffer ) {
+ ALOGV("VideoEditorVideoEncoderSource::storeBuffer : reached EOS");
+ mIsEOS = true;
+ } else {
+ MediaBufferChain* newLink = new MediaBufferChain;
+ newLink->buffer = buffer;
+ newLink->nextLink = NULL;
+ if( NULL != mLastBufferLink ) {
+ mLastBufferLink->nextLink = newLink;
+ } else {
+ mFirstBufferLink = newLink;
+ }
+ mLastBufferLink = newLink;
+ mNbBuffer++;
+ }
+ mBufferCond.signal();
+ ALOGV("VideoEditorVideoEncoderSource::storeBuffer() end");
+ return mNbBuffer;
+}
+
+int32_t VideoEditorVideoEncoderSource::getNumberOfBuffersInQueue() {
+ Mutex::Autolock autolock(mLock);
+ return mNbBuffer;
+}
+
+/**
+ ******************************************************************************
+ * structure VideoEditorVideoEncoder_Context
+ * @brief This structure defines the context of the StageFright video encoder
+ * shell
+ ******************************************************************************
+*/
+typedef enum {
+ CREATED = 0x1,
+ OPENED = 0x2,
+ STARTED = 0x4,
+ BUFFERING = 0x8,
+ READING = 0x10
+} VideoEditorVideoEncoder_State;
+
+typedef struct {
+ VideoEditorVideoEncoder_State mState;
+ M4ENCODER_Format mFormat;
+ M4WRITER_DataInterface* mWriterDataInterface;
+ M4VPP_apply_fct* mPreProcFunction;
+ M4VPP_Context mPreProcContext;
+ M4SYS_AccessUnit* mAccessUnit;
+ M4ENCODER_Params* mCodecParams;
+ M4ENCODER_Header mHeader;
+ H264MCS_ProcessEncodedNALU_fct* mH264NALUPostProcessFct;
+ M4OSA_Context mH264NALUPostProcessCtx;
+ M4OSA_UInt32 mLastCTS;
+ sp<VideoEditorVideoEncoderSource> mEncoderSource;
+ OMXClient mClient;
+ sp<MediaSource> mEncoder;
+ OMX_COLOR_FORMATTYPE mEncoderColorFormat;
+ MediaBufferPuller* mPuller;
+ I420ColorConverter* mI420ColorConverter;
+
+ uint32_t mNbInputFrames;
+ double mFirstInputCts;
+ double mLastInputCts;
+ uint32_t mNbOutputFrames;
+ int64_t mFirstOutputCts;
+ int64_t mLastOutputCts;
+
+ MediaProfiles *mVideoEditorProfile;
+ int32_t mMaxPrefetchFrames;
+} VideoEditorVideoEncoder_Context;
+
+/********************
+ * TOOLS *
+ ********************/
+
+M4OSA_ERR VideoEditorVideoEncoder_getDSI(M4ENCODER_Context pContext,
+ sp<MetaData> metaData) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+ status_t result = OK;
+ int32_t nbBuffer = 0;
+ int32_t stride = 0;
+ int32_t height = 0;
+ int32_t framerate = 0;
+ int32_t isCodecConfig = 0;
+ size_t size = 0;
+ uint32_t codecFlags = 0;
+ MediaBuffer* inputBuffer = NULL;
+ MediaBuffer* outputBuffer = NULL;
+ sp<VideoEditorVideoEncoderSource> encoderSource = NULL;
+ sp<MediaSource> encoder = NULL;;
+ OMXClient client;
+
+ ALOGV("VideoEditorVideoEncoder_getDSI begin");
+ // Input parameters check
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != metaData.get(), M4ERR_PARAMETER);
+
+ pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+ VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
+
+ // Create the encoder source
+ encoderSource = VideoEditorVideoEncoderSource::Create(metaData);
+ VIDEOEDITOR_CHECK(NULL != encoderSource.get(), M4ERR_STATE);
+
+ // Connect to the OMX client
+ result = client.connect();
+ VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+ // Create the OMX codec
+ // VIDEOEDITOR_FORCECODEC MUST be defined here
+ codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
+ encoder = OMXCodec::Create(client.interface(), metaData, true,
+ encoderSource, NULL, codecFlags);
+ VIDEOEDITOR_CHECK(NULL != encoder.get(), M4ERR_STATE);
+
+ /**
+ * Send fake frames and retrieve the DSI
+ */
+ // Send a fake frame to the source
+ metaData->findInt32(kKeyStride, &stride);
+ metaData->findInt32(kKeyHeight, &height);
+ metaData->findInt32(kKeySampleRate, &framerate);
+ size = (size_t)(stride*height*3)/2;
+ inputBuffer = new MediaBuffer(size);
+ inputBuffer->meta_data()->setInt64(kKeyTime, 0);
+ nbBuffer = encoderSource->storeBuffer(inputBuffer);
+ encoderSource->storeBuffer(NULL); // Signal EOS
+
+ // Call read once to get the DSI
+ result = encoder->start();;
+ VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+ result = encoder->read(&outputBuffer, NULL);
+ VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+ VIDEOEDITOR_CHECK(outputBuffer->meta_data()->findInt32(
+ kKeyIsCodecConfig, &isCodecConfig) && isCodecConfig, M4ERR_STATE);
+
+ VIDEOEDITOR_CHECK(M4OSA_NULL == pEncoderContext->mHeader.pBuf, M4ERR_STATE);
+ if ( M4ENCODER_kH264 == pEncoderContext->mFormat ) {
+ // For H264, format the DSI
+ result = buildAVCCodecSpecificData(
+ (uint8_t**)(&(pEncoderContext->mHeader.pBuf)),
+ (size_t*)(&(pEncoderContext->mHeader.Size)),
+ (const uint8_t*)outputBuffer->data() + outputBuffer->range_offset(),
+ outputBuffer->range_length(), encoder->getFormat().get());
+ outputBuffer->release();
+ VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+ } else {
+ // For MPEG4, just copy the DSI
+ pEncoderContext->mHeader.Size =
+ (M4OSA_UInt32)outputBuffer->range_length();
+ SAFE_MALLOC(pEncoderContext->mHeader.pBuf, M4OSA_Int8,
+ pEncoderContext->mHeader.Size, "Encoder header");
+ memcpy((void *)pEncoderContext->mHeader.pBuf,
+ (void *)((M4OSA_MemAddr8)(outputBuffer->data())+outputBuffer->range_offset()),
+ pEncoderContext->mHeader.Size);
+ outputBuffer->release();
+ }
+
+ result = encoder->stop();
+ VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+cleanUp:
+ // Destroy the graph
+ if ( encoder != NULL ) { encoder.clear(); }
+ client.disconnect();
+ if ( encoderSource != NULL ) { encoderSource.clear(); }
+ if ( M4NO_ERROR == err ) {
+ ALOGV("VideoEditorVideoEncoder_getDSI no error");
+ } else {
+ ALOGV("VideoEditorVideoEncoder_getDSI ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoEncoder_getDSI end");
+ return err;
+}
+/********************
+ * ENGINE INTERFACE *
+ ********************/
+
+M4OSA_ERR VideoEditorVideoEncoder_cleanup(M4ENCODER_Context pContext) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+ ALOGV("VideoEditorVideoEncoder_cleanup begin");
+ // Input parameters check
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+ pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+ VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
+
+ // Release memory
+ SAFE_FREE(pEncoderContext->mHeader.pBuf);
+ SAFE_FREE(pEncoderContext);
+ pContext = M4OSA_NULL;
+
+cleanUp:
+ if ( M4NO_ERROR == err ) {
+ ALOGV("VideoEditorVideoEncoder_cleanup no error");
+ } else {
+ ALOGV("VideoEditorVideoEncoder_cleanup ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoEncoder_cleanup end");
+ return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_init(M4ENCODER_Format format,
+ M4ENCODER_Context* pContext,
+ M4WRITER_DataInterface* pWriterDataInterface,
+ M4VPP_apply_fct* pVPPfct, M4VPP_Context pVPPctxt,
+ M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData) {
+
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+ int encoderInput = OMX_COLOR_FormatYUV420Planar;
+
+ ALOGV("VideoEditorVideoEncoder_init begin: format %d", format);
+ // Input parameters check
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pWriterDataInterface, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pVPPfct, M4ERR_PARAMETER);
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pVPPctxt, M4ERR_PARAMETER);
+
+ // Context allocation & initialization
+ SAFE_MALLOC(pEncoderContext, VideoEditorVideoEncoder_Context, 1,
+ "VideoEditorVideoEncoder");
+ pEncoderContext->mState = CREATED;
+ pEncoderContext->mFormat = format;
+ pEncoderContext->mWriterDataInterface = pWriterDataInterface;
+ pEncoderContext->mPreProcFunction = pVPPfct;
+ pEncoderContext->mPreProcContext = pVPPctxt;
+ pEncoderContext->mPuller = NULL;
+
+ // Get color converter and determine encoder input format
+ pEncoderContext->mI420ColorConverter = new I420ColorConverter;
+ if (pEncoderContext->mI420ColorConverter->isLoaded()) {
+ encoderInput = pEncoderContext->mI420ColorConverter->getEncoderInputFormat();
+ }
+ if (encoderInput == OMX_COLOR_FormatYUV420Planar) {
+ delete pEncoderContext->mI420ColorConverter;
+ pEncoderContext->mI420ColorConverter = NULL;
+ }
+ pEncoderContext->mEncoderColorFormat = (OMX_COLOR_FORMATTYPE)encoderInput;
+ ALOGI("encoder input format = 0x%X\n", encoderInput);
+
+ *pContext = pEncoderContext;
+
+cleanUp:
+ if ( M4NO_ERROR == err ) {
+ ALOGV("VideoEditorVideoEncoder_init no error");
+ } else {
+ VideoEditorVideoEncoder_cleanup(pEncoderContext);
+ *pContext = M4OSA_NULL;
+ ALOGV("VideoEditorVideoEncoder_init ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoEncoder_init end");
+ return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_init_H263(M4ENCODER_Context* pContext,
+ M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
+ M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
+ {
+
+ return VideoEditorVideoEncoder_init(M4ENCODER_kH263, pContext,
+ pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
+}
+
+
+M4OSA_ERR VideoEditorVideoEncoder_init_MPEG4(M4ENCODER_Context* pContext,
+ M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
+ M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
+ {
+
+ return VideoEditorVideoEncoder_init(M4ENCODER_kMPEG4, pContext,
+ pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
+}
+
+
+M4OSA_ERR VideoEditorVideoEncoder_init_H264(M4ENCODER_Context* pContext,
+ M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
+ M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
+ {
+
+ return VideoEditorVideoEncoder_init(M4ENCODER_kH264, pContext,
+ pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_close(M4ENCODER_Context pContext) {
+ M4OSA_ERR err = M4NO_ERROR;
+ VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+ ALOGV("VideoEditorVideoEncoder_close begin");
+ // Input parameters check
+ VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+ pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+ VIDEOEDITOR_CHECK(OPENED == pEncoderContext->mState, M4ERR_STATE);
+
+ // Release memory
+ SAFE_FREE(pEncoderContext->mCodecParams);
+
+ // Destroy the graph
+ pEncoderContext->mEncoder.clear();
+ pEncoderContext->mClient.disconnect();
+ pEncoderContext->mEncoderSource.clear();
+
+ delete pEncoderContext->mPuller;
+ pEncoderContext->mPuller = NULL;
+
+ delete pEncoderContext->mI420ColorConverter;
+ pEncoderContext->mI420ColorConverter = NULL;
+
+ // Set the new state
+ pEncoderContext->mState = CREATED;
+
+cleanUp:
+ if( M4NO_ERROR == err ) {
+ ALOGV("VideoEditorVideoEncoder_close no error");
+ } else {
+ ALOGV("VideoEditorVideoEncoder_close ERROR 0x%X", err);
+ }
+ ALOGV("VideoEditorVideoEncoder_close end");
+ return err;
+}
+
+
+/**
+ * Opens the encoder: validates and copies the caller's encoding parameters,
+ * builds the StageFright graph (VideoEditorVideoEncoderSource feeding an
+ * OMXCodec encoder) and moves the component from CREATED to OPENED.
+ *
+ * @param pContext encoder context returned by the init function (non-NULL)
+ * @param pAU      access-unit descriptor later filled by processOutputBuffer
+ * @param pParams  M4ENCODER_Params with format, size, bitrate and frame rate
+ * @return M4NO_ERROR on success; M4ERR_PARAMETER / M4ERR_STATE on failure
+ *         (on failure, _close() is invoked to undo partial construction)
+ */
+M4OSA_ERR VideoEditorVideoEncoder_open(M4ENCODER_Context pContext,
+        M4SYS_AccessUnit* pAU, M4OSA_Void* pParams) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4ENCODER_Params* pCodecParams = M4OSA_NULL;
+    status_t result = OK;
+    sp<MetaData> encoderMetadata = NULL;
+    const char* mime = NULL;
+    int32_t iProfile = 0;
+    int32_t iLevel = 0;
+
+    int32_t iFrameRate = 0;
+    uint32_t codecFlags = 0;
+
+    ALOGV(">>> VideoEditorVideoEncoder_open begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pAU, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pParams, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    pCodecParams = (M4ENCODER_Params*)pParams;
+    // open() is only legal from the CREATED state (i.e. after init/close)
+    VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
+
+    // Context initialization
+    pEncoderContext->mAccessUnit = pAU;
+    pEncoderContext->mVideoEditorProfile = MediaProfiles::getInstance();
+    // Cap on queued input YUV frames, configured in media_profiles.xml;
+    // enforced later by the read loop in _encode()
+    pEncoderContext->mMaxPrefetchFrames =
+        pEncoderContext->mVideoEditorProfile->getVideoEditorCapParamByName(
+        "maxPrefetchYUVFrames");
+
+    // Allocate & initialize the encoding parameters (private copy, freed
+    // in _close())
+    SAFE_MALLOC(pEncoderContext->mCodecParams, M4ENCODER_Params, 1,
+        "VideoEditorVideoEncoder");
+
+    pEncoderContext->mCodecParams->InputFormat = pCodecParams->InputFormat;
+    pEncoderContext->mCodecParams->InputFrameWidth =
+        pCodecParams->InputFrameWidth;
+    pEncoderContext->mCodecParams->InputFrameHeight =
+        pCodecParams->InputFrameHeight;
+    pEncoderContext->mCodecParams->FrameWidth = pCodecParams->FrameWidth;
+    pEncoderContext->mCodecParams->FrameHeight = pCodecParams->FrameHeight;
+    pEncoderContext->mCodecParams->Bitrate = pCodecParams->Bitrate;
+    pEncoderContext->mCodecParams->FrameRate = pCodecParams->FrameRate;
+    pEncoderContext->mCodecParams->Format = pCodecParams->Format;
+    pEncoderContext->mCodecParams->videoProfile = pCodecParams->videoProfile;
+    pEncoderContext->mCodecParams->videoLevel= pCodecParams->videoLevel;
+
+    // Check output format consistency and resolution
+    // (output dimensions must be multiples of 16 — macroblock aligned)
+    VIDEOEDITOR_CHECK(
+        pEncoderContext->mCodecParams->Format == pEncoderContext->mFormat,
+        M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(0 == pEncoderContext->mCodecParams->FrameWidth % 16,
+        M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(0 == pEncoderContext->mCodecParams->FrameHeight % 16,
+        M4ERR_PARAMETER);
+
+    /**
+     * StageFright graph building
+     */
+
+    // Create the meta data for the encoder
+    encoderMetadata = new MetaData;
+    switch( pEncoderContext->mCodecParams->Format ) {
+        case M4ENCODER_kH263:
+            mime = MEDIA_MIMETYPE_VIDEO_H263;
+            break;
+        case M4ENCODER_kMPEG4:
+            mime = MEDIA_MIMETYPE_VIDEO_MPEG4;
+            break;
+        case M4ENCODER_kH264:
+            mime = MEDIA_MIMETYPE_VIDEO_AVC;
+            break;
+        default:
+            VIDEOEDITOR_CHECK(!"VideoEncoder_open : incorrect input format",
+                M4ERR_PARAMETER);
+            break;
+    }
+    iProfile = pEncoderContext->mCodecParams->videoProfile;
+    iLevel = pEncoderContext->mCodecParams->videoLevel;
+    ALOGV("Encoder mime %s profile %d, level %d",
+        mime,iProfile, iLevel);
+    ALOGV("Encoder w %d, h %d, bitrate %d, fps %d",
+        pEncoderContext->mCodecParams->FrameWidth,
+        pEncoderContext->mCodecParams->FrameHeight,
+        pEncoderContext->mCodecParams->Bitrate,
+        pEncoderContext->mCodecParams->FrameRate);
+    // 0x7fffffff is the "unset" sentinel for profile/level; abort if the
+    // caller did not provide real values
+    CHECK(iProfile != 0x7fffffff);
+    CHECK(iLevel != 0x7fffffff);
+
+    encoderMetadata->setCString(kKeyMIMEType, mime);
+    encoderMetadata->setInt32(kKeyVideoProfile, iProfile);
+    //FIXME:
+    // Temp: Do not set the level for Mpeg4 / H.263 Enc
+    // as OMX.Nvidia.mp4.encoder and OMX.Nvidia.h263.encoder
+    // return 0x80001019
+    if (pEncoderContext->mCodecParams->Format == M4ENCODER_kH264) {
+        encoderMetadata->setInt32(kKeyVideoLevel, iLevel);
+    }
+    // Stride/slice-height are set equal to width/height: the input frames
+    // are tightly packed I420 produced by the pre-processing callback
+    encoderMetadata->setInt32(kKeyWidth,
+        (int32_t)pEncoderContext->mCodecParams->FrameWidth);
+    encoderMetadata->setInt32(kKeyStride,
+        (int32_t)pEncoderContext->mCodecParams->FrameWidth);
+    encoderMetadata->setInt32(kKeyHeight,
+        (int32_t)pEncoderContext->mCodecParams->FrameHeight);
+    encoderMetadata->setInt32(kKeySliceHeight,
+        (int32_t)pEncoderContext->mCodecParams->FrameHeight);
+
+    // Map the M4ENCODER_FrameRate enum to an integer fps value
+    // (7.5 and 12.5 fps are rounded up to 8 and 13)
+    switch( pEncoderContext->mCodecParams->FrameRate ) {
+        case M4ENCODER_k5_FPS:    iFrameRate = 5;  break;
+        case M4ENCODER_k7_5_FPS:  iFrameRate = 8;  break;
+        case M4ENCODER_k10_FPS:   iFrameRate = 10; break;
+        case M4ENCODER_k12_5_FPS: iFrameRate = 13; break;
+        case M4ENCODER_k15_FPS:   iFrameRate = 15; break;
+        case M4ENCODER_k20_FPS:   iFrameRate = 20; break;
+        case M4ENCODER_k25_FPS:   iFrameRate = 25; break;
+        case M4ENCODER_k30_FPS:   iFrameRate = 30; break;
+        case M4ENCODER_kVARIABLE_FPS:
+            iFrameRate = 30;
+            ALOGI("Frame rate set to M4ENCODER_kVARIABLE_FPS: set to 30");
+            break;
+        case M4ENCODER_kUSE_TIMESCALE:
+            iFrameRate = 30;
+            ALOGI("Frame rate set to M4ENCODER_kUSE_TIMESCALE: set to 30");
+            break;
+
+        default:
+            VIDEOEDITOR_CHECK(!"VideoEncoder_open:incorrect framerate",
+                M4ERR_STATE);
+            break;
+    }
+    encoderMetadata->setInt32(kKeyFrameRate, iFrameRate);
+    encoderMetadata->setInt32(kKeyBitRate,
+        (int32_t)pEncoderContext->mCodecParams->Bitrate);
+    encoderMetadata->setInt32(kKeyIFramesInterval, 1);
+
+    encoderMetadata->setInt32(kKeyColorFormat,
+        pEncoderContext->mEncoderColorFormat);
+
+    if (pEncoderContext->mCodecParams->Format != M4ENCODER_kH263) {
+        // Get the encoder DSI (decoder-specific info, e.g. SPS/PPS for H264)
+        err = VideoEditorVideoEncoder_getDSI(pEncoderContext, encoderMetadata);
+        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    }
+
+    // Create the encoder source (the MediaSource that feeds raw frames
+    // queued by processInputBuffer into the OMX encoder)
+    pEncoderContext->mEncoderSource = VideoEditorVideoEncoderSource::Create(
+        encoderMetadata);
+    VIDEOEDITOR_CHECK(
+        NULL != pEncoderContext->mEncoderSource.get(), M4ERR_STATE);
+
+    // Connect to the OMX client
+    result = pEncoderContext->mClient.connect();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Create the OMX codec
+#ifdef VIDEOEDITOR_FORCECODEC
+    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
+#endif /* VIDEOEDITOR_FORCECODEC */
+    pEncoderContext->mEncoder = OMXCodec::Create(
+        pEncoderContext->mClient.interface(), encoderMetadata, true,
+        pEncoderContext->mEncoderSource, NULL, codecFlags);
+    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoder.get(), M4ERR_STATE);
+    ALOGV("VideoEditorVideoEncoder_open : DONE");
+    // Puller thread that drains encoded buffers from the OMX codec
+    pEncoderContext->mPuller = new MediaBufferPuller(
+        pEncoderContext->mEncoder);
+
+    // Set the new state
+    pEncoderContext->mState = OPENED;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorVideoEncoder_open no error");
+    } else {
+        // Tear down whatever was built before the failure
+        VideoEditorVideoEncoder_close(pEncoderContext);
+        ALOGV("VideoEditorVideoEncoder_open ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorVideoEncoder_open end");
+    return err;
+}
+
+/**
+ * Builds one input frame (or an EOS marker) and queues it on the encoder
+ * source.
+ *
+ * When bReachedEOS is false: allocates an I420 MediaBuffer, fills it via
+ * the registered pre-processing callback, optionally converts it to the
+ * encoder's native input layout through mI420ColorConverter, stamps it
+ * with Cts (milliseconds, converted to microseconds) and stores it.
+ * When bReachedEOS is true: stores a NULL buffer, which the source treats
+ * as the end-of-stream notification.
+ *
+ * Fixes vs. previous revision: the success branch of cleanUp logged
+ * "error 0x%X" instead of "no error", and the unused local `nbBuffer`
+ * has been removed.
+ *
+ * @param pContext    encoder context (must be non-NULL)
+ * @param Cts         composition timestamp of the frame, in milliseconds
+ * @param bReachedEOS M4OSA_TRUE to signal end of stream instead of a frame
+ * @return M4NO_ERROR on success, an M4OSA error code otherwise
+ */
+M4OSA_ERR VideoEditorVideoEncoder_processInputBuffer(
+        M4ENCODER_Context pContext, M4OSA_Double Cts,
+        M4OSA_Bool bReachedEOS) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4VIFI_ImagePlane pOutPlane[3];
+    MediaBuffer* buffer = NULL;
+
+    ALOGV("VideoEditorVideoEncoder_processInputBuffer begin: cts  %f", Cts);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    pOutPlane[0].pac_data = M4OSA_NULL;
+    pOutPlane[1].pac_data = M4OSA_NULL;
+    pOutPlane[2].pac_data = M4OSA_NULL;
+
+    if ( M4OSA_FALSE == bReachedEOS ) {
+        // I420 layout: full-size Y plane plus two quarter-size chroma planes
+        M4OSA_UInt32 sizeY = pEncoderContext->mCodecParams->FrameWidth *
+            pEncoderContext->mCodecParams->FrameHeight;
+        M4OSA_UInt32 sizeU = sizeY >> 2;
+        M4OSA_UInt32 size = sizeY + 2*sizeU;
+        M4OSA_UInt8* pData = M4OSA_NULL;
+        buffer = new MediaBuffer((size_t)size);
+        pData = (M4OSA_UInt8*)buffer->data() + buffer->range_offset();
+
+        // Prepare the output image for pre-processing
+        pOutPlane[0].u_width = pEncoderContext->mCodecParams->FrameWidth;
+        pOutPlane[0].u_height = pEncoderContext->mCodecParams->FrameHeight;
+        pOutPlane[0].u_topleft = 0;
+        pOutPlane[0].u_stride = pOutPlane[0].u_width;
+        pOutPlane[1].u_width = pOutPlane[0].u_width/2;
+        pOutPlane[1].u_height = pOutPlane[0].u_height/2;
+        pOutPlane[1].u_topleft = 0;
+        pOutPlane[1].u_stride = pOutPlane[0].u_stride/2;
+        pOutPlane[2].u_width = pOutPlane[1].u_width;
+        pOutPlane[2].u_height = pOutPlane[1].u_height;
+        pOutPlane[2].u_topleft = 0;
+        pOutPlane[2].u_stride = pOutPlane[1].u_stride;
+
+        pOutPlane[0].pac_data = pData;
+        pOutPlane[1].pac_data = pData + sizeY;
+        pOutPlane[2].pac_data = pData + sizeY + sizeU;
+
+        // Apply pre-processing (renders/filters the frame into pOutPlane)
+        err = pEncoderContext->mPreProcFunction(
+            pEncoderContext->mPreProcContext, M4OSA_NULL, pOutPlane);
+        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+        // Convert MediaBuffer to the encoder input format if necessary
+        if (pEncoderContext->mI420ColorConverter) {
+            I420ColorConverter* converter = pEncoderContext->mI420ColorConverter;
+            int actualWidth = pEncoderContext->mCodecParams->FrameWidth;
+            int actualHeight = pEncoderContext->mCodecParams->FrameHeight;
+
+            int encoderWidth, encoderHeight;
+            ARect encoderRect;
+            int encoderBufferSize;
+
+            if (converter->getEncoderInputBufferInfo(
+                actualWidth, actualHeight,
+                &encoderWidth, &encoderHeight,
+                &encoderRect, &encoderBufferSize) == 0) {
+
+                MediaBuffer* newBuffer = new MediaBuffer(encoderBufferSize);
+
+                // NOTE(review): a conversion failure is only logged and the
+                // (garbage) buffer is still queued — preserved behavior
+                if (converter->convertI420ToEncoderInput(
+                    pData,  // srcBits
+                    actualWidth, actualHeight,
+                    encoderWidth, encoderHeight,
+                    encoderRect,
+                    (uint8_t*)newBuffer->data() + newBuffer->range_offset()) < 0) {
+                    ALOGE("convertI420ToEncoderInput failed");
+                }
+
+                // switch to new buffer
+                buffer->release();
+                buffer = newBuffer;
+            }
+        }
+
+        // Set the metadata: timestamp in microseconds
+        buffer->meta_data()->setInt64(kKeyTime, (int64_t)(Cts*1000));
+    }
+
+    // Push the buffer to the source, a NULL buffer, notifies the source of EOS
+    pEncoderContext->mEncoderSource->storeBuffer(buffer);
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorVideoEncoder_processInputBuffer no error");
+    } else {
+        // The buffer was never handed off; release it to avoid a leak
+        if( NULL != buffer ) {
+            buffer->release();
+        }
+        ALOGV("VideoEditorVideoEncoder_processInputBuffer ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorVideoEncoder_processInputBuffer end");
+    return err;
+}
+
+/**
+ * Consumes one encoded buffer from the OMX encoder and forwards it to the
+ * writer as an access unit (AU).
+ *
+ * Codec-config buffers (DSI) are only logged. For regular frames the CTS is
+ * validated against the last written CTS, the optional H264 start code is
+ * stripped, the payload is either post-processed (H264 trimming) or copied
+ * into the AU, and the AU is handed to the writer interface.
+ *
+ * @param pContext encoder context (non-NULL)
+ * @param buffer   encoded MediaBuffer from the puller (non-NULL; the caller
+ *                 keeps ownership and returns it to the puller afterwards)
+ * @return M4NO_ERROR on success, an M4OSA error code otherwise
+ */
+M4OSA_ERR VideoEditorVideoEncoder_processOutputBuffer(
+        M4ENCODER_Context pContext, MediaBuffer* buffer) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4OSA_UInt32 Cts = 0;
+    int32_t i32Tmp = 0;
+    int64_t i64Tmp = 0;
+    status_t result = OK;
+
+    ALOGV("VideoEditorVideoEncoder_processOutputBuffer begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+
+    // Process the returned AU
+    if ( 0 == buffer->range_length() ) {
+        // Encoder has no data yet, nothing unusual
+        ALOGV("VideoEditorVideoEncoder_processOutputBuffer : buffer is empty");
+        goto cleanUp;
+    }
+    // 4-byte alignment check on the buffer address.
+    // NOTE(review): casting a pointer to M4OSA_UInt32 truncates on 64-bit
+    // builds — harmless for a %4 test of the low bits, but worth confirming.
+    VIDEOEDITOR_CHECK(0 == ((M4OSA_UInt32)buffer->data())%4, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(buffer->meta_data().get(), M4ERR_PARAMETER);
+    if ( buffer->meta_data()->findInt32(kKeyIsCodecConfig, &i32Tmp) && i32Tmp ){
+        { // Display the DSI
+            ALOGV("VideoEditorVideoEncoder_processOutputBuffer DSI %d",
+                buffer->range_length());
+            uint8_t* tmp = (uint8_t*)(buffer->data());
+            for( uint32_t i=0; i<buffer->range_length(); i++ ) {
+                ALOGV("DSI [%d] %.2X", i, tmp[i]);
+            }
+        }
+    } else {
+        // Check the CTS
+        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt64(kKeyTime, &i64Tmp),
+            M4ERR_STATE);
+
+        pEncoderContext->mNbOutputFrames++;
+        if ( 0 > pEncoderContext->mFirstOutputCts ) {
+            pEncoderContext->mFirstOutputCts = i64Tmp;
+        }
+        pEncoderContext->mLastOutputCts = i64Tmp;
+
+        // Convert microseconds to milliseconds.
+        // NOTE(review): the cast is to M4OSA_Int32 but Cts is declared
+        // M4OSA_UInt32 — presumably intentional for negative guarding;
+        // confirm.
+        Cts = (M4OSA_Int32)(i64Tmp/1000);
+        ALOGV("[TS_CHECK] VI/ENC  WRITE  frame %d @ %lld -> %d (last %d)",
+            pEncoderContext->mNbOutputFrames, i64Tmp, Cts,
+            pEncoderContext->mLastCTS);
+        if ( Cts < pEncoderContext->mLastCTS ) {
+            // Out-of-order frame: drop it rather than corrupt the stream
+            ALOGV("VideoEncoder_processOutputBuffer WARNING : Cts is going "
+            "backwards %d < %d", Cts, pEncoderContext->mLastCTS);
+            goto cleanUp;
+        }
+        ALOGV("VideoEditorVideoEncoder_processOutputBuffer : %d %d",
+            Cts, pEncoderContext->mLastCTS);
+
+        // Retrieve the AU container
+        err = pEncoderContext->mWriterDataInterface->pStartAU(
+            pEncoderContext->mWriterDataInterface->pWriterContext,
+            pEncoderContext->mAccessUnit->stream->streamID,
+            pEncoderContext->mAccessUnit);
+        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+        // Format the AU
+        VIDEOEDITOR_CHECK(
+            buffer->range_length() <= pEncoderContext->mAccessUnit->size,
+            M4ERR_PARAMETER);
+        // Remove H264 AU start code (the writer expects raw NALUs)
+        if ( M4ENCODER_kH264 == pEncoderContext->mFormat ) {
+            if (!memcmp((const uint8_t *)buffer->data() + \
+                    buffer->range_offset(), "\x00\x00\x00\x01", 4) ) {
+                buffer->set_range(buffer->range_offset() + 4,
+                    buffer->range_length() - 4);
+            }
+        }
+
+        if ( (M4ENCODER_kH264 == pEncoderContext->mFormat) &&
+            (M4OSA_NULL != pEncoderContext->mH264NALUPostProcessFct) ) {
+            // H264 trimming case, NALU post processing is needed
+            M4OSA_Int32 outputSize = pEncoderContext->mAccessUnit->size;
+            err = pEncoderContext->mH264NALUPostProcessFct(
+                pEncoderContext->mH264NALUPostProcessCtx,
+                (M4OSA_UInt8*)buffer->data()+buffer->range_offset(),
+                buffer->range_length(),
+                (M4OSA_UInt8*)pEncoderContext->mAccessUnit->dataAddress,
+                &outputSize);
+            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+            pEncoderContext->mAccessUnit->size = (M4OSA_UInt32)outputSize;
+        } else {
+            // The AU can just be copied
+            memcpy((void *)pEncoderContext->mAccessUnit->\
+                dataAddress, (void *)((M4OSA_MemAddr8)(buffer->data())+buffer->\
+                range_offset()), buffer->range_length());
+            pEncoderContext->mAccessUnit->size =
+                (M4OSA_UInt32)buffer->range_length();
+        }
+
+        // Mark random-access points so the writer can index sync samples
+        if ( buffer->meta_data()->findInt32(kKeyIsSyncFrame,&i32Tmp) && i32Tmp){
+            pEncoderContext->mAccessUnit->attribute = AU_RAP;
+        } else {
+            pEncoderContext->mAccessUnit->attribute = AU_P_Frame;
+        }
+        pEncoderContext->mLastCTS = Cts;
+        pEncoderContext->mAccessUnit->CTS = Cts;
+        pEncoderContext->mAccessUnit->DTS = Cts;
+
+        ALOGV("VideoEditorVideoEncoder_processOutputBuffer: AU @ 0x%X 0x%X %d %d",
+            pEncoderContext->mAccessUnit->dataAddress,
+            *pEncoderContext->mAccessUnit->dataAddress,
+            pEncoderContext->mAccessUnit->size,
+            pEncoderContext->mAccessUnit->CTS);
+
+        // Write the AU
+        err = pEncoderContext->mWriterDataInterface->pProcessAU(
+            pEncoderContext->mWriterDataInterface->pWriterContext,
+            pEncoderContext->mAccessUnit->stream->streamID,
+            pEncoderContext->mAccessUnit);
+        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    }
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorVideoEncoder_processOutputBuffer no error");
+    } else {
+        // NOTE(review): on error the cached DSI header is dropped so a later
+        // getOption(EncoderHeader) fails fast — presumably intentional
+        SAFE_FREE(pEncoderContext->mHeader.pBuf);
+        pEncoderContext->mHeader.Size = 0;
+        ALOGV("VideoEditorVideoEncoder_processOutputBuffer ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorVideoEncoder_processOutputBuffer end");
+    return err;
+}
+
+/**
+ * Encodes one frame: queues the pre-processed input frame on the encoder
+ * source, then drains any encoded buffers already produced by the OMX
+ * encoder and forwards them to the writer.
+ *
+ * The component buffers input frames (BUFFERING) until either the encoder
+ * starts producing output or the configured maxPrefetchYUVFrames limit is
+ * reached, at which point it blocks draining output (READING).
+ *
+ * Fixes vs. previous revision: removed the outer `outputBuffer` local that
+ * was shadowed by the loop-local declaration (and never used), and the
+ * unused `result` local.
+ *
+ * @param pContext  encoder context (non-NULL)
+ * @param pInPlane  input image planes (only logged here; the actual pixels
+ *                  come from the pre-processing callback)
+ * @param Cts       composition timestamp in milliseconds
+ * @param FrameMode M4ENCODER_kLastFrame triggers an additional EOS push
+ * @return M4NO_ERROR on success, an M4OSA error code otherwise
+ */
+M4OSA_ERR VideoEditorVideoEncoder_encode(M4ENCODER_Context pContext,
+        M4VIFI_ImagePlane* pInPlane, M4OSA_Double Cts,
+        M4ENCODER_FrameMode FrameMode) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    ALOGV("VideoEditorVideoEncoder_encode 0x%X %f %d", pInPlane, Cts, FrameMode);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    // First frame after start(): enter the prefetch (buffering) phase
+    if ( STARTED == pEncoderContext->mState ) {
+        pEncoderContext->mState = BUFFERING;
+    }
+    VIDEOEDITOR_CHECK(
+        (BUFFERING | READING) & pEncoderContext->mState, M4ERR_STATE);
+
+    // Input-side statistics
+    pEncoderContext->mNbInputFrames++;
+    if ( 0 > pEncoderContext->mFirstInputCts ) {
+        pEncoderContext->mFirstInputCts = Cts;
+    }
+    pEncoderContext->mLastInputCts = Cts;
+
+    ALOGV("VideoEditorVideoEncoder_encode 0x%X %d %f (%d)", pInPlane, FrameMode,
+        Cts, pEncoderContext->mLastCTS);
+
+    // Push the input buffer to the encoder source
+    err = VideoEditorVideoEncoder_processInputBuffer(pEncoderContext, Cts,
+        M4OSA_FALSE);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+    // Notify the source in case of EOS
+    if ( M4ENCODER_kLastFrame == FrameMode ) {
+        err = VideoEditorVideoEncoder_processInputBuffer(
+            pEncoderContext, 0, M4OSA_TRUE);
+        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    }
+
+    if ( BUFFERING == pEncoderContext->mState ) {
+        // Prefetch is complete, start reading
+        pEncoderContext->mState = READING;
+    }
+    // Read
+    while (1) {
+        MediaBuffer *outputBuffer =
+                pEncoderContext->mPuller->getBufferNonBlocking();
+
+        if (outputBuffer == NULL) {
+            int32_t YUVBufferNumber =
+                    pEncoderContext->mEncoderSource->getNumberOfBuffersInQueue();
+            /* Make sure that the configured maximum number of prefetch YUV frames is
+             * not exceeded. This is to limit the amount of memory usage of video editor engine.
+             * The value of maximum prefetch Yuv frames is defined in media_profiles.xml */
+            if ((YUVBufferNumber < pEncoderContext->mMaxPrefetchFrames) ||
+                (pEncoderContext->mPuller->hasMediaSourceReturnedError()
+                    == true)) {
+                break;
+            }
+        } else {
+            // Provide the encoded AU to the writer
+            err = VideoEditorVideoEncoder_processOutputBuffer(pEncoderContext,
+                outputBuffer);
+            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+            pEncoderContext->mPuller->putBuffer(outputBuffer);
+        }
+    }
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorVideoEncoder_encode no error");
+    } else {
+        ALOGV("VideoEditorVideoEncoder_encode ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorVideoEncoder_encode end");
+    return err;
+}
+
+/**
+ * Starts the encoder component: resets the encoding statistics, starts the
+ * OMX encoder and the output-buffer puller thread, and transitions the
+ * component from OPENED to STARTED.
+ *
+ * @param pContext encoder context (non-NULL, state must be OPENED)
+ * @return M4NO_ERROR on success; M4ERR_PARAMETER / M4ERR_STATE otherwise
+ */
+M4OSA_ERR VideoEditorVideoEncoder_start(M4ENCODER_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pCtx = M4OSA_NULL;
+    status_t status = OK;
+
+    ALOGV("VideoEditorVideoEncoder_start begin");
+    // Reject a NULL handle and any state other than OPENED
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pCtx = (VideoEditorVideoEncoder_Context*)pContext;
+    VIDEOEDITOR_CHECK(OPENED == pCtx->mState, M4ERR_STATE);
+
+    // Reset per-session counters and timestamp trackers
+    pCtx->mNbInputFrames  = 0;
+    pCtx->mFirstInputCts  = -1.0;
+    pCtx->mLastInputCts   = -1.0;
+    pCtx->mNbOutputFrames = 0;
+    pCtx->mFirstOutputCts = -1;
+    pCtx->mLastOutputCts  = -1;
+
+    // Start the OMX encoder first, then the thread that drains its output
+    status = pCtx->mEncoder->start();
+    VIDEOEDITOR_CHECK(OK == status, M4ERR_STATE);
+    pCtx->mPuller->start();
+
+    // Transition to the running state
+    pCtx->mState = STARTED;
+
+cleanUp:
+    if (M4NO_ERROR == err) {
+        ALOGV("VideoEditorVideoEncoder_start no error");
+    } else {
+        ALOGV("VideoEditorVideoEncoder_start ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorVideoEncoder_start end");
+    return err;
+}
+
+/**
+ * Stops the encoder: signals EOS to the source, drains all remaining
+ * encoded buffers to the writer, stops the puller thread and the OMX
+ * encoder, and transitions the component back to OPENED.
+ *
+ * Fixes vs. previous revision: removed the outer `outputBuffer` local that
+ * was shadowed by the loop-local declaration (and never used), and the
+ * unused `result` local.
+ *
+ * @param pContext encoder context (non-NULL)
+ * @return M4NO_ERROR on success, an M4OSA error code otherwise
+ */
+M4OSA_ERR VideoEditorVideoEncoder_stop(M4ENCODER_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    ALOGV("VideoEditorVideoEncoder_stop begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+
+    // Send EOS again to make sure the source doesn't block.
+    err = VideoEditorVideoEncoder_processInputBuffer(pEncoderContext, 0,
+        M4OSA_TRUE);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+    // Process the remaining buffers if necessary
+    if ( (BUFFERING | READING) & pEncoderContext->mState ) {
+        // Blocking drain: getBufferBlocking() returns NULL once the
+        // encoder has delivered its final buffer after EOS
+        while (1) {
+            MediaBuffer *outputBuffer =
+                    pEncoderContext->mPuller->getBufferBlocking();
+
+            if (outputBuffer == NULL) break;
+
+            err = VideoEditorVideoEncoder_processOutputBuffer(
+                pEncoderContext, outputBuffer);
+            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+            pEncoderContext->mPuller->putBuffer(outputBuffer);
+        }
+
+        pEncoderContext->mState = STARTED;
+    }
+
+    // Stop the graph module if necessary
+    if ( STARTED == pEncoderContext->mState ) {
+        pEncoderContext->mPuller->stop();
+        pEncoderContext->mEncoder->stop();
+        pEncoderContext->mState = OPENED;
+    }
+
+    // Diagnostic only: frames can legitimately be dropped (e.g. CTS going
+    // backwards), so this is a warning, not an error
+    if (pEncoderContext->mNbInputFrames != pEncoderContext->mNbOutputFrames) {
+        ALOGW("Some frames were not encoded: input(%d) != output(%d)",
+            pEncoderContext->mNbInputFrames, pEncoderContext->mNbOutputFrames);
+    }
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorVideoEncoder_stop no error");
+    } else {
+        ALOGV("VideoEditorVideoEncoder_stop ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorVideoEncoder_stop end");
+    return err;
+}
+
+/**
+ * Bitrate-regulation hook required by the M4ENCODER interface.
+ * Intentionally a no-op here (rate control is left to the OMX encoder);
+ * always returns M4NO_ERROR so callers treat it as success.
+ */
+M4OSA_ERR VideoEditorVideoEncoder_regulBitRate(M4ENCODER_Context pContext) {
+    ALOGW("regulBitRate is not implemented");
+    return M4NO_ERROR;
+}
+
+/**
+ * Stores an encoder option in the context.
+ *
+ * Supported options: the H264 NALU post-processing callback and its opaque
+ * context (both used for the H264 trimming path in processOutputBuffer).
+ * Any other option id yields M4ERR_BAD_OPTION_ID.
+ *
+ * @param pContext    encoder context (non-NULL)
+ * @param optionID    one of the M4ENCODER_kOptionID_* values
+ * @param optionValue option payload, interpreted per optionID
+ * @return M4NO_ERROR, M4ERR_PARAMETER or M4ERR_BAD_OPTION_ID
+ */
+M4OSA_ERR VideoEditorVideoEncoder_setOption(M4ENCODER_Context pContext,
+        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pCtx = M4OSA_NULL;
+
+    ALOGV("VideoEditorVideoEncoder_setOption start optionID 0x%X", optionID);
+    // Validate the external handle before touching it
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pCtx = (VideoEditorVideoEncoder_Context*)pContext;
+
+    if (M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr == optionID) {
+        // Callback invoked on each encoded H264 NALU during trimming
+        pCtx->mH264NALUPostProcessFct =
+            (H264MCS_ProcessEncodedNALU_fct*)optionValue;
+    } else if (M4ENCODER_kOptionID_H264ProcessNALUContext == optionID) {
+        // Opaque context handed back to the NALU post-processing callback
+        pCtx->mH264NALUPostProcessCtx =
+            (M4OSA_Context)optionValue;
+    } else {
+        ALOGV("VideoEditorVideoEncoder_setOption: unsupported optionId 0x%X",
+            optionID);
+        VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+    }
+
+cleanUp:
+    if (M4NO_ERROR == err) {
+        ALOGV("VideoEditorVideoEncoder_setOption no error");
+    } else {
+        ALOGV("VideoEditorVideoEncoder_setOption ERROR 0x%X", err);
+    }
+    ALOGV("VideoEditorVideoEncoder_setOption end");
+    return err;
+}
+
+/**
+ * Retrieves an encoder option.
+ *
+ * Only M4ENCODER_kOptionID_EncoderHeader is supported: it returns a pointer
+ * to the cached DSI header (mHeader), which must have been populated
+ * beforehand (otherwise M4ERR_STATE). Any other id yields
+ * M4ERR_BAD_OPTION_ID.
+ *
+ * @param pContext    encoder context (non-NULL)
+ * @param optionID    option identifier
+ * @param optionValue out-parameter receiving the option payload
+ * @return M4NO_ERROR, M4ERR_PARAMETER, M4ERR_STATE or M4ERR_BAD_OPTION_ID
+ */
+M4OSA_ERR VideoEditorVideoEncoder_getOption(M4ENCODER_Context pContext,
+        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pCtx = M4OSA_NULL;
+
+    ALOGV("VideoEditorVideoEncoder_getOption begin optinId 0x%X", optionID);
+    // Validate the external handle before touching it
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pCtx = (VideoEditorVideoEncoder_Context*)pContext;
+
+    if (M4ENCODER_kOptionID_EncoderHeader == optionID) {
+        // The header is only available once the DSI has been cached
+        VIDEOEDITOR_CHECK(
+            M4OSA_NULL != pCtx->mHeader.pBuf, M4ERR_STATE);
+        *(M4ENCODER_Header**)optionValue = &(pCtx->mHeader);
+    } else {
+        ALOGV("VideoEditorVideoEncoder_getOption: unsupported optionId 0x%X",
+            optionID);
+        VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+    }
+
+cleanUp:
+    if (M4NO_ERROR == err) {
+        ALOGV("VideoEditorVideoEncoder_getOption no error");
+    } else {
+        ALOGV("VideoEditorVideoEncoder_getOption ERROR 0x%X", err);
+    }
+    return err;
+}
+
+/**
+ * Allocates and populates the M4ENCODER_GlobalInterface function table for
+ * the requested video format (H263 / MPEG4 / H264).
+ *
+ * Fixes vs. previous revision: the error path used to execute
+ * `*pEncoderInterface = M4OSA_NULL;` unconditionally, which (a) dereferenced
+ * pEncoderInterface even when the NULL-parameter check was the failure, and
+ * (b) leaked the SAFE_MALLOC'd interface when the format was unsupported.
+ * It now frees the allocation (SAFE_FREE also resets the pointer) and only
+ * touches *pEncoderInterface when the out-parameter itself is valid.
+ *
+ * @param format            target encoder format
+ * @param pFormat           out: echoes the requested format
+ * @param pEncoderInterface out: newly allocated interface table (caller owns)
+ * @param mode              open mode (unused here, forwarded by the wrappers)
+ * @return M4NO_ERROR on success, M4ERR_PARAMETER otherwise
+ */
+M4OSA_ERR VideoEditorVideoEncoder_getInterface(M4ENCODER_Format format,
+        M4ENCODER_Format* pFormat,
+        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
+    M4OSA_ERR err = M4NO_ERROR;
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pFormat, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pEncoderInterface, M4ERR_PARAMETER);
+
+    ALOGV("VideoEditorVideoEncoder_getInterface begin 0x%x 0x%x %d", pFormat,
+        pEncoderInterface, mode);
+
+    SAFE_MALLOC(*pEncoderInterface, M4ENCODER_GlobalInterface, 1,
+        "VideoEditorVideoEncoder");
+
+    *pFormat = format;
+
+    // Only the init entry point differs per format
+    switch( format ) {
+        case M4ENCODER_kH263:
+        {
+            (*pEncoderInterface)->pFctInit =
+                VideoEditorVideoEncoder_init_H263;
+            break;
+        }
+        case M4ENCODER_kMPEG4:
+        {
+            (*pEncoderInterface)->pFctInit =
+                VideoEditorVideoEncoder_init_MPEG4;
+            break;
+        }
+        case M4ENCODER_kH264:
+        {
+            (*pEncoderInterface)->pFctInit =
+                VideoEditorVideoEncoder_init_H264;
+            break;
+        }
+        default:
+            ALOGV("VideoEditorVideoEncoder_getInterface : unsupported format %d",
+                format);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+        break;
+    }
+    (*pEncoderInterface)->pFctOpen      = VideoEditorVideoEncoder_open;
+    (*pEncoderInterface)->pFctStart     = VideoEditorVideoEncoder_start;
+    (*pEncoderInterface)->pFctStop      = VideoEditorVideoEncoder_stop;
+    (*pEncoderInterface)->pFctPause     = M4OSA_NULL;
+    (*pEncoderInterface)->pFctResume    = M4OSA_NULL;
+    (*pEncoderInterface)->pFctClose     = VideoEditorVideoEncoder_close;
+    (*pEncoderInterface)->pFctCleanup   = VideoEditorVideoEncoder_cleanup;
+    (*pEncoderInterface)->pFctRegulBitRate =
+        VideoEditorVideoEncoder_regulBitRate;
+    (*pEncoderInterface)->pFctEncode    = VideoEditorVideoEncoder_encode;
+    (*pEncoderInterface)->pFctSetOption = VideoEditorVideoEncoder_setOption;
+    (*pEncoderInterface)->pFctGetOption = VideoEditorVideoEncoder_getOption;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        ALOGV("VideoEditorVideoEncoder_getInterface no error");
+    } else {
+        if (M4OSA_NULL != pEncoderInterface) {
+            // Release the partially initialized table and reset the caller's
+            // pointer (SAFE_FREE nulls it) to avoid a leak / dangling pointer
+            SAFE_FREE(*pEncoderInterface);
+        }
+        ALOGV("VideoEditorVideoEncoder_getInterface ERROR 0x%X", err);
+    }
+    return err;
+}
+
+extern "C" {
+
+/** C entry point for the H263 encoder: delegates to the shared factory. */
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_H263(M4ENCODER_Format* pFormat,
+    M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
+    return VideoEditorVideoEncoder_getInterface(
+            M4ENCODER_kH263, pFormat, pEncoderInterface, mode);
+}
+
+/** C entry point for the MPEG4 encoder: delegates to the shared factory. */
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_MPEG4(M4ENCODER_Format* pFormat,
+    M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
+    return VideoEditorVideoEncoder_getInterface(
+            M4ENCODER_kMPEG4, pFormat, pEncoderInterface, mode);
+}
+
+/** C entry point for the H264 encoder: delegates to the shared factory. */
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_H264(M4ENCODER_Format* pFormat,
+    M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
+    return VideoEditorVideoEncoder_getInterface(
+            M4ENCODER_kH264, pFormat, pEncoderInterface, mode);
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/libvideoeditor/vss/video_filters/Android.mk b/libvideoeditor/vss/video_filters/Android.mk
new file mode 100755
index 0000000..e2d2111
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/Android.mk
@@ -0,0 +1,5 @@
+#LOCAL_PATH:= $(call my-dir)
+#include $(CLEAR_VARS)
+
+#include $(call all-makefiles-under,$(LOCAL_PATH))
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/video_filters/src/Android.mk b/libvideoeditor/vss/video_filters/src/Android.mk
new file mode 100755
index 0000000..cc2540c
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/Android.mk
@@ -0,0 +1,57 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvideofilters
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_videofilters
+
+LOCAL_SRC_FILES:= \
+ M4VIFI_BGR565toYUV420.c \
+ M4VIFI_ResizeRGB888toRGB888.c \
+ M4VIFI_ResizeRGB565toRGB565.c \
+ M4VIFI_Clip.c \
+ M4VIFI_ResizeYUVtoBGR565.c \
+ M4VIFI_RGB888toYUV420.c \
+ M4VIFI_RGB565toYUV420.c \
+ M4VFL_transition.c
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_STATIC_LIBRARIES := \
+ libvideoeditor_osal
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/libvideoeditor/osal/inc \
+ $(TOP)/frameworks/av/libvideoeditor/vss/common/inc
+
+LOCAL_SHARED_LIBRARIES += libdl
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+ -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VFL_transition.c b/libvideoeditor/vss/video_filters/src/M4VFL_transition.c
new file mode 100755
index 0000000..6a5e0b6
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VFL_transition.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4TRAN_transition.c
+ * @brief
+ ******************************************************************************
+*/
+
+/**
+ * OSAL (memset and memcpy) ***/
+#include "M4OSA_Memory.h"
+
+#include "M4VFL_transition.h"
+
+#include <string.h>
+
+#ifdef LITTLE_ENDIAN
+#define M4VFL_SWAP_SHORT(a) a = ((a & 0xFF) << 8) | ((a & 0xFF00) >> 8)
+#else
+#define M4VFL_SWAP_SHORT(a)
+#endif
+
+#define LUM_FACTOR_MAX 10
+
+
+unsigned char M4VFL_modifyLumaByStep(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+ M4VFL_ModifLumParam *lum_param, void *user_data)
+{
+ unsigned short *p_src, *p_dest, *p_src_line, *p_dest_line;
+ unsigned long pix_src;
+ unsigned long u_outpx, u_outpx2;
+ unsigned long u_width, u_stride, u_stride_out,u_height, pix;
+ unsigned long lf1, lf2, lf3;
+ long i, j;
+
+ if (lum_param->copy_chroma != 0)
+ {
+ /* copy chroma plane */
+
+ }
+
+ /* apply luma factor */
+ u_width = plane_in[0].u_width;
+ u_height = plane_in[0].u_height;
+ u_stride = (plane_in[0].u_stride >> 1);
+ u_stride_out = (plane_out[0].u_stride >> 1);
+ p_dest = (unsigned short *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+ p_src = (unsigned short *) &plane_in[0].pac_data[plane_in[0].u_topleft];
+ p_dest_line = p_dest;
+ p_src_line = p_src;
+
+ switch(lum_param->lum_factor)
+ {
+ case 0:
+ /* very specific case : set luma plane to 16 */
+ for (j = u_height; j != 0; j--)
+ {
+ memset((void *)p_dest,16, u_width);
+ p_dest += u_stride_out;
+ }
+ return 0;
+
+ case 1:
+ /* 0.25 */
+ lf1 = 6; lf2 = 6; lf3 = 7;
+ break;
+ case 2:
+ /* 0.375 */
+ lf1 = 7; lf2 = 7; lf3 = 7;
+ break;
+ case 3:
+ /* 0.5 */
+ lf1 = 7; lf2 = 7; lf3 = 8;
+ break;
+ case 4:
+ /* 0.625 */
+ lf1 = 7; lf2 = 8; lf3 = 8;
+ break;
+ case 5:
+ /* 0.75 */
+ lf1 = 8; lf2 = 8; lf3 = 8;
+ break;
+ case 6:
+ /* 0.875 */
+ lf1 = 9; lf2 = 8; lf3 = 7;
+ break;
+ default:
+ lf1 = 8; lf2 = 8; lf3 = 9;
+ break;
+ }
+
+ for (j = u_height; j != 0; j--)
+ {
+ p_dest = p_dest_line;
+ p_src = p_src_line;
+ for (i = (u_width >> 1); i != 0; i--)
+ {
+ pix_src = (unsigned long) *p_src++;
+ pix = pix_src & 0xFF;
+ u_outpx = (((pix << lf1) + (pix << lf2) + (pix << lf3) ) >> LUM_FACTOR_MAX);
+ pix = ((pix_src & 0xFF00) >> 8);
+ u_outpx2 = ((((pix << lf1) + (pix << lf2) + (pix << lf3) ) >> LUM_FACTOR_MAX)<< 8) ;
+ *p_dest++ = (unsigned short) (u_outpx2 | u_outpx);
+ }
+ p_dest_line += u_stride_out;
+ p_src_line += u_stride;
+ }
+ return 0;
+}
+
+
+unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in,
+ M4ViComImagePlane *plane_out,
+ unsigned long lum_factor,
+ void *user_data)
+{
+ unsigned short *p_src, *p_dest, *p_src_line, *p_dest_line;
+ unsigned char *p_csrc, *p_cdest, *p_csrc_line, *p_cdest_line;
+ unsigned long pix_src;
+ unsigned long u_outpx, u_outpx2;
+ unsigned long u_width, u_stride, u_stride_out,u_height, pix;
+ long i, j;
+
+ /* copy or filter chroma */
+ u_width = plane_in[1].u_width;
+ u_height = plane_in[1].u_height;
+ u_stride = plane_in[1].u_stride;
+ u_stride_out = plane_out[1].u_stride;
+ p_cdest_line = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
+ p_csrc_line = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft];
+
+ if (lum_factor > 256)
+ {
+ p_cdest = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+ p_csrc = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
+ /* copy chroma */
+ for (j = u_height; j != 0; j--)
+ {
+ for (i = u_width; i != 0; i--)
+ {
+ memcpy((void *)p_cdest_line, (void *)p_csrc_line, u_width);
+ memcpy((void *)p_cdest,(void *) p_csrc, u_width);
+ }
+ p_cdest_line += u_stride_out;
+ p_cdest += u_stride_out;
+ p_csrc_line += u_stride;
+ p_csrc += u_stride;
+ }
+ }
+ else
+ {
+ /* filter chroma */
+ pix = (1024 - lum_factor) << 7;
+ for (j = u_height; j != 0; j--)
+ {
+ p_cdest = p_cdest_line;
+ p_csrc = p_csrc_line;
+ for (i = u_width; i != 0; i--)
+ {
+ *p_cdest++ = ((pix + (*p_csrc++ & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
+ }
+ p_cdest_line += u_stride_out;
+ p_csrc_line += u_stride;
+ }
+ p_cdest_line = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+ p_csrc_line = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
+ for (j = u_height; j != 0; j--)
+ {
+ p_cdest = p_cdest_line;
+ p_csrc = p_csrc_line;
+ for (i = u_width; i != 0; i--)
+ {
+ *p_cdest++ = ((pix + (*p_csrc & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
+ }
+ p_cdest_line += u_stride_out;
+ p_csrc_line += u_stride;
+ }
+ }
+ /* apply luma factor */
+ u_width = plane_in[0].u_width;
+ u_height = plane_in[0].u_height;
+ u_stride = (plane_in[0].u_stride >> 1);
+ u_stride_out = (plane_out[0].u_stride >> 1);
+ p_dest = (unsigned short *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+ p_src = (unsigned short *) &plane_in[0].pac_data[plane_in[0].u_topleft];
+ p_dest_line = p_dest;
+ p_src_line = p_src;
+
+ for (j = u_height; j != 0; j--)
+ {
+ p_dest = p_dest_line;
+ p_src = p_src_line;
+ for (i = (u_width >> 1); i != 0; i--)
+ {
+ pix_src = (unsigned long) *p_src++;
+ pix = pix_src & 0xFF;
+ u_outpx = ((pix * lum_factor) >> LUM_FACTOR_MAX);
+ pix = ((pix_src & 0xFF00) >> 8);
+ u_outpx2 = (((pix * lum_factor) >> LUM_FACTOR_MAX)<< 8) ;
+ *p_dest++ = (unsigned short) (u_outpx2 | u_outpx);
+ }
+ p_dest_line += u_stride_out;
+ p_src_line += u_stride;
+ }
+
+ return 0;
+}
+
+/**
+ *************************************************************************************************
+ * M4OSA_ERR M4VIFI_ImageBlendingonYUV420 (void *pUserData,
+ * M4VIFI_ImagePlane *pPlaneIn1,
+ * M4VIFI_ImagePlane *pPlaneIn2,
+ * M4VIFI_ImagePlane *pPlaneOut,
+ * UInt32 Progress)
+ * @brief Blends two YUV 4:2:0 Planar images.
+ * @note Blends YUV420 planar images,
+ * Map the value of progress from (0 - 1000) to (0 - 1024)
+ * Set the range of blendingfactor,
+ * 1. from 0 to (Progress << 1) ;for Progress <= 512
+ * 2. from (( Progress - 512)<< 1) to 1024 ;otherwise
+ * Set the increment of blendingfactor for each element in the image row by the factor,
+ * = (Range-1) / (image width-1) ;for width >= range
+ * = (Range) / (image width) ;otherwise
+ * Loop on each(= i) row of output Y plane (steps of 2)
+ * Loop on each(= j) column of output Y plane (steps of 2)
+ * Get four Y samples and one U & V sample from two input YUV4:2:0 images and
+ * Compute four Y sample and one U & V sample for output YUV4:2:0 image
+ * using the following,
+ * Out(i,j) = blendingfactor(i,j) * In1(i,j)+ (l - blendingfactor(i,j)) *In2(i,j)
+ * end loop column
+ * end loop row.
+ * @param pUserData: (IN) User Specific Parameter
+ * @param pPlaneIn1: (IN) Pointer to an array of image plane structures maintained
+ * for Y, U and V planes.
+ * @param pPlaneIn2: (IN) Pointer to an array of image plane structures maintained
+ * for Y, U and V planes.
+ * @param pPlaneOut: (OUT) Pointer to an array of image plane structures maintained
+ * for Y, U and V planes.
+ * @param Progress: (IN) Progress value (varies between 0 and 1000)
+ * @return M4VIFI_OK: No error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: Error in width
+ *************************************************************************************************
+*/
+
+/** Check for value is EVEN */
+#ifndef IS_EVEN
+#define IS_EVEN(a) (!(a & 0x01))
+#endif
+
+/** Used for fixed point implementation */
+#ifndef MAX_SHORT
+#define MAX_SHORT 0x10000
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#define TRUE !FALSE
+#endif
+
unsigned char M4VIFI_ImageBlendingonYUV420 (void *pUserData,
                                                    M4ViComImagePlane *pPlaneIn1,
                                                    M4ViComImagePlane *pPlaneIn2,
                                                    M4ViComImagePlane *pPlaneOut,
                                                    UInt32 Progress)
{
    /* See the doxygen header above for the full contract. The *_start*
     * pointers walk rows; *_current* / *_next* walk the two luma rows of
     * the current 2x2 output cell. */
    UInt8    *pu8_data_Y_start1,*pu8_data_U_start1,*pu8_data_V_start1;
    UInt8    *pu8_data_Y_start2,*pu8_data_U_start2,*pu8_data_V_start2;
    UInt8    *pu8_data_Y_start3,*pu8_data_U_start3,*pu8_data_V_start3;
    UInt8    *pu8_data_Y_current1, *pu8_data_Y_next1, *pu8_data_U1, *pu8_data_V1;
    UInt8    *pu8_data_Y_current2, *pu8_data_Y_next2, *pu8_data_U2, *pu8_data_V2;
    UInt8    *pu8_data_Y_current3,*pu8_data_Y_next3, *pu8_data_U3, *pu8_data_V3;
    UInt32    u32_stride_Y1, u32_stride2_Y1, u32_stride_U1, u32_stride_V1;
    UInt32    u32_stride_Y2, u32_stride2_Y2, u32_stride_U2, u32_stride_V2;
    UInt32    u32_stride_Y3, u32_stride2_Y3, u32_stride_U3, u32_stride_V3;
    UInt32    u32_height, u32_width;
    UInt32    u32_blendfactor, u32_startA, u32_endA, u32_blend_inc, u32_x_accum;
    UInt32    u32_col, u32_row, u32_rangeA, u32_progress;
    UInt32    u32_U1,u32_V1,u32_U2,u32_V2, u32_Y1, u32_Y2;


    /* Check the Y plane height is EVEN and image plane heights are same */
    if( (IS_EVEN(pPlaneIn1[0].u_height) == FALSE) ||
        (IS_EVEN(pPlaneIn2[0].u_height) == FALSE) ||
        (IS_EVEN(pPlaneOut[0].u_height) == FALSE) ||
        (pPlaneIn1[0].u_height != pPlaneOut[0].u_height) ||
        (pPlaneIn2[0].u_height != pPlaneOut[0].u_height) )
    {
        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
    }

    /* Check the Y plane width is EVEN and image plane widths are same */
    if( (IS_EVEN(pPlaneIn1[0].u_width) == FALSE) ||
        (IS_EVEN(pPlaneIn2[0].u_width) == FALSE) ||
        (IS_EVEN(pPlaneOut[0].u_width) == FALSE) ||
        (pPlaneIn1[0].u_width != pPlaneOut[0].u_width) ||
        (pPlaneIn2[0].u_width != pPlaneOut[0].u_width) )
    {
        return M4VIFI_ILLEGAL_FRAME_WIDTH;
    }
    /* NOTE(review): only the luma planes' dimensions are validated; the
     * chroma planes are assumed to be half-size in both dimensions
     * (standard YUV420) — confirm against callers. */

    /* Set the pointer to the beginning of the input1 YUV420 image planes */
    pu8_data_Y_start1 = pPlaneIn1[0].pac_data + pPlaneIn1[0].u_topleft;
    pu8_data_U_start1 = pPlaneIn1[1].pac_data + pPlaneIn1[1].u_topleft;
    pu8_data_V_start1 = pPlaneIn1[2].pac_data + pPlaneIn1[2].u_topleft;

    /* Set the pointer to the beginning of the input2 YUV420 image planes */
    pu8_data_Y_start2 = pPlaneIn2[0].pac_data + pPlaneIn2[0].u_topleft;
    pu8_data_U_start2 = pPlaneIn2[1].pac_data + pPlaneIn2[1].u_topleft;
    pu8_data_V_start2 = pPlaneIn2[2].pac_data + pPlaneIn2[2].u_topleft;

    /* Set the pointer to the beginning of the output YUV420 image planes */
    pu8_data_Y_start3 = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
    pu8_data_U_start3 = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
    pu8_data_V_start3 = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;

    /* Set the stride for the next row in each input1 YUV420 plane */
    u32_stride_Y1 = pPlaneIn1[0].u_stride;
    u32_stride_U1 = pPlaneIn1[1].u_stride;
    u32_stride_V1 = pPlaneIn1[2].u_stride;

    /* Set the stride for the next row in each input2 YUV420 plane */
    u32_stride_Y2 = pPlaneIn2[0].u_stride;
    u32_stride_U2 = pPlaneIn2[1].u_stride;
    u32_stride_V2 = pPlaneIn2[2].u_stride;

    /* Set the stride for the next row in each output YUV420 plane */
    u32_stride_Y3 = pPlaneOut[0].u_stride;
    u32_stride_U3 = pPlaneOut[1].u_stride;
    u32_stride_V3 = pPlaneOut[2].u_stride;

    /* Double luma strides: the outer loop emits two luma rows per pass. */
    u32_stride2_Y1 = u32_stride_Y1 << 1;
    u32_stride2_Y2 = u32_stride_Y2 << 1;
    u32_stride2_Y3 = u32_stride_Y3 << 1;

    /* Get the size of the output image */
    u32_height = pPlaneOut[0].u_height;
    u32_width = pPlaneOut[0].u_width;

    /* User Specified Progress value */
    u32_progress = Progress;

    /* Map Progress value from (0 - 1000) to (0 - 1024) -> for optimisation */
    if(u32_progress < 1000)
        u32_progress = ((u32_progress << 10) / 1000);
    else
        u32_progress = 1024;

    /* Set the range of blendingfactor */
    if(u32_progress <= 512)
    {
        u32_startA = 0;
        u32_endA = (u32_progress << 1);
    }
    else /* u32_progress > 512 */
    {
        u32_startA = (u32_progress - 512) << 1;
        u32_endA = 1024;
    }
    u32_rangeA = u32_endA - u32_startA;

    /* Set the increment of blendingfactor for each element in the image row.
     * The increment is Q16 fixed point (accumulated in u32_x_accum). */
    if ((u32_width >= u32_rangeA) && (u32_rangeA > 0) )
    {
        u32_blend_inc = ((u32_rangeA-1) * MAX_SHORT) / (u32_width - 1);
    }
    else /* (u32_width < u32_rangeA) || (u32_rangeA == 0) */
    {
        u32_blend_inc = (u32_rangeA * MAX_SHORT) / (u32_width);
    }

    /* Two YUV420 rows are computed at each pass */
    for (u32_row = u32_height; u32_row != 0; u32_row -=2)
    {
        /* Set pointers to the beginning of the row for each input image1 plane */
        pu8_data_Y_current1 = pu8_data_Y_start1;
        pu8_data_U1 = pu8_data_U_start1;
        pu8_data_V1 = pu8_data_V_start1;

        /* Set pointers to the beginning of the row for each input image2 plane */
        pu8_data_Y_current2 = pu8_data_Y_start2;
        pu8_data_U2 = pu8_data_U_start2;
        pu8_data_V2 = pu8_data_V_start2;

        /* Set pointers to the beginning of the row for each output image plane */
        pu8_data_Y_current3 = pu8_data_Y_start3;
        pu8_data_U3 = pu8_data_U_start3;
        pu8_data_V3 = pu8_data_V_start3;

        /* Set pointers to the beginning of the next row for image luma plane */
        pu8_data_Y_next1 = pu8_data_Y_current1 + u32_stride_Y1;
        pu8_data_Y_next2 = pu8_data_Y_current2 + u32_stride_Y2;
        pu8_data_Y_next3 = pu8_data_Y_current3 + u32_stride_Y3;

        /* Initialise blendfactor */
        u32_blendfactor   = u32_startA;
        /* Blendfactor Increment accumulator */
        u32_x_accum = 0;

        /* Loop on each column of the output image: one 2x2 luma cell plus
         * one U and one V sample per iteration */
        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
        {
            /* Update the blending factor */
            u32_blendfactor = u32_startA + (u32_x_accum >> 16);

            /* Get Luma value (x,y) of input Image1 */
            u32_Y1 = *pu8_data_Y_current1++;

            /* Get chrominance2 value */
            u32_U1 = *pu8_data_U1++;
            u32_V1 = *pu8_data_V1++;

            /* Get Luma value (x,y) of input Image2 */
            u32_Y2 = *pu8_data_Y_current2++;

            /* Get chrominance2 value */
            u32_U2 = *pu8_data_U2++;
            u32_V2 = *pu8_data_V2++;

            /* Compute Luma value (x,y) of Output image */
            *pu8_data_Y_current3++  = (UInt8)((u32_blendfactor * u32_Y2 +
                                                (1024 - u32_blendfactor)*u32_Y1) >> 10);
            /* Compute chroma(U) value of Output image */
            *pu8_data_U3++          = (UInt8)((u32_blendfactor * u32_U2 +
                                                (1024 - u32_blendfactor)*u32_U1) >> 10);
            /* Compute chroma(V) value of Output image */
            *pu8_data_V3++          = (UInt8)((u32_blendfactor * u32_V2 +
                                                (1024 - u32_blendfactor)*u32_V1) >> 10);

            /* Get Luma value (x,y+1) of input Image1 */
            u32_Y1 = *pu8_data_Y_next1++;

            /* Get Luma value (x,y+1) of input Image2 */
            u32_Y2 = *pu8_data_Y_next2++;

            /* Compute Luma value (x,y+1) of Output image*/
            *pu8_data_Y_next3++ = (UInt8)((u32_blendfactor * u32_Y2 +
                                            (1024 - u32_blendfactor)*u32_Y1) >> 10);
            /* Update accumulator */
            u32_x_accum += u32_blend_inc;

            /* Update the blending factor */
            u32_blendfactor = u32_startA + (u32_x_accum >> 16);

            /* Get Luma value (x+1,y) of input Image1 */
            u32_Y1 = *pu8_data_Y_current1++;

            /* Get Luma value (x+1,y) of input Image2 */
            u32_Y2 = *pu8_data_Y_current2++;

            /* Compute Luma value (x+1,y) of Output image*/
            *pu8_data_Y_current3++ = (UInt8)((u32_blendfactor * u32_Y2 +
                                                (1024 - u32_blendfactor)*u32_Y1) >> 10);

            /* Get Luma value (x+1,y+1) of input Image1 */
            u32_Y1 = *pu8_data_Y_next1++;

            /* Get Luma value (x+1,y+1) of input Image2 */
            u32_Y2 = *pu8_data_Y_next2++;

            /* Compute Luma value (x+1,y+1) of Output image*/
            *pu8_data_Y_next3++ = (UInt8)((u32_blendfactor * u32_Y2 +
                                            (1024 - u32_blendfactor)*u32_Y1) >> 10);
            /* Update accumulator */
            u32_x_accum += u32_blend_inc;

            /* Working pointers are incremented just after each storage */

        }/* End of row scanning */

        /* Update working pointer of input image1 for next row.
         * Luma advances two rows; chroma one (half vertical resolution). */
        pu8_data_Y_start1 += u32_stride2_Y1;
        pu8_data_U_start1 += u32_stride_U1;
        pu8_data_V_start1 += u32_stride_V1;

        /* Update working pointer of input image2 for next row */
        pu8_data_Y_start2 += u32_stride2_Y2;
        pu8_data_U_start2 += u32_stride_U2;
        pu8_data_V_start2 += u32_stride_V2;

        /* Update working pointer of output image for next row */
        pu8_data_Y_start3 += u32_stride2_Y3;
        pu8_data_U_start3 += u32_stride_U3;
        pu8_data_V_start3 += u32_stride_V3;

    }/* End of column scanning */

    return M4VIFI_OK;
}
+/* End of file M4VIFI_ImageBlendingonYUV420.c */
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c
new file mode 100755
index 0000000..c608767
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ****************************************************************************************
+ * @file M4VIFI_BGR565toYUV420.c
+ * @brief Contain video library function
+ * @note Color Conversion Filter
+ * -# Contains the format conversion filters from BGR565 to YUV420
+ ****************************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+/**
+ *****************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_BGR565toYUV420 (void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @brief Transform BGR565 image to a YUV420 image.
+ * @note Convert BGR565 to YUV420,
+ * Loop on each row ( 2 rows by 2 rows )
+ * Loop on each column ( 2 col by 2 col )
+ * Get 4 BGR samples from input data and build 4 output Y samples
+ * and each single U & V data
+ * end loop on col
+ * end loop on row
+ * @param pUserData: (IN) User Specific Data
+ * @param pPlaneIn: (IN) Pointer to BGR565 Plane
+ * @param pPlaneOut: (OUT) Pointer to YUV420 buffer Plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: YUV Plane width is ODD
+ *****************************************************************************************
+*/
+
M4VIFI_UInt8    M4VIFI_BGR565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
                                                      M4VIFI_ImagePlane *pPlaneOut)
{
    M4VIFI_UInt32   u32_width, u32_height;
    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
    M4VIFI_UInt32   u32_stride_bgr, u32_stride_2bgr;
    M4VIFI_UInt32   u32_col, u32_row;

    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
    M4VIFI_UInt8    *pu8_bgrn_data, *pu8_bgrn;
    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;

    /* Check planes height are appropriate: chroma planes must be exactly
     * half the luma height (YUV420). */
    if( (pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
    {
        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
    }

    /* Check planes width are appropriate: chroma planes must be exactly
     * half the luma width. */
    if( (pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
    {
        return M4VIFI_ILLEGAL_FRAME_WIDTH;
    }

    /* Set the pointer to the beginning of the output data buffers */
    pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
    pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
    pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;

    /* Set the pointer to the beginning of the input data buffers */
    pu8_bgrn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;

    /* Get the size of the output image */
    u32_width = pPlaneOut[0].u_width;
    u32_height = pPlaneOut[0].u_height;

    /* Set the size of the memory jumps corresponding to row jump in each output plane */
    u32_stride_Y = pPlaneOut[0].u_stride;
    u32_stride2_Y = u32_stride_Y << 1;        /* two luma rows per pass */
    u32_stride_U = pPlaneOut[1].u_stride;
    u32_stride_V = pPlaneOut[2].u_stride;

    /* Set the size of the memory jumps corresponding to row jump in input plane */
    u32_stride_bgr = pPlaneIn->u_stride;
    u32_stride_2bgr = u32_stride_bgr << 1;

    /* Loop on each row of the output image, input coordinates are estimated from output ones */
    /* Two YUV rows are computed at each pass */
    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
    {
        /* Current Y plane row pointers */
        pu8_yn = pu8_y_data;
        /* Next Y plane row pointers */
        pu8_ys = pu8_yn + u32_stride_Y;
        /* Current U plane row pointer */
        pu8_u = pu8_u_data;
        /* Current V plane row pointer */
        pu8_v = pu8_v_data;

        pu8_bgrn = pu8_bgrn_data;

        /* Loop on each column of the output image: one 2x2 pixel cell
         * per iteration, producing 4 Y and one averaged U and V. */
        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
        {
            /* Get four BGR 565 samples from input data.
             * NOTE(review): these 16-bit loads assume the BGR rows are
             * 2-byte aligned — confirm for the buffers used by callers. */
            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_bgrn);
            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_bgrn + CST_RGB_16_SIZE));
            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_bgrn + u32_stride_bgr));
            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_bgrn + u32_stride_bgr + CST_RGB_16_SIZE));
            /* Unpack RGB565 to 8bit R, G, B */
            /* (x,y) */
            GET_BGR565(i32_b00, i32_g00, i32_r00, u16_pix1);
            /* (x+1,y) */
            GET_BGR565(i32_b10, i32_g10, i32_r10, u16_pix2);
            /* (x,y+1) */
            GET_BGR565(i32_b01, i32_g01, i32_r01, u16_pix3);
            /* (x+1,y+1) */
            GET_BGR565(i32_b11, i32_g11, i32_r11, u16_pix4);

            /* Convert BGR value to YUV */
            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
            /* luminance value */
            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);

            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
            /* luminance value */
            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);

            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
            /* luminance value */
            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);

            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
            /* luminance value */
            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);

            /* Store luminance data */
            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;

            /* Store chroma data: average of the 2x2 cell, +2 for rounding */
            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);

            /* Prepare for next column */
            pu8_bgrn += (CST_RGB_16_SIZE<<1);
            /* Update current Y plane line pointer*/
            pu8_yn += 2;
            /* Update next Y plane line pointer*/
            pu8_ys += 2;
            /* Update U plane line pointer*/
            pu8_u ++;
            /* Update V plane line pointer*/
            pu8_v ++;
        } /* End of horizontal scanning */

        /* Prepare pointers for the next row */
        pu8_y_data += u32_stride2_Y;
        pu8_u_data += u32_stride_U;
        pu8_v_data += u32_stride_V;
        pu8_bgrn_data += u32_stride_2bgr;

    } /* End of vertical scanning */

    return M4VIFI_OK;
}
+/* End of file M4VIFI_BGR565toYUV420.c */
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c b/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c
new file mode 100755
index 0000000..e4290b1
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file M4VIFI_Clip.c
+ * @brief Management of the RGB Clipping matrix inclusion and Division Table
+ * @note -# Clipping Matrix is used in order to properly manage the inclusion of
+ * the external RGB Clipping matrix used for color conversion.
+ * This file HAS TO BE compiled with all color conversion filters project
+ * -# Division table is used in RGB to HLS color conversion
+ * Important: This file must be compiled during the assembly library building
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+
/* Clamp LUT for RGB conversion results: 500 leading 0x00 entries, the
 * identity ramp 0x00..0xff, then 500 trailing 0xff entries (1256 total).
 * NOTE(review): the implied indexing bias (value + 500) is inferred from
 * the table layout; confirm against the macros that index it. */
CNST M4VIFI_UInt8  M4VIFI_ClipTable[1256]
= {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33,
0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43,
0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b,
0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63,
0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b,
0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b,
0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93,
0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3,
0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab,
0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb,
0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3,
0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb,
0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3,
0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb,
0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3,
0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb,
0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3,
0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb,
0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
+
+/* Division table for ( 65535/x ); x = 0 to 512 */
+CNST M4VIFI_UInt16 M4VIFI_DivTable[512]
+= {
+0, 65535, 32768, 21845, 16384, 13107, 10922, 9362,
+8192, 7281, 6553, 5957, 5461, 5041, 4681, 4369,
+4096, 3855, 3640, 3449, 3276, 3120, 2978, 2849,
+2730, 2621, 2520, 2427, 2340, 2259, 2184, 2114,
+2048, 1985, 1927, 1872, 1820, 1771, 1724, 1680,
+1638, 1598, 1560, 1524, 1489, 1456, 1424, 1394,
+1365, 1337, 1310, 1285, 1260, 1236, 1213, 1191,
+1170, 1149, 1129, 1110, 1092, 1074, 1057, 1040,
+1024, 1008, 992, 978, 963, 949, 936, 923,
+910, 897, 885, 873, 862, 851, 840, 829,
+819, 809, 799, 789, 780, 771, 762, 753,
+744, 736, 728, 720, 712, 704, 697, 689,
+682, 675, 668, 661, 655, 648, 642, 636,
+630, 624, 618, 612, 606, 601, 595, 590,
+585, 579, 574, 569, 564, 560, 555, 550,
+546, 541, 537, 532, 528, 524, 520, 516,
+512, 508, 504, 500, 496, 492, 489, 485,
+481, 478, 474, 471, 468, 464, 461, 458,
+455, 451, 448, 445, 442, 439, 436, 434,
+431, 428, 425, 422, 420, 417, 414, 412,
+409, 407, 404, 402, 399, 397, 394, 392,
+390, 387, 385, 383, 381, 378, 376, 374,
+372, 370, 368, 366, 364, 362, 360, 358,
+356, 354, 352, 350, 348, 346, 344, 343,
+341, 339, 337, 336, 334, 332, 330, 329,
+327, 326, 324, 322, 321, 319, 318, 316,
+315, 313, 312, 310, 309, 307, 306, 304,
+303, 302, 300, 299, 297, 296, 295, 293,
+292, 291, 289, 288, 287, 286, 284, 283,
+282, 281, 280, 278, 277, 276, 275, 274,
+273, 271, 270, 269, 268, 267, 266, 265,
+264, 263, 262, 261, 260, 259, 258, 257,
+256, 255, 254, 253, 252, 251, 250, 249,
+248, 247, 246, 245, 244, 243, 242, 241,
+240, 240, 239, 238, 237, 236, 235, 234,
+234, 233, 232, 231, 230, 229, 229, 228,
+227, 226, 225, 225, 224, 223, 222, 222,
+221, 220, 219, 219, 218, 217, 217, 216,
+215, 214, 214, 213, 212, 212, 211, 210,
+210, 209, 208, 208, 207, 206, 206, 205,
+204, 204, 203, 202, 202, 201, 201, 200,
+199, 199, 198, 197, 197, 196, 196, 195,
+195, 194, 193, 193, 192, 192, 191, 191,
+190, 189, 189, 188, 188, 187, 187, 186,
+186, 185, 185, 184, 184, 183, 183, 182,
+182, 181, 181, 180, 180, 179, 179, 178,
+178, 177, 177, 176, 176, 175, 175, 174,
+174, 173, 173, 172, 172, 172, 171, 171,
+170, 170, 169, 169, 168, 168, 168, 167,
+167, 166, 166, 165, 165, 165, 164, 164,
+163, 163, 163, 162, 162, 161, 161, 161,
+160, 160, 159, 159, 159, 158, 158, 157,
+157, 157, 156, 156, 156, 155, 155, 154,
+154, 154, 153, 153, 153, 152, 152, 152,
+151, 151, 151, 150, 150, 149, 149, 149,
+148, 148, 148, 147, 147, 147, 146, 146,
+146, 145, 145, 145, 144, 144, 144, 144,
+143, 143, 143, 142, 142, 142, 141, 141,
+141, 140, 140, 140, 140, 139, 139, 139,
+138, 138, 138, 137, 137, 137, 137, 136,
+136, 136, 135, 135, 135, 135, 134, 134,
+134, 134, 133, 133, 133, 132, 132, 132,
+132, 131, 131, 131, 131, 130, 130, 130,
+130, 129, 129, 129, 129, 128, 128, 128
+};
+
+CNST M4VIFI_Int32 const_storage1[8]
+= {
+0x00002568, 0x00003343,0x00000649,0x00000d0f, 0x0000D86C, 0x0000D83B, 0x00010000, 0x00010000
+};
+
+CNST M4VIFI_Int32 const_storage[8]
+= {
+0x00002568, 0x00003343, 0x1BF800, 0x00000649, 0x00000d0f, 0x110180, 0x40cf, 0x22BE00
+};
+
+
+CNST M4VIFI_UInt16 *M4VIFI_DivTable_zero
+ = &M4VIFI_DivTable[0];
+
+CNST M4VIFI_UInt8 *M4VIFI_ClipTable_zero
+ = &M4VIFI_ClipTable[500];
+
+
+/* End of file M4VIFI_Clip.c */
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c
new file mode 100755
index 0000000..34cbd57
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB565toYUV420.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @brief Contain video library function
+ * @note Color Conversion Filter
+ * Contains the format conversion filters from RGB565 to YUV420
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+
+/**
+ ******************************************************************************
+ * M4VIFI_UInt8 M4VIFI_RGB565toYUV420 (void *pUserData,
+ * M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @brief transform RGB565 image to a YUV420 image.
+ * @note Convert RGB565 to YUV420,
+ * Loop on each row ( 2 rows by 2 rows )
+ * Loop on each column ( 2 col by 2 col )
+ * Get 4 RGB samples from input data and build 4 output Y samples
+ * and each single U & V data
+ * end loop on col
+ * end loop on row
+ * @param pUserData: (IN) User Specific Data
+ * @param pPlaneIn: (IN) Pointer to RGB565 Plane
+ * @param pPlaneOut: (OUT) Pointer to YUV420 buffer Plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: YUV Plane width is ODD
+ ******************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut)
+{
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
+ M4VIFI_UInt32 u32_stride_rgb, u32_stride_2rgb;
+ M4VIFI_UInt32 u32_col, u32_row;
+
+ M4VIFI_Int32 i32_r00, i32_r01, i32_r10, i32_r11;
+ M4VIFI_Int32 i32_g00, i32_g01, i32_g10, i32_g11;
+ M4VIFI_Int32 i32_b00, i32_b01, i32_b10, i32_b11;
+ M4VIFI_Int32 i32_y00, i32_y01, i32_y10, i32_y11;
+ M4VIFI_Int32 i32_u00, i32_u01, i32_u10, i32_u11;
+ M4VIFI_Int32 i32_v00, i32_v01, i32_v10, i32_v11;
+ M4VIFI_UInt8 *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+ M4VIFI_UInt8 *pu8_y_data, *pu8_u_data, *pu8_v_data;
+ M4VIFI_UInt8 *pu8_rgbn_data, *pu8_rgbn;
+ M4VIFI_UInt16 u16_pix1, u16_pix2, u16_pix3, u16_pix4;
+
+ /* Check planes height are appropriate */
+ if ((pPlaneIn->u_height != pPlaneOut[0].u_height) ||
+ (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1)) ||
+ (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
+ {
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+ }
+
+ /* Check planes width are appropriate */
+ if ((pPlaneIn->u_width != pPlaneOut[0].u_width) ||
+ (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
+ (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
+ {
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+ }
+
+ /* Set the pointer to the beginning of the output data buffers */
+ pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+ pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+ pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+
+ /* Set the pointer to the beginning of the input data buffers */
+ pu8_rgbn_data = pPlaneIn->pac_data + pPlaneIn->u_topleft;
+
+ /* Get the size of the output image */
+ u32_width = pPlaneOut[0].u_width;
+ u32_height = pPlaneOut[0].u_height;
+
+ /* Set the size of the memory jumps corresponding to row jump in each output plane */
+ u32_stride_Y = pPlaneOut[0].u_stride;
+ u32_stride2_Y = u32_stride_Y << 1;
+ u32_stride_U = pPlaneOut[1].u_stride;
+ u32_stride_V = pPlaneOut[2].u_stride;
+
+ /* Set the size of the memory jumps corresponding to row jump in input plane */
+ u32_stride_rgb = pPlaneIn->u_stride;
+ u32_stride_2rgb = u32_stride_rgb << 1;
+
+
+ /* Loop on each row of the output image, input coordinates are estimated from output ones */
+ /* Two YUV rows are computed at each pass */
+ for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+ {
+ /* Current Y plane row pointers */
+ pu8_yn = pu8_y_data;
+ /* Next Y plane row pointers */
+ pu8_ys = pu8_yn + u32_stride_Y;
+ /* Current U plane row pointer */
+ pu8_u = pu8_u_data;
+ /* Current V plane row pointer */
+ pu8_v = pu8_v_data;
+
+ pu8_rgbn = pu8_rgbn_data;
+
+ /* Loop on each column of the output image */
+ for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+ {
+ /* Get four RGB 565 samples from input data */
+ u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
+ u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
+ u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
+ u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
+
+ /* Unpack RGB565 to 8bit R, G, B */
+ /* (x,y) */
+ GET_RGB565(i32_r00,i32_g00,i32_b00,u16_pix1);
+ /* (x+1,y) */
+ GET_RGB565(i32_r10,i32_g10,i32_b10,u16_pix2);
+ /* (x,y+1) */
+ GET_RGB565(i32_r01,i32_g01,i32_b01,u16_pix3);
+ /* (x+1,y+1) */
+ GET_RGB565(i32_r11,i32_g11,i32_b11,u16_pix4);
+
+ /* Convert RGB value to YUV */
+ i32_u00 = U16(i32_r00, i32_g00, i32_b00);
+ i32_v00 = V16(i32_r00, i32_g00, i32_b00);
+ /* luminance value */
+ i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
+
+ i32_u10 = U16(i32_r10, i32_g10, i32_b10);
+ i32_v10 = V16(i32_r10, i32_g10, i32_b10);
+ /* luminance value */
+ i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
+
+ i32_u01 = U16(i32_r01, i32_g01, i32_b01);
+ i32_v01 = V16(i32_r01, i32_g01, i32_b01);
+ /* luminance value */
+ i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
+
+ i32_u11 = U16(i32_r11, i32_g11, i32_b11);
+ i32_v11 = V16(i32_r11, i32_g11, i32_b11);
+ /* luminance value */
+ i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
+
+ /* Store luminance data */
+ pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
+ pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
+ pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
+ pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+
+ /* Store chroma data */
+ *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+ *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+
+ /* Prepare for next column */
+ pu8_rgbn += (CST_RGB_16_SIZE<<1);
+ /* Update current Y plane line pointer*/
+ pu8_yn += 2;
+ /* Update next Y plane line pointer*/
+ pu8_ys += 2;
+ /* Update U plane line pointer*/
+ pu8_u ++;
+ /* Update V plane line pointer*/
+ pu8_v ++;
+ } /* End of horizontal scanning */
+
+ /* Prepare pointers for the next row */
+ pu8_y_data += u32_stride2_Y;
+ pu8_u_data += u32_stride_U;
+ pu8_v_data += u32_stride_V;
+ pu8_rgbn_data += u32_stride_2rgb;
+
+
+ } /* End of vertical scanning */
+
+ return M4VIFI_OK;
+}
+
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c
new file mode 100755
index 0000000..285a2a6
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "M4VIFI_FiltersAPI.h"
+
+#include "M4VIFI_Defines.h"
+
+#include "M4VIFI_Clip.h"
+
+/***************************************************************************
+Proto:
+M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn,
+ M4VIFI_ImagePlane PlaneOut[3]);
+Purpose: filling of the YUV420 plane from a BGR24 plane
+Abstract: Loop on each row ( 2 rows by 2 rows )
+ Loop on each column ( 2 col by 2 col )
+ Get 4 BGR samples from input data and build 4 output Y samples and
+ each single U & V data
+ end loop on col
+ end loop on row
+
+In: RGB24 plane
+InOut: none
+Out: array of 3 M4VIFI_ImagePlane structures
+Modified: ML: RGB function modified to BGR.
+***************************************************************************/
+M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn,
+ M4VIFI_ImagePlane PlaneOut[3])
+{
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V, u32_stride_rgb,\
+ u32_stride_2rgb;
+ M4VIFI_UInt32 u32_col, u32_row;
+
+ M4VIFI_Int32 i32_r00, i32_r01, i32_r10, i32_r11;
+ M4VIFI_Int32 i32_g00, i32_g01, i32_g10, i32_g11;
+ M4VIFI_Int32 i32_b00, i32_b01, i32_b10, i32_b11;
+ M4VIFI_Int32 i32_y00, i32_y01, i32_y10, i32_y11;
+ M4VIFI_Int32 i32_u00, i32_u01, i32_u10, i32_u11;
+ M4VIFI_Int32 i32_v00, i32_v01, i32_v10, i32_v11;
+ M4VIFI_UInt8 *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+ M4VIFI_UInt8 *pu8_y_data, *pu8_u_data, *pu8_v_data;
+ M4VIFI_UInt8 *pu8_rgbn_data, *pu8_rgbn;
+
+ /* check sizes */
+ if( (PlaneIn->u_height != PlaneOut[0].u_height) ||
+ (PlaneOut[0].u_height != (PlaneOut[1].u_height<<1)) ||
+ (PlaneOut[0].u_height != (PlaneOut[2].u_height<<1)))
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+
+ if( (PlaneIn->u_width != PlaneOut[0].u_width) ||
+ (PlaneOut[0].u_width != (PlaneOut[1].u_width<<1)) ||
+ (PlaneOut[0].u_width != (PlaneOut[2].u_width<<1)))
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+
+
+ /* set the pointer to the beginning of the output data buffers */
+ pu8_y_data = PlaneOut[0].pac_data + PlaneOut[0].u_topleft;
+ pu8_u_data = PlaneOut[1].pac_data + PlaneOut[1].u_topleft;
+ pu8_v_data = PlaneOut[2].pac_data + PlaneOut[2].u_topleft;
+
+ /* idem for input buffer */
+ pu8_rgbn_data = PlaneIn->pac_data + PlaneIn->u_topleft;
+
+ /* get the size of the output image */
+ u32_width = PlaneOut[0].u_width;
+ u32_height = PlaneOut[0].u_height;
+
+ /* set the size of the memory jumps corresponding to row jump in each output plane */
+ u32_stride_Y = PlaneOut[0].u_stride;
+ u32_stride2_Y= u32_stride_Y << 1;
+ u32_stride_U = PlaneOut[1].u_stride;
+ u32_stride_V = PlaneOut[2].u_stride;
+
+ /* idem for input plane */
+ u32_stride_rgb = PlaneIn->u_stride;
+ u32_stride_2rgb = u32_stride_rgb << 1;
+
+ /* loop on each row of the output image, input coordinates are estimated from output ones */
+ /* two YUV rows are computed at each pass */
+ for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+ {
+ /* update working pointers */
+ pu8_yn = pu8_y_data;
+ pu8_ys = pu8_yn + u32_stride_Y;
+
+ pu8_u = pu8_u_data;
+ pu8_v = pu8_v_data;
+
+ pu8_rgbn= pu8_rgbn_data;
+
+ /* loop on each column of the output image*/
+ for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+ {
+ /* get RGB samples of 4 pixels */
+ GET_RGB24(i32_r00, i32_g00, i32_b00, pu8_rgbn, 0);
+ GET_RGB24(i32_r10, i32_g10, i32_b10, pu8_rgbn, CST_RGB_24_SIZE);
+ GET_RGB24(i32_r01, i32_g01, i32_b01, pu8_rgbn, u32_stride_rgb);
+ GET_RGB24(i32_r11, i32_g11, i32_b11, pu8_rgbn, u32_stride_rgb + CST_RGB_24_SIZE);
+
+ i32_u00 = U24(i32_r00, i32_g00, i32_b00);
+ i32_v00 = V24(i32_r00, i32_g00, i32_b00);
+ i32_y00 = Y24(i32_r00, i32_g00, i32_b00); /* matrix luminance */
+ pu8_yn[0]= (M4VIFI_UInt8)i32_y00;
+
+ i32_u10 = U24(i32_r10, i32_g10, i32_b10);
+ i32_v10 = V24(i32_r10, i32_g10, i32_b10);
+ i32_y10 = Y24(i32_r10, i32_g10, i32_b10);
+ pu8_yn[1]= (M4VIFI_UInt8)i32_y10;
+
+ i32_u01 = U24(i32_r01, i32_g01, i32_b01);
+ i32_v01 = V24(i32_r01, i32_g01, i32_b01);
+ i32_y01 = Y24(i32_r01, i32_g01, i32_b01);
+ pu8_ys[0]= (M4VIFI_UInt8)i32_y01;
+
+ i32_u11 = U24(i32_r11, i32_g11, i32_b11);
+ i32_v11 = V24(i32_r11, i32_g11, i32_b11);
+ i32_y11 = Y24(i32_r11, i32_g11, i32_b11);
+ pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+
+ *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+ *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+
+ pu8_rgbn += (CST_RGB_24_SIZE<<1);
+ pu8_yn += 2;
+ pu8_ys += 2;
+
+ pu8_u ++;
+ pu8_v ++;
+ } /* end of horizontal scanning */
+
+ pu8_y_data += u32_stride2_Y;
+ pu8_u_data += u32_stride_U;
+ pu8_v_data += u32_stride_V;
+ pu8_rgbn_data += u32_stride_2rgb;
+
+
+ } /* End of vertical scanning */
+
+ return M4VIFI_OK;
+}
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c
new file mode 100755
index 0000000..617e4ed
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB565toRGB565.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file M4VIFI_ResizeRGB565toRGB565.c
+ * @brief Contain video library function
+ * @note This file has a Resize filter function
+ * Generic resizing of RGB565 (Planar) image
+ ******************************************************************************
+*/
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+/**
+ ***********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @brief Resizes RGB565 Planar plane.
+ * @param pUserData: (IN) User Data
+ * @param pPlaneIn: (IN) Pointer to RGB565 (Planar) plane buffer
+ * @param pPlaneOut: (OUT) Pointer to RGB565 (Planar) plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: Error in width
+ ***********************************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut)
+{
+ M4VIFI_UInt16 *pu16_data_in;
+ M4VIFI_UInt16 *pu16_data_out;
+ M4VIFI_UInt32 u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+ M4VIFI_UInt32 u32_stride_in, u32_stride_out;
+ M4VIFI_UInt32 u32_x_inc, u32_y_inc;
+ M4VIFI_UInt32 u32_x_accum, u32_y_accum, u32_x_accum_start;
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_y_frac;
+ M4VIFI_UInt32 u32_x_frac;
+ M4VIFI_UInt32 u32_Rtemp_value,u32_Gtemp_value,u32_Btemp_value;
+ M4VIFI_UInt16 *pu16_src_top;
+ M4VIFI_UInt16 *pu16_src_bottom;
+ M4VIFI_UInt32 i32_b00, i32_g00, i32_r00;
+ M4VIFI_UInt32 i32_b01, i32_g01, i32_r01;
+ M4VIFI_UInt32 i32_b02, i32_g02, i32_r02;
+ M4VIFI_UInt32 i32_b03, i32_g03, i32_r03;
+ M4VIFI_UInt8 count_trans=0;
+
+ /* Check for the RGB width and height are even */
+ if ((IS_EVEN(pPlaneIn->u_height) == FALSE) ||
+ (IS_EVEN(pPlaneOut->u_height) == FALSE)) {
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+ }
+
+ if ((IS_EVEN(pPlaneIn->u_width) == FALSE) ||
+ (IS_EVEN(pPlaneOut->u_width) == FALSE)) {
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+ }
+
+ /* Set the working pointers at the beginning of the input/output data field */
+ pu16_data_in = (M4VIFI_UInt16*)(pPlaneIn->pac_data + pPlaneIn->u_topleft);
+ pu16_data_out = (M4VIFI_UInt16*)(pPlaneOut->pac_data + pPlaneOut->u_topleft);
+
+ /* Get the memory jump corresponding to a row jump */
+ u32_stride_in = pPlaneIn->u_stride;
+ u32_stride_out = pPlaneOut->u_stride;
+
+ /* Set the bounds of the active image */
+ u32_width_in = pPlaneIn->u_width;
+ u32_height_in = pPlaneIn->u_height;
+
+ u32_width_out = pPlaneOut->u_width;
+ u32_height_out = pPlaneOut->u_height;
+
+ /* Compute horizontal ratio between src and destination width.*/
+ if (u32_width_out >= u32_width_in) {
+ u32_x_inc = ((u32_width_in-1) * MAX_SHORT) / (u32_width_out-1);
+ } else {
+ u32_x_inc = (u32_width_in * MAX_SHORT) / (u32_width_out);
+ }
+
+ /* Compute vertical ratio between src and destination height.*/
+ if (u32_height_out >= u32_height_in) {
+ u32_y_inc = ((u32_height_in - 1) * MAX_SHORT) / (u32_height_out-1);
+ } else {
+ u32_y_inc = (u32_height_in * MAX_SHORT) / (u32_height_out);
+ }
+
+ /*
+    Calculate initial accumulator value : u32_y_accum.
+    u32_y_accum is coded on 15 bits, and represents a value between 0 and 0.5
+ */
+ if (u32_y_inc >= MAX_SHORT) {
+ /*
+ Keep the fractional part, integer part is coded
+        on the 16 high bits and the fractional part on the 15 low bits
+ */
+ u32_y_accum = u32_y_inc & 0xffff;
+
+ if (!u32_y_accum)
+ {
+ u32_y_accum = MAX_SHORT;
+ }
+
+ u32_y_accum >>= 1;
+ } else {
+ u32_y_accum = 0;
+ }
+
+ /*
+ Calculate initial accumulator value : u32_x_accum_start.
+ u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+ */
+ if (u32_x_inc >= MAX_SHORT) {
+ u32_x_accum_start = u32_x_inc & 0xffff;
+
+ if (!u32_x_accum_start) {
+ u32_x_accum_start = MAX_SHORT;
+ }
+
+ u32_x_accum_start >>= 1;
+ } else {
+ u32_x_accum_start = 0;
+ }
+
+ u32_height = u32_height_out;
+
+ /*
+ Bilinear interpolation linearly interpolates along each row, and then uses that
+    result in a linear interpolation down each column. Each estimated pixel in the
+    output image is a weighted combination of its four neighbours according to the formula:
+    F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+f(p+1,q+1)R(1-a)R(b-1)
+    with R(x) = / x+1 -1 =< x =< 0 \ 1-x 0 =< x =< 1 and a (resp. b), the weighting coefficient,
+ is the distance from the nearest neighbor in the p (resp. q) direction
+ */
+
+ do { /* Scan all the row */
+
+ /* Vertical weight factor */
+ u32_y_frac = (u32_y_accum>>12)&15;
+
+ /* Reinit accumulator */
+ u32_x_accum = u32_x_accum_start;
+
+ u32_width = u32_width_out;
+
+ do { /* Scan along each row */
+ pu16_src_top = pu16_data_in + (u32_x_accum >> 16);
+ pu16_src_bottom = pu16_src_top + (u32_stride_in>>1);
+ u32_x_frac = (u32_x_accum >> 12)&15; /* Horizontal weight factor */
+
+ /* Weighted combination */
+ if ((u32_height == 1) && (u32_height_in == u32_height_out)) {
+ GET_RGB565(i32_b00,i32_g00,i32_r00,(M4VIFI_UInt16)pu16_src_top[0]);
+ GET_RGB565(i32_b01,i32_g01,i32_r01,(M4VIFI_UInt16)pu16_src_top[1]);
+ GET_RGB565(i32_b02,i32_g02,i32_r02,(M4VIFI_UInt16)pu16_src_top[0]);
+ GET_RGB565(i32_b03,i32_g03,i32_r03,(M4VIFI_UInt16)pu16_src_top[1]);
+ } else {
+ GET_RGB565(i32_b00,i32_g00,i32_r00,(M4VIFI_UInt16)pu16_src_top[0]);
+ GET_RGB565(i32_b01,i32_g01,i32_r01,(M4VIFI_UInt16)pu16_src_top[1]);
+ GET_RGB565(i32_b02,i32_g02,i32_r02,(M4VIFI_UInt16)pu16_src_bottom[0]);
+ GET_RGB565(i32_b03,i32_g03,i32_r03,(M4VIFI_UInt16)pu16_src_bottom[1]);
+
+ }
+
+ /* Solution to avoid green effects due to transparency */
+ count_trans = 0;
+
+ /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
+ if (i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
+ {
+ i32_b00 = 31;
+ i32_r00 = 31;
+ count_trans++;
+ }
+ if (i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
+ {
+ i32_b01 = 31;
+ i32_r01 = 31;
+ count_trans++;
+ }
+ if (i32_b02 == 0 && i32_g02 == 63 && i32_r02 == 0)
+ {
+ i32_b02 = 31;
+ i32_r02 = 31;
+ count_trans++;
+ }
+ if (i32_b03 == 0 && i32_g03 == 63 && i32_r03 == 0)
+ {
+ i32_b03 = 31;
+ i32_r03 = 31;
+ count_trans++;
+ }
+
+ if (count_trans > 2) {
+ /* pixel is transparent */
+ u32_Rtemp_value = 0;
+ u32_Gtemp_value = 63;
+ u32_Btemp_value = 0;
+ } else {
+ u32_Rtemp_value = (M4VIFI_UInt8)(((i32_r00*(16-u32_x_frac) +
+ i32_r01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_r02*(16-u32_x_frac) +
+ i32_r03*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_Gtemp_value = (M4VIFI_UInt8)(((i32_g00*(16-u32_x_frac) +
+ i32_g01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_g02*(16-u32_x_frac) +
+ i32_g03*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_Btemp_value = (M4VIFI_UInt8)(((i32_b00*(16-u32_x_frac) +
+ i32_b01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_b02*(16-u32_x_frac) +
+ i32_b03*u32_x_frac)*u32_y_frac )>>8);
+ }
+
+ *pu16_data_out++ = (M4VIFI_UInt16)( (((u32_Gtemp_value & 0x38) >> 3) | (u32_Btemp_value << 3)) |\
+ ( (((u32_Gtemp_value & 0x7) << 5 ) | u32_Rtemp_value)<<8 ));
+
+ /* Update horizontal accumulator */
+ u32_x_accum += u32_x_inc;
+
+ } while(--u32_width);
+
+
+ /* Update vertical accumulator */
+ u32_y_accum += u32_y_inc;
+ if (u32_y_accum>>16) {
+ pu16_data_in = pu16_data_in + (u32_y_accum >> 16) * (u32_stride_in>>1);
+ u32_y_accum &= 0xffff;
+ }
+
+ } while(--u32_height);
+
+ return M4VIFI_OK;
+}
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c
new file mode 100755
index 0000000..deb9d44
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VIFI_ResizeRGB888toRGB888.c
+ * @brief   Contain video library function
+ * @note    This file has a Resize filter function
+ *          -# Generic resizing of RGB888 image
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+/**
+ ***********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ * M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Resizes an RGB888 plane using bilinear interpolation.
+ * @note    Basic structure of the function
+ *          Loop on each row
+ *          Loop on each column
+ *          Get four neighbouring RGB888 samples
+ *          Interpolate the R, G and B components bilinearly
+ *          Place the resulting RGB888 sample in the output plane
+ * end loop column
+ * end loop row
+ * For resizing bilinear interpolation linearly interpolates along
+ * each row, and then uses that result in a linear interpolation down each column.
+ * Each estimated pixel in the output image is a weighted
+ * combination of its four neighbours. The ratio of compression
+ * or dilatation is estimated using input and output sizes.
+ * @param pUserData: (IN) User Data
+ * @param   pPlaneIn: (IN) Pointer to RGB888 plane buffer
+ * @param   pPlaneOut: (OUT) Pointer to RGB888 plane
+ * @return M4VIFI_OK: there is no error
+ * @return M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return M4VIFI_ILLEGAL_FRAME_WIDTH: Error in width
+ ***********************************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+ M4VIFI_ImagePlane *pPlaneIn,
+ M4VIFI_ImagePlane *pPlaneOut)
+{
+ M4VIFI_UInt8 *pu8_data_in;
+ M4VIFI_UInt8 *pu8_data_out;
+ M4VIFI_UInt32 u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+ M4VIFI_UInt32 u32_stride_in, u32_stride_out;
+ M4VIFI_UInt32 u32_x_inc, u32_y_inc;
+ M4VIFI_UInt32 u32_x_accum, u32_y_accum, u32_x_accum_start;
+ M4VIFI_UInt32 u32_width, u32_height;
+ M4VIFI_UInt32 u32_y_frac;
+ M4VIFI_UInt32 u32_x_frac;
+ M4VIFI_UInt32 u32_Rtemp_value,u32_Gtemp_value,u32_Btemp_value;
+ M4VIFI_UInt8 *pu8_src_top;
+ M4VIFI_UInt8 *pu8_src_bottom;
+ M4VIFI_UInt32 i32_b00, i32_g00, i32_r00;
+ M4VIFI_UInt32 i32_b01, i32_g01, i32_r01;
+ M4VIFI_UInt32 i32_b02, i32_g02, i32_r02;
+ M4VIFI_UInt32 i32_b03, i32_g03, i32_r03;
+
+    /* Check that the RGB888 width and height are even */
+ if ((IS_EVEN(pPlaneIn->u_height) == FALSE) ||
+ (IS_EVEN(pPlaneOut->u_height) == FALSE))
+ {
+ return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+ }
+
+ if ((IS_EVEN(pPlaneIn->u_width) == FALSE) ||
+ (IS_EVEN(pPlaneOut->u_width) == FALSE))
+ {
+ return M4VIFI_ILLEGAL_FRAME_WIDTH;
+ }
+
+
+ /* Set the working pointers at the beginning of the input/output data field */
+ pu8_data_in = (M4VIFI_UInt8*)(pPlaneIn->pac_data + pPlaneIn->u_topleft);
+ pu8_data_out = (M4VIFI_UInt8*)(pPlaneOut->pac_data + pPlaneOut->u_topleft);
+
+ /* Get the memory jump corresponding to a row jump */
+ u32_stride_in = pPlaneIn->u_stride;
+ u32_stride_out = pPlaneOut->u_stride;
+
+ /* Set the bounds of the active image */
+ u32_width_in = pPlaneIn->u_width;
+ u32_height_in = pPlaneIn->u_height;
+
+ u32_width_out = pPlaneOut->u_width;
+ u32_height_out = pPlaneOut->u_height;
+
+ /* Compute horizontal ratio between src and destination width.*/
+ if (u32_width_out >= u32_width_in)
+ {
+ u32_x_inc = ((u32_width_in-1) * MAX_SHORT) / (u32_width_out-1);
+ }
+ else
+ {
+ u32_x_inc = (u32_width_in * MAX_SHORT) / (u32_width_out);
+ }
+
+ /* Compute vertical ratio between src and destination height.*/
+ if (u32_height_out >= u32_height_in)
+ {
+ u32_y_inc = ((u32_height_in - 1) * MAX_SHORT) / (u32_height_out-1);
+ }
+ else
+ {
+ u32_y_inc = (u32_height_in * MAX_SHORT) / (u32_height_out);
+ }
+
+ /*
+    Calculate initial accumulator value : u32_y_accum.
+    u32_y_accum is coded on 15 bits, and represents a value between 0 and 0.5
+ */
+ if (u32_y_inc >= MAX_SHORT)
+ {
+ /*
+        Keep the fractional part, assuming that the integer part is coded
+        on the 16 high bits and the fractional part on the 15 low bits
+ */
+ u32_y_accum = u32_y_inc & 0xffff;
+
+ if (!u32_y_accum)
+ {
+ u32_y_accum = MAX_SHORT;
+ }
+
+ u32_y_accum >>= 1;
+ }
+ else
+ {
+ u32_y_accum = 0;
+ }
+
+
+ /*
+ Calculate initial accumulator value : u32_x_accum_start.
+ u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+ */
+ if (u32_x_inc >= MAX_SHORT)
+ {
+ u32_x_accum_start = u32_x_inc & 0xffff;
+
+ if (!u32_x_accum_start)
+ {
+ u32_x_accum_start = MAX_SHORT;
+ }
+
+ u32_x_accum_start >>= 1;
+ }
+ else
+ {
+ u32_x_accum_start = 0;
+ }
+
+ u32_height = u32_height_out;
+
+ /*
+ Bilinear interpolation linearly interpolates along each row, and then uses that
+    result in a linear interpolation down each column. Each estimated pixel in the
+    output image is a weighted combination of its four neighbours according to the formula:
+    F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+f(p+1,q+1)R(1-a)R(b-1)
+    with R(x) = / x+1 -1 =< x =< 0 \ 1-x 0 =< x =< 1 and a (resp. b), the weighting coefficient,
+ is the distance from the nearest neighbor in the p (resp. q) direction
+ */
+
+ do { /* Scan all the row */
+
+ /* Vertical weight factor */
+ u32_y_frac = (u32_y_accum>>12)&15;
+
+ /* Reinit accumulator */
+ u32_x_accum = u32_x_accum_start;
+
+ u32_width = u32_width_out;
+
+ do { /* Scan along each row */
+ pu8_src_top = pu8_data_in + (u32_x_accum >> 16)*3;
+ pu8_src_bottom = pu8_src_top + (u32_stride_in);
+ u32_x_frac = (u32_x_accum >> 12)&15; /* Horizontal weight factor */
+
+ if ((u32_width == 1) && (u32_width_in == u32_width_out)) {
+ /*
+ When input height is equal to output height and input width
+ equal to output width, replicate the corner pixels for
+ interpolation
+ */
+ if ((u32_height == 1) && (u32_height_in == u32_height_out)) {
+ GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
+ GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,0);
+ GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_top,0);
+ GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_top,0);
+ }
+ /*
+ When input height is not equal to output height and
+ input width equal to output width, replicate the
+ column for interpolation
+ */
+ else {
+ GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
+ GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,0);
+ GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_bottom,0);
+ GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_bottom,0);
+ }
+ } else {
+ /*
+ When input height is equal to output height and
+ input width not equal to output width, replicate the
+ row for interpolation
+ */
+ if ((u32_height == 1) && (u32_height_in == u32_height_out)) {
+ GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
+ GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,3);
+ GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_top,0);
+ GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_top,3);
+ } else {
+ GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
+ GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,3);
+ GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_bottom,0);
+ GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_bottom,3);
+ }
+ }
+ u32_Rtemp_value = (M4VIFI_UInt8)(((i32_r00*(16-u32_x_frac) +
+ i32_r01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_r02*(16-u32_x_frac) +
+ i32_r03*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_Gtemp_value = (M4VIFI_UInt8)(((i32_g00*(16-u32_x_frac) +
+ i32_g01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_g02*(16-u32_x_frac) +
+ i32_g03*u32_x_frac)*u32_y_frac )>>8);
+
+ u32_Btemp_value = (M4VIFI_UInt8)(((i32_b00*(16-u32_x_frac) +
+ i32_b01*u32_x_frac)*(16-u32_y_frac) +
+ (i32_b02*(16-u32_x_frac) +
+ i32_b03*u32_x_frac)*u32_y_frac )>>8);
+
+ *pu8_data_out++ = u32_Btemp_value ;
+ *pu8_data_out++ = u32_Gtemp_value ;
+ *pu8_data_out++ = u32_Rtemp_value ;
+
+ /* Update horizontal accumulator */
+ u32_x_accum += u32_x_inc;
+
+ } while(--u32_width);
+
+ //pu16_data_out = pu16_data_out + (u32_stride_out>>1) - (u32_width_out);
+
+ /* Update vertical accumulator */
+ u32_y_accum += u32_y_inc;
+ if (u32_y_accum>>16)
+ {
+ pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * (u32_stride_in) ;
+ u32_y_accum &= 0xffff;
+ }
+ } while(--u32_height);
+
+ return M4VIFI_OK;
+}
+/* End of file M4VIFI_ResizeRGB888toRGB888.c */
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c
new file mode 100755
index 0000000..0042e80
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VIFI_ResizeYUVtoBGR565.c
+ * @brief   Contain video library function
+ * @note    This file has a Combo filter function
+ *          -# Resizes YUV420 and converts to BGR565 with rotation
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+/**
+ *********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565(void *pContext, M4VIFI_ImagePlane *pPlaneIn,
+ *                                                  M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Resize YUV420 plane and converts to BGR565. (NOTE(review): comment claimed a +90 rotation, but the output is written row-major below -- confirm whether rotation is intended.)
+ * @note    Basic structure of the function
+ *          Loop on each row (step 2)
+ *          Loop on each column (step 2)
+ *              Get four Y samples and 1 U & V sample
+ *              Resize the Y with corresponding U and V samples
+ *              Compute the four corresponding R G B values
+ *              Place the R G B in the output plane in rotated fashion
+ *          end loop column
+ *          end loop row
+ *          For resizing bilinear interpolation linearly interpolates along
+ *          each row, and then uses that result in a linear interpolation down each column.
+ *          Each estimated pixel in the output image is a weighted
+ *          combination of its four neighbours. The ratio of compression
+ *          or dilatation is estimated using input and output sizes.
+ * @param   pPlaneIn: (IN) Pointer to YUV plane buffer
+ * @param   pContext: (IN) Context Pointer
+ * @param   pPlaneOut: (OUT) Pointer to BGR565 Plane
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH: YUV Plane width is ODD
+ *********************************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565(void* pContext,
+                                                 M4VIFI_ImagePlane *pPlaneIn,
+                                                 M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt8 *pu8_data_in[PLANES], *pu8_data_in1[PLANES],*pu8_data_out;
+    M4VIFI_UInt32 *pu32_rgb_data_current, *pu32_rgb_data_next, *pu32_rgb_data_start;
+
+    M4VIFI_UInt32 u32_width_in[PLANES], u32_width_out, u32_height_in[PLANES], u32_height_out;
+    M4VIFI_UInt32 u32_stride_in[PLANES];
+    M4VIFI_UInt32 u32_stride_out, u32_stride2_out, u32_width2_RGB, u32_height2_RGB;
+    M4VIFI_UInt32 u32_x_inc[PLANES], u32_y_inc[PLANES];
+    M4VIFI_UInt32 u32_x_accum_Y, u32_x_accum_U, u32_x_accum_start;
+    M4VIFI_UInt32 u32_y_accum_Y, u32_y_accum_U;
+    M4VIFI_UInt32 u32_x_frac_Y, u32_x_frac_U, u32_y_frac_Y,u32_y_frac_U;
+    M4VIFI_Int32 U_32, V_32, Y_32, Yval_32;
+    M4VIFI_UInt8 u8_Red, u8_Green, u8_Blue;
+    M4VIFI_UInt32 u32_row, u32_col;
+
+    M4VIFI_UInt32 u32_plane;
+    M4VIFI_UInt32 u32_rgb_temp1, u32_rgb_temp2;
+    M4VIFI_UInt32 u32_rgb_temp3,u32_rgb_temp4;
+    M4VIFI_UInt32 u32_check_size;
+
+    M4VIFI_UInt8 *pu8_src_top_Y,*pu8_src_top_U,*pu8_src_top_V ;
+    M4VIFI_UInt8 *pu8_src_bottom_Y, *pu8_src_bottom_U, *pu8_src_bottom_V;
+
+    /* Check for the YUV width and height are even */
+    u32_check_size = IS_EVEN(pPlaneIn[0].u_height);
+    if( u32_check_size == FALSE )
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+    u32_check_size = IS_EVEN(pPlaneIn[0].u_width);
+    if (u32_check_size == FALSE )
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+
+    }
+    /* Make the output width and height as even */
+    pPlaneOut->u_height = pPlaneOut->u_height & 0xFFFFFFFE;
+    pPlaneOut->u_width = pPlaneOut->u_width & 0xFFFFFFFE;
+    pPlaneOut->u_stride = pPlaneOut->u_stride & 0xFFFFFFFC;
+
+    /* Assignment of output pointer */
+    pu8_data_out = pPlaneOut->pac_data + pPlaneOut->u_topleft;
+    /* Assignment of output width(rotated) */
+    u32_width_out = pPlaneOut->u_width;
+    /* Assignment of output height(rotated) */
+    u32_height_out = pPlaneOut->u_height;
+
+    u32_width2_RGB = pPlaneOut->u_width >> 1;
+    u32_height2_RGB = pPlaneOut->u_height >> 1;
+
+    u32_stride_out = pPlaneOut->u_stride >> 1;
+    u32_stride2_out = pPlaneOut->u_stride >> 2;
+
+    for(u32_plane = 0; u32_plane < PLANES; u32_plane++)
+    {
+        /* Set the working pointers at the beginning of the input/output data field */
+        pu8_data_in[u32_plane] = pPlaneIn[u32_plane].pac_data + pPlaneIn[u32_plane].u_topleft;
+
+        /* Get the memory jump corresponding to a row jump */
+        u32_stride_in[u32_plane] = pPlaneIn[u32_plane].u_stride;
+
+        /* Set the bounds of the active image */
+        u32_width_in[u32_plane] = pPlaneIn[u32_plane].u_width;
+        u32_height_in[u32_plane] = pPlaneIn[u32_plane].u_height;
+    }
+    /* Compute horizontal ratio between src and destination width for Y Plane. */
+    if (u32_width_out >= u32_width_in[YPlane])
+    {
+        u32_x_inc[YPlane] = ((u32_width_in[YPlane]-1) * MAX_SHORT) / (u32_width_out-1);
+    }
+    else
+    {
+        u32_x_inc[YPlane] = (u32_width_in[YPlane] * MAX_SHORT) / (u32_width_out);
+    }
+
+    /* Compute vertical ratio between src and destination height for Y Plane.*/
+    if (u32_height_out >= u32_height_in[YPlane])
+    {
+        u32_y_inc[YPlane] = ((u32_height_in[YPlane]-1) * MAX_SHORT) / (u32_height_out-1);
+    }
+    else
+    {
+        u32_y_inc[YPlane] = (u32_height_in[YPlane] * MAX_SHORT) / (u32_height_out);
+    }
+
+    /* Compute horizontal ratio between src and destination width for U and V Planes. */
+    if (u32_width2_RGB >= u32_width_in[UPlane])
+    {
+        u32_x_inc[UPlane] = ((u32_width_in[UPlane]-1) * MAX_SHORT) / (u32_width2_RGB-1);
+    }
+    else
+    {
+        u32_x_inc[UPlane] = (u32_width_in[UPlane] * MAX_SHORT) / (u32_width2_RGB);
+    }
+
+    /* Compute vertical ratio between src and destination height for U and V Planes. */
+
+    if (u32_height2_RGB >= u32_height_in[UPlane])
+    {
+        u32_y_inc[UPlane] = ((u32_height_in[UPlane]-1) * MAX_SHORT) / (u32_height2_RGB-1);
+    }
+    else
+    {
+        u32_y_inc[UPlane] = (u32_height_in[UPlane] * MAX_SHORT) / (u32_height2_RGB);
+    }
+
+    u32_y_inc[VPlane] = u32_y_inc[UPlane];
+    u32_x_inc[VPlane] = u32_x_inc[UPlane];
+
+    /*
+    Calculate initial accumulator value : u32_y_accum_start.
+    u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+    */
+    if (u32_y_inc[YPlane] > MAX_SHORT)
+    {
+        /*
+        Keep the fractional part, assuming that integer part is coded on the 16 high bits,
+        and the fractional on the 15 low bits
+        */
+        u32_y_accum_Y = u32_y_inc[YPlane] & 0xffff;
+        u32_y_accum_U = u32_y_inc[UPlane] & 0xffff;
+
+        if (!u32_y_accum_Y)
+        {
+            u32_y_accum_Y = MAX_SHORT;
+            u32_y_accum_U = MAX_SHORT;
+        }
+        u32_y_accum_Y >>= 1;
+        u32_y_accum_U >>= 1;
+    }
+    else
+    {
+        u32_y_accum_Y = 0;
+        u32_y_accum_U = 0;
+
+    }
+
+    /*
+    Calculate initial accumulator value : u32_x_accum_start.
+    u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+    */
+    if (u32_x_inc[YPlane] > MAX_SHORT)
+    {
+        u32_x_accum_start = u32_x_inc[YPlane] & 0xffff;
+
+        if (!u32_x_accum_start)
+        {
+            u32_x_accum_start = MAX_SHORT;
+        }
+
+        u32_x_accum_start >>= 1;
+    }
+    else
+    {
+        u32_x_accum_start = 0;
+    }
+
+    pu32_rgb_data_start = (M4VIFI_UInt32*)pu8_data_out;
+
+    /*
+    Bilinear interpolation linearly interpolates along each row, and then uses that
+    result in a linear interpolation down each column. Each estimated pixel in the
+    output image is a weighted combination of its four neighbours according to the formula :
+    F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+f(p+1,q+1)R(1-a)R(b-1)
+    with R(x) = / x+1  -1 =< x =< 0 \ 1-x  0 =< x =< 1 and a (resp. b) weighting coefficient
+    is the distance from the nearest neighbor in the p (resp. q) direction
+    */
+    for (u32_row = u32_height_out; u32_row != 0; u32_row -= 2)
+    {
+        u32_x_accum_Y = u32_x_accum_start;
+        u32_x_accum_U = u32_x_accum_start;
+
+        /* Vertical weight factor */
+        u32_y_frac_Y = (u32_y_accum_Y >> 12) & 15;
+        u32_y_frac_U = (u32_y_accum_U >> 12) & 15;
+
+        /* RGB current line position pointer */
+        pu32_rgb_data_current = pu32_rgb_data_start ;
+
+        /* RGB next line position pointer */
+        pu32_rgb_data_next = pu32_rgb_data_current + (u32_stride2_out);
+
+        /* Y Plane next row pointer */
+        pu8_data_in1[YPlane] = pu8_data_in[YPlane];
+
+        u32_rgb_temp3 = u32_y_accum_Y + (u32_y_inc[YPlane]); /* vertical accumulator advanced to the next output row */
+        if (u32_rgb_temp3 >> 16)
+        {
+            pu8_data_in1[YPlane] = pu8_data_in[YPlane] +
+                (u32_rgb_temp3 >> 16) * (u32_stride_in[YPlane]);
+            u32_rgb_temp3 &= 0xffff;
+        }
+        u32_rgb_temp4 = (u32_rgb_temp3 >> 12) & 15; /* vertical weight factor for the next output row */
+
+        for (u32_col = u32_width_out; u32_col != 0; u32_col -= 2)
+        {
+
+            /* Input Y plane elements */
+            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Input U Plane elements */
+            pu8_src_top_U = pu8_data_in[UPlane] + (u32_x_accum_U >> 16);
+            pu8_src_bottom_U = pu8_src_top_U + u32_stride_in[UPlane];
+
+            pu8_src_top_V = pu8_data_in[VPlane] + (u32_x_accum_U >> 16);
+            pu8_src_bottom_V = pu8_src_top_V + u32_stride_in[VPlane];
+
+            /* Horizontal weight factor for Y plane */
+            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
+            /* Horizontal weight factor for U and V planes */
+            u32_x_frac_U = (u32_x_accum_U >> 12)&15;
+
+            /* Weighted combination */
+            U_32 = (((pu8_src_top_U[0]*(16-u32_x_frac_U) + pu8_src_top_U[1]*u32_x_frac_U)
+                    *(16-u32_y_frac_U) + (pu8_src_bottom_U[0]*(16-u32_x_frac_U)
+                    + pu8_src_bottom_U[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
+
+            V_32 = (((pu8_src_top_V[0]*(16-u32_x_frac_U) + pu8_src_top_V[1]*u32_x_frac_U)
+                    *(16-u32_y_frac_U)+ (pu8_src_bottom_V[0]*(16-u32_x_frac_U)
+                    + pu8_src_bottom_V[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
+
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
+
+            u32_x_accum_U += (u32_x_inc[UPlane]);
+
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                u32_rgb_temp1 = PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                u32_rgb_temp1 = PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in1[YPlane]+(u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 ) >> 8);
+
+            u32_x_accum_Y += u32_x_inc[YPlane];
+
+            /* Horizontal weight factor */
+            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
+
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                u32_rgb_temp2 = PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                u32_rgb_temp2 = PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16) ;
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
+
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
+                    PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
+                    PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in1[YPlane]+ (u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 )>>8);
+
+            u32_x_accum_Y += u32_x_inc[YPlane];
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                Yval_32=Y_32*37;
+            #else   /* __RGB_V1__v */
+                Yval_32=Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
+                    PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
+                    PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+        }   /* End of horizontal scanning */
+
+        u32_y_accum_Y = u32_rgb_temp3 + (u32_y_inc[YPlane]);
+        u32_y_accum_U += (u32_y_inc[UPlane]);
+
+        /* Y plane row update */
+        if (u32_y_accum_Y >> 16)
+        {
+            pu8_data_in[YPlane] = pu8_data_in1[YPlane] +
+                ((u32_y_accum_Y >> 16) * (u32_stride_in[YPlane]));
+            u32_y_accum_Y &= 0xffff;
+        }
+        else
+        {
+            pu8_data_in[YPlane] = pu8_data_in1[YPlane];
+        }
+        /* U and V planes row update */
+        if (u32_y_accum_U >> 16)
+        {
+            pu8_data_in[UPlane] = pu8_data_in[UPlane] +
+                (u32_y_accum_U >> 16) * (u32_stride_in[UPlane]);
+            pu8_data_in[VPlane] = pu8_data_in[VPlane] +
+                (u32_y_accum_U >> 16) * (u32_stride_in[VPlane]);
+            u32_y_accum_U &= 0xffff;
+        }
+        /* BGR pointer Update */
+        pu32_rgb_data_start += u32_stride_out;
+
+    }   /* End of vertical scanning */
+    return M4VIFI_OK;
+}
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c
new file mode 100755
index 0000000..eda9d07
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VIFI_ResizeYUVtoRGB565.c
+ * @brief   Contain video library function
+ * @note    This file has a Combo filter function
+ *          -# Resizes YUV420 and converts to RGB565 with rotation
+ * @date
+ *          - 2004/08/11: Creation
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include "M4VIFI_Clip.h"
+
+/**
+ ********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void *pContext,
+ *                                                  M4VIFI_ImagePlane *pPlaneIn,
+ *                                                  M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Resize YUV420 plane and converts to RGB565. (NOTE(review): comment claimed a +90 rotation, but the output is written row-major below -- confirm whether rotation is intended.)
+ * @note    Basic structure of the function
+ *          Loop on each row (step 2)
+ *          Loop on each column (step 2)
+ *              Get four Y samples and 1 U & V sample
+ *              Resize the Y with corresponding U and V samples
+ *              Compute the four corresponding R G B values
+ *              Place the R G B in the output plane in rotated fashion
+ *          end loop column
+ *          end loop row
+ *          For resizing bilinear interpolation linearly interpolates along
+ *          each row, and then uses that result in a linear interpolation down each column.
+ *          Each estimated pixel in the output image is a weighted
+ *          combination of its four neighbours. The ratio of compression
+ *          or dilatation is estimated using input and output sizes.
+ * @param   pPlaneIn: (IN) Pointer to YUV plane buffer
+ * @param   pContext: (IN) Context Pointer
+ * @param   pPlaneOut: (OUT) Pointer to RGB565 Plane
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH: YUV Plane width is ODD
+ ********************************************************************************************
+*/
+M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void* pContext,
+                                                 M4VIFI_ImagePlane *pPlaneIn,
+                                                 M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt8 *pu8_data_in[PLANES], *pu8_data_in1[PLANES],*pu8_data_out;
+    M4VIFI_UInt32 *pu32_rgb_data_current, *pu32_rgb_data_next, *pu32_rgb_data_start;
+
+    M4VIFI_UInt32 u32_width_in[PLANES], u32_width_out, u32_height_in[PLANES], u32_height_out;
+    M4VIFI_UInt32 u32_stride_in[PLANES];
+    M4VIFI_UInt32 u32_stride_out, u32_stride2_out, u32_width2_RGB, u32_height2_RGB;
+    M4VIFI_UInt32 u32_x_inc[PLANES], u32_y_inc[PLANES];
+    M4VIFI_UInt32 u32_x_accum_Y, u32_x_accum_U, u32_x_accum_start;
+    M4VIFI_UInt32 u32_y_accum_Y, u32_y_accum_U;
+    M4VIFI_UInt32 u32_x_frac_Y, u32_x_frac_U, u32_y_frac_Y,u32_y_frac_U;
+    M4VIFI_Int32 U_32, V_32, Y_32, Yval_32;
+    M4VIFI_UInt8 u8_Red, u8_Green, u8_Blue;
+    M4VIFI_UInt32 u32_row, u32_col;
+
+    M4VIFI_UInt32 u32_plane;
+    M4VIFI_UInt32 u32_rgb_temp1, u32_rgb_temp2;
+    M4VIFI_UInt32 u32_rgb_temp3,u32_rgb_temp4;
+    M4VIFI_UInt32 u32_check_size;
+
+    M4VIFI_UInt8 *pu8_src_top_Y,*pu8_src_top_U,*pu8_src_top_V ;
+    M4VIFI_UInt8 *pu8_src_bottom_Y, *pu8_src_bottom_U, *pu8_src_bottom_V;
+
+    /* Check for the width and height are even */
+    u32_check_size = IS_EVEN(pPlaneIn[0].u_height);
+    if( u32_check_size == FALSE )
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+    u32_check_size = IS_EVEN(pPlaneIn[0].u_width);
+    if (u32_check_size == FALSE )
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+
+    }
+    /* Make the output width and height as even */
+    pPlaneOut->u_height = pPlaneOut->u_height & 0xFFFFFFFE;
+    pPlaneOut->u_width = pPlaneOut->u_width & 0xFFFFFFFE;
+    pPlaneOut->u_stride = pPlaneOut->u_stride & 0xFFFFFFFC;
+
+    /* Assignment of output pointer */
+    pu8_data_out = pPlaneOut->pac_data + pPlaneOut->u_topleft;
+    /* Assignment of output width(rotated) */
+    u32_width_out = pPlaneOut->u_width;
+    /* Assignment of output height(rotated) */
+    u32_height_out = pPlaneOut->u_height;
+
+    /* Set the bounds of the active image */
+    u32_width2_RGB = pPlaneOut->u_width >> 1;
+    u32_height2_RGB = pPlaneOut->u_height >> 1;
+    /* Get the memory jump corresponding to a row jump */
+    u32_stride_out = pPlaneOut->u_stride >> 1;
+    u32_stride2_out = pPlaneOut->u_stride >> 2;
+
+    for(u32_plane = 0; u32_plane < PLANES; u32_plane++)
+    {
+        /* Set the working pointers at the beginning of the input/output data field */
+        pu8_data_in[u32_plane] = pPlaneIn[u32_plane].pac_data + pPlaneIn[u32_plane].u_topleft;
+
+        /* Get the memory jump corresponding to a row jump */
+        u32_stride_in[u32_plane] = pPlaneIn[u32_plane].u_stride;
+
+        /* Set the bounds of the active image */
+        u32_width_in[u32_plane] = pPlaneIn[u32_plane].u_width;
+        u32_height_in[u32_plane] = pPlaneIn[u32_plane].u_height;
+    }
+    /* Compute horizontal ratio between src and destination width for Y Plane.*/
+    if (u32_width_out >= u32_width_in[YPlane])
+    {
+        u32_x_inc[YPlane] = ((u32_width_in[YPlane]-1) * MAX_SHORT) / (u32_width_out-1);
+    }
+    else
+    {
+        u32_x_inc[YPlane] = (u32_width_in[YPlane] * MAX_SHORT) / (u32_width_out);
+    }
+
+    /* Compute vertical ratio between src and destination height for Y Plane.*/
+    if (u32_height_out >= u32_height_in[YPlane])
+    {
+        u32_y_inc[YPlane] = ((u32_height_in[YPlane]-1) * MAX_SHORT) / (u32_height_out-1);
+    }
+    else
+    {
+        u32_y_inc[YPlane] = (u32_height_in[YPlane] * MAX_SHORT) / (u32_height_out);
+    }
+
+    /* Compute horizontal ratio between src and destination width for U and V Planes.*/
+    if (u32_width2_RGB >= u32_width_in[UPlane])
+    {
+        u32_x_inc[UPlane] = ((u32_width_in[UPlane]-1) * MAX_SHORT) / (u32_width2_RGB-1);
+    }
+    else
+    {
+        u32_x_inc[UPlane] = (u32_width_in[UPlane] * MAX_SHORT) / (u32_width2_RGB);
+    }
+
+    /* Compute vertical ratio between src and destination height for U and V Planes.*/
+
+    if (u32_height2_RGB >= u32_height_in[UPlane])
+    {
+        u32_y_inc[UPlane] = ((u32_height_in[UPlane]-1) * MAX_SHORT) / (u32_height2_RGB-1);
+    }
+    else
+    {
+        u32_y_inc[UPlane] = (u32_height_in[UPlane] * MAX_SHORT) / (u32_height2_RGB);
+    }
+
+    u32_y_inc[VPlane] = u32_y_inc[UPlane];
+    u32_x_inc[VPlane] = u32_x_inc[UPlane];
+
+    /*
+    Calculate initial accumulator value : u32_y_accum_start.
+    u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+    */
+    if (u32_y_inc[YPlane] > MAX_SHORT)
+    {
+        /*
+        Keep the fractional part, assuming that integer part is coded on the 16 high bits,
+        and the fractional on the 15 low bits
+        */
+        u32_y_accum_Y = u32_y_inc[YPlane] & 0xffff;
+        u32_y_accum_U = u32_y_inc[UPlane] & 0xffff;
+
+        if (!u32_y_accum_Y)
+        {
+            u32_y_accum_Y = MAX_SHORT;
+            u32_y_accum_U = MAX_SHORT;
+        }
+        u32_y_accum_Y >>= 1;
+        u32_y_accum_U >>= 1;
+    }
+    else
+    {
+        u32_y_accum_Y = 0;
+        u32_y_accum_U = 0;
+
+    }
+
+    /*
+    Calculate initial accumulator value : u32_x_accum_start.
+    u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+    */
+    if (u32_x_inc[YPlane] > MAX_SHORT)
+    {
+        u32_x_accum_start = u32_x_inc[YPlane] & 0xffff;
+
+        if (!u32_x_accum_start)
+        {
+            u32_x_accum_start = MAX_SHORT;
+        }
+
+        u32_x_accum_start >>= 1;
+    }
+    else
+    {
+        u32_x_accum_start = 0;
+    }
+    /* Initialise the RGB pointer */
+    pu32_rgb_data_start = (M4VIFI_UInt32*)pu8_data_out;
+
+    /*
+    Bilinear interpolation linearly interpolates along each row, and then uses that
+    result in a linear interpolation down each column. Each estimated pixel in the
+    output image is a weighted combination of its four neighbours according to the formula :
+    F(p',q')=f(p,q)R(-a)R(b)+f(p,q-1)R(-a)R(b-1)+f(p+1,q)R(1-a)R(b)+f(p+1,q+1)R(1-a)R(b-1)
+    with R(x) = / x+1  -1 =< x =< 0 \ 1-x  0 =< x =< 1 and a (resp. b) weighting coefficient
+    is the distance from the nearest neighbor in the p (resp. q) direction
+    */
+    for (u32_row = u32_height_out; u32_row != 0; u32_row -= 2)
+    {
+        u32_x_accum_Y = u32_x_accum_start;
+        u32_x_accum_U = u32_x_accum_start;
+
+        /* Vertical weight factor */
+        u32_y_frac_Y = (u32_y_accum_Y >> 12) & 15;
+        u32_y_frac_U = (u32_y_accum_U >> 12) & 15;
+
+        /* RGB current line Position Pointer */
+        pu32_rgb_data_current = pu32_rgb_data_start ;
+
+        /* RGB next line position pointer */
+        pu32_rgb_data_next = pu32_rgb_data_current + (u32_stride2_out);
+
+        /* Y Plane next row pointer */
+        pu8_data_in1[YPlane] = pu8_data_in[YPlane];
+
+        u32_rgb_temp3 = u32_y_accum_Y + (u32_y_inc[YPlane]); /* vertical accumulator advanced to the next output row */
+        if (u32_rgb_temp3 >> 16)
+        {
+            pu8_data_in1[YPlane] = pu8_data_in[YPlane] +
+                (u32_rgb_temp3 >> 16) * (u32_stride_in[YPlane]);
+            u32_rgb_temp3 &= 0xffff;
+        }
+        u32_rgb_temp4 = (u32_rgb_temp3 >> 12) & 15; /* vertical weight factor for the next output row */
+
+        for (u32_col = u32_width_out; u32_col != 0; u32_col -= 2)
+        {
+
+            /* Input Y plane elements */
+            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Input U Plane elements */
+            pu8_src_top_U = pu8_data_in[UPlane] + (u32_x_accum_U >> 16);
+            pu8_src_bottom_U = pu8_src_top_U + u32_stride_in[UPlane];
+
+            pu8_src_top_V = pu8_data_in[VPlane] + (u32_x_accum_U >> 16);
+            pu8_src_bottom_V = pu8_src_top_V + u32_stride_in[VPlane];
+
+            /* Horizontal weight factor for Y Plane */
+            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
+            /* Horizontal weight factor for U and V Planes */
+            u32_x_frac_U = (u32_x_accum_U >> 12)&15;
+
+            /* Weighted combination */
+            U_32 = (((pu8_src_top_U[0]*(16-u32_x_frac_U) + pu8_src_top_U[1]*u32_x_frac_U)
+                    *(16-u32_y_frac_U) + (pu8_src_bottom_U[0]*(16-u32_x_frac_U)
+                    + pu8_src_bottom_U[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
+
+            V_32 = (((pu8_src_top_V[0]*(16-u32_x_frac_U) + pu8_src_top_V[1]*u32_x_frac_U)
+                    *(16-u32_y_frac_U) + (pu8_src_bottom_V[0]*(16-u32_x_frac_U)
+                    + pu8_src_bottom_V[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
+
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
+
+            u32_x_accum_U += (u32_x_inc[UPlane]);
+
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                u32_rgb_temp1 = PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                u32_rgb_temp1 = PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in1[YPlane]+(u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 ) >> 8);
+
+            u32_x_accum_Y += u32_x_inc[YPlane];
+            /* Horizontal weight factor */
+            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                u32_rgb_temp2 = PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                u32_rgb_temp2 = PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16) ;
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
+                    PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
+                    PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in1[YPlane]+ (u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 )>>8);
+
+            u32_x_accum_Y += u32_x_inc[YPlane];
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                Yval_32=Y_32*37;
+            #else   /* __RGB_V1__v */
+                Yval_32=Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
+                    PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
+                    PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+        }   /* End of horizontal scanning */
+
+        u32_y_accum_Y = u32_rgb_temp3 + (u32_y_inc[YPlane]);
+        u32_y_accum_U += (u32_y_inc[UPlane]);
+
+        /* Y plane row update */
+        if (u32_y_accum_Y >> 16)
+        {
+            pu8_data_in[YPlane] = pu8_data_in1[YPlane] +
+                ((u32_y_accum_Y >> 16) * (u32_stride_in[YPlane]));
+            u32_y_accum_Y &= 0xffff;
+        }
+        else
+        {
+            pu8_data_in[YPlane] = pu8_data_in1[YPlane];
+        }
+        /* U and V planes row update */
+        if (u32_y_accum_U >> 16)
+        {
+            pu8_data_in[UPlane] = pu8_data_in[UPlane] +
+                (u32_y_accum_U >> 16) * (u32_stride_in[UPlane]);
+            pu8_data_in[VPlane] = pu8_data_in[VPlane] +
+                (u32_y_accum_U >> 16) * (u32_stride_in[VPlane]);
+            u32_y_accum_U &= 0xffff;
+        }
+
+        pu32_rgb_data_start += u32_stride_out;
+
+    }   /* End of vertical scanning */
+    return M4VIFI_OK;
+}
+