-rwxr-xr-x  libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h                               |  3
-rwxr-xr-x  libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c                                | 85
-rwxr-xr-x  libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h |  3
-rwxr-xr-x  libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp        | 34
4 files changed, 89 insertions, 36 deletions
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
index 4bf2e84..7c500da 100755
--- a/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
@@ -273,6 +273,9 @@
/**
* Output file must be 3GPP or MP3 */
#define M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0117)
+/**
+ * Can not find a valid video frame */
+#define M4VSS3GPP_ERR_NO_VALID_VID_FRAME M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0118)
#endif /* __M4VSS3GPP_ErrorCodes_H__ */
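
The new constant follows the usual OSAL scheme of packing a severity, a core ID and a 16-bit error number into one 32-bit M4OSA_ERR. The sketch below only illustrates that convention with stand-in values (the bit layout and the M4VSS3GPP core ID are assumptions, not taken from this patch) and shows how a caller would test for the new code:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the real OSAL definitions in M4OSA_Error.h / M4OSA_CoreID.h. */
    typedef uint32_t M4OSA_ERR;
    #define M4_ERR     2u       /* severity "error" (assumed encoding) */
    #define M4VSS3GPP  0x32u    /* core ID placeholder (assumed value) */

    /* Assumed layout: severity in the top 2 bits, core ID in bits 16..29,
     * error number in the low 16 bits. */
    #define M4OSA_ERR_CREATE(severity, coreID, errorID)          \
        ((M4OSA_ERR)(((uint32_t)(severity) << 30) |              \
                     (((uint32_t)(coreID) & 0x3FFFu) << 16) |    \
                     ((uint32_t)(errorID) & 0xFFFFu)))

    #define M4VSS3GPP_ERR_NO_VALID_VID_FRAME \
        M4OSA_ERR_CREATE(M4_ERR, M4VSS3GPP, 0x0118)

    int main(void) {
        M4OSA_ERR err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
        if (err == M4VSS3GPP_ERR_NO_VALID_VID_FRAME) {
            /* An editing client would typically abort the export here. */
            printf("editing failed: no valid video frame (0x%08x)\n", (unsigned)err);
        }
        return 0;
    }
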
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
index 59d57e5..f19f412 100755
--- a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
+++ b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
@@ -20,6 +20,8 @@
* @note
******************************************************************************
*/
+#undef M4OSA_TRACE_LEVEL
+#define M4OSA_TRACE_LEVEL 1
/****************/
/*** Includes ***/
@@ -491,7 +493,6 @@ M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
}
#endif //M4VSS_SUPPORT_OMX_CODECS
-
}
else if( M4NO_ERROR != err ) /**< ...or an encoder error */
{
@@ -655,7 +656,6 @@ M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
}
#endif //M4VSS_SUPPORT_OMX_CODECS
-
}
else if( M4NO_ERROR != err ) /**< ...or an encoder error */
{
@@ -1198,7 +1198,7 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
if ((pC->pC1->isRenderDup == M4OSA_TRUE) ||
(M4WAR_VIDEORENDERER_NO_NEW_FRAME == err)) {
pTmp = pC->yuv1;
- if (pC->pC1->lastDecodedPlane != M4NO_ERROR) {
+ if (pC->pC1->lastDecodedPlane != M4OSA_NULL) {
/* Copy last decoded plane to output plane */
memcpy((void *)pTmp[0].pac_data,
(void *)pC->pC1->lastDecodedPlane[0].pac_data,
@@ -1209,6 +1209,12 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
memcpy((void *)pTmp[2].pac_data,
(void *)pC->pC1->lastDecodedPlane[2].pac_data,
(pTmp[2].u_height * pTmp[2].u_width));
+ } else {
+ err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
+ M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
+ err, __FILE__, __LINE__);
+ pC->ewc.VppError = err;
+ return M4NO_ERROR;
}
pC->pC1->lastDecodedPlane = pTmp;
}
@@ -1238,7 +1244,7 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
if ((pC->pC2->isRenderDup == M4OSA_TRUE) ||
(M4WAR_VIDEORENDERER_NO_NEW_FRAME == err)) {
pTmp = pC->yuv2;
- if (pC->pC2->lastDecodedPlane != M4NO_ERROR) {
+ if (pC->pC2->lastDecodedPlane != M4OSA_NULL) {
/* Copy last decoded plane to output plane */
memcpy((void *)pTmp[0].pac_data,
(void *)pC->pC2->lastDecodedPlane[0].pac_data,
@@ -1249,6 +1255,12 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
memcpy((void *)pTmp[2].pac_data,
(void *)pC->pC2->lastDecodedPlane[2].pac_data,
(pTmp[2].u_height * pTmp[2].u_width));
+ } else {
+ err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
+ M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
+ err, __FILE__, __LINE__);
+ pC->ewc.VppError = err;
+ return M4NO_ERROR;
}
pC->pC2->lastDecodedPlane = pTmp;
}
@@ -1505,17 +1517,29 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame) {
/**
* Copy last decoded plane to output plane */
- memcpy((void *)pC->pC1->m_pPreResizeFrame[0].pac_data,
- (void *)pC->pC1->lastDecodedPlane[0].pac_data,
- (pC->pC1->m_pPreResizeFrame[0].u_height * pC->pC1->m_pPreResizeFrame[0].u_width));
-
- memcpy((void *)pC->pC1->m_pPreResizeFrame[1].pac_data,
- (void *)pC->pC1->lastDecodedPlane[1].pac_data,
- (pC->pC1->m_pPreResizeFrame[1].u_height * pC->pC1->m_pPreResizeFrame[1].u_width));
-
- memcpy((void *)pC->pC1->m_pPreResizeFrame[2].pac_data,
- (void *)pC->pC1->lastDecodedPlane[2].pac_data,
- (pC->pC1->m_pPreResizeFrame[2].u_height * pC->pC1->m_pPreResizeFrame[2].u_width));
+ if (pC->pC1->lastDecodedPlane != M4OSA_NULL) {
+
+ memcpy((void *)pC->pC1->m_pPreResizeFrame[0].pac_data,
+ (void *)pC->pC1->lastDecodedPlane[0].pac_data,
+ (pC->pC1->m_pPreResizeFrame[0].u_height * \
+ pC->pC1->m_pPreResizeFrame[0].u_width));
+
+ memcpy((void *)pC->pC1->m_pPreResizeFrame[1].pac_data,
+ (void *)pC->pC1->lastDecodedPlane[1].pac_data,
+ (pC->pC1->m_pPreResizeFrame[1].u_height * \
+ pC->pC1->m_pPreResizeFrame[1].u_width));
+
+ memcpy((void *)pC->pC1->m_pPreResizeFrame[2].pac_data,
+ (void *)pC->pC1->lastDecodedPlane[2].pac_data,
+ (pC->pC1->m_pPreResizeFrame[2].u_height * \
+ pC->pC1->m_pPreResizeFrame[2].u_width));
+ } else {
+ err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
+ M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
+ err, __FILE__, __LINE__);
+ pC->ewc.VppError = err;
+ return M4NO_ERROR;
+ }
if(pC->nbActiveEffects > 0) {
/**
@@ -1587,17 +1611,26 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
}
/**
* Copy last decoded plane to output plane */
- memcpy((void *)pLastDecodedFrame[0].pac_data,
- (void *)pC->pC1->lastDecodedPlane[0].pac_data,
- (pLastDecodedFrame[0].u_height * pLastDecodedFrame[0].u_width));
-
- memcpy((void *)pLastDecodedFrame[1].pac_data,
- (void *)pC->pC1->lastDecodedPlane[1].pac_data,
- (pLastDecodedFrame[1].u_height * pLastDecodedFrame[1].u_width));
-
- memcpy((void *)pLastDecodedFrame[2].pac_data,
- (void *)pC->pC1->lastDecodedPlane[2].pac_data,
- (pLastDecodedFrame[2].u_height * pLastDecodedFrame[2].u_width));
+ if (pC->pC1->lastDecodedPlane != M4OSA_NULL &&
+ pLastDecodedFrame != M4OSA_NULL) {
+ memcpy((void *)pLastDecodedFrame[0].pac_data,
+ (void *)pC->pC1->lastDecodedPlane[0].pac_data,
+ (pLastDecodedFrame[0].u_height * pLastDecodedFrame[0].u_width));
+
+ memcpy((void *)pLastDecodedFrame[1].pac_data,
+ (void *)pC->pC1->lastDecodedPlane[1].pac_data,
+ (pLastDecodedFrame[1].u_height * pLastDecodedFrame[1].u_width));
+
+ memcpy((void *)pLastDecodedFrame[2].pac_data,
+ (void *)pC->pC1->lastDecodedPlane[2].pac_data,
+ (pLastDecodedFrame[2].u_height * pLastDecodedFrame[2].u_width));
+ } else {
+ err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
+ M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
+ err, __FILE__, __LINE__);
+ pC->ewc.VppError = err;
+ return M4NO_ERROR;
+ }
pTmp = pPlaneOut;
/**
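
All four hunks above add the same guard: when there is no last decoded plane to duplicate, the VPP callback records M4VSS3GPP_ERR_NO_VALID_VID_FRAME in pC->ewc.VppError and still returns M4NO_ERROR, leaving the edit loop to surface the error. A condensed sketch of that pattern, with hypothetical stand-in types (the real code works on M4VIFI_ImagePlane triplets inside the M4VSS3GPP contexts):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef uint32_t M4OSA_ERR;
    #define M4NO_ERROR                        0u
    #define M4VSS3GPP_ERR_NO_VALID_VID_FRAME  0x80320118u  /* numeric value assumed */

    /* Minimal stand-in for one YUV plane. */
    typedef struct { uint8_t *pac_data; uint32_t u_width, u_height; } Plane;

    /* Copy the last decoded YUV420 frame into dst[0..2], or record the error
     * for later and report success so the pre-processing callback itself
     * does not abort mid-frame. */
    static M4OSA_ERR copyLastDecodedOrDefer(Plane dst[3], const Plane *lastDecoded,
                                            M4OSA_ERR *pVppError) {
        if (lastDecoded == NULL) {
            *pVppError = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;  /* like pC->ewc.VppError */
            return M4NO_ERROR;                              /* error is raised later */
        }
        for (int i = 0; i < 3; i++) {
            memcpy(dst[i].pac_data, lastDecoded[i].pac_data,
                   (size_t)dst[i].u_height * dst[i].u_width);
        }
        return M4NO_ERROR;
    }
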
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
index cca5ee9..6762643 100755
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
@@ -115,6 +115,9 @@ typedef struct {
ARect mCropRect; // These are obtained from kKeyCropRect.
I420ColorConverter* mI420ColorConverter;
+ // Time interval between two consecutive video frames.
+ M4_MediaTime mFrameIntervalMs;
+
} VideoEditorVideoDecoder_Context;
} //namespace android
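
The new mFrameIntervalMs field lets the decoder shell estimate where the next frame should sit on the timeline. A rough, self-contained illustration with example numbers (the cut-down struct and the values are hypothetical; the real context also carries the decoder, buffer pool, crop rectangle, and so on):

    #include <stdio.h>

    typedef struct {
        double m_lastDecodedCTS;   /* ms, CTS of the last decoded frame */
        double mFrameIntervalMs;   /* 1000 / average frame rate */
    } DecoderShellCtx;

    int main(void) {
        DecoderShellCtx ctx = { 966.7, 33.3 };   /* example values only */
        /* Assuming a constant frame rate, the next frame is expected one
         * interval after the last decoded one. */
        printf("next expected CTS: %.1f ms\n",
               ctx.m_lastDecodedCTS + ctx.mFrameIntervalMs);   /* ~1000.0 ms */
        return 0;
    }
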
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
index aa26252..21d3c30 100755
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
@@ -982,6 +982,11 @@ M4OSA_ERR VideoEditorVideoDecoder_create(M4OSA_Context *pContext,
pDecShellContext->mLastOutputCts = -1;
pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+ // Calculate the interval between two video frames.
+ CHECK(pDecShellContext->m_pVideoStreamhandler->m_averageFrameRate > 0);
+ pDecShellContext->mFrameIntervalMs =
+ 1000.0 / pDecShellContext->m_pVideoStreamhandler->m_averageFrameRate;
+
/**
* StageFright graph building
*/
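
Numerically the interval is just the reciprocal of the average frame rate, expressed in milliseconds, and the CHECK above guards the division against a zero or unset rate. A standalone sketch (the 29.97 fps figure is only an example):

    #include <assert.h>
    #include <stdio.h>

    int main(void) {
        float averageFrameRate = 29.97f;   /* example value, not from the patch */
        assert(averageFrameRate > 0);      /* mirrors the CHECK() in _create() */
        double frameIntervalMs = 1000.0 / averageFrameRate;
        printf("frame interval: %.2f ms\n", frameIntervalMs);   /* ~33.37 ms */
        return 0;
    }
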
@@ -1423,8 +1428,25 @@ M4OSA_ERR VideoEditorVideoDecoder_decode(M4OSA_Context context,
ALOGV("VideoEditorVideoDecoder_decode,decoded frametime = %lf,size = %d",
(M4_MediaTime)lFrameTime, pDecoderBuffer->size() );
- // If bJump is false, we need to save every decoded buffer
- if (!bJump) {
+ /*
+ * If bJump == false, every decoded buffer must be saved to the queue:
+ * these buffers have a timestamp >= the target time *pTime (for
+ * instance, the transition point between two videos, or a trimming
+ * position inside one video), so they are part of the transition clip
+ * or the trimmed video.
+ *
+ * If *pTime does not coincide with any existing video frame, we also
+ * want the buffer right before *pTime: in the transcoding phase it is
+ * encoded as a key frame and becomes the first video frame of the
+ * generated transition or trimmed video, so it must be queued as well.
+ */
+ int64_t targetTimeMs =
+ pDecShellContext->m_lastDecodedCTS +
+ pDecShellContext->mFrameIntervalMs +
+ tolerance;
+ if (!bJump || targetTimeMs > *pTime) {
lerr = copyBufferToQueue(pDecShellContext, pDecoderBuffer);
if (lerr != M4NO_ERROR) {
goto VIDEOEDITOR_VideoDecode_cleanUP;
@@ -1432,14 +1454,6 @@ M4OSA_ERR VideoEditorVideoDecoder_decode(M4OSA_Context context,
}
}
- // If bJump is true, we only need to copy the last buffer
- if (bJump) {
- lerr = copyBufferToQueue(pDecShellContext, pDecoderBuffer);
- if (lerr != M4NO_ERROR) {
- goto VIDEOEDITOR_VideoDecode_cleanUP;
- }
- }
-
pDecShellContext->mNbOutputFrames++;
if ( 0 > pDecShellContext->mFirstOutputCts ) {
pDecShellContext->mFirstOutputCts = *pTime;
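
The revised condition can be exercised in isolation: a frame is queued when we are not jumping, or when it is the last frame before the requested time, i.e. the frame one interval later (plus some tolerance) would already pass *pTime. A small sketch with example numbers (the tolerance value is an assumption):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same shape as the patched condition: !bJump || targetTimeMs > requestedTimeMs. */
    static bool shouldQueueFrame(bool bJump, double lastDecodedCtsMs,
                                 double frameIntervalMs, double toleranceMs,
                                 double requestedTimeMs) {
        double targetTimeMs = lastDecodedCtsMs + frameIntervalMs + toleranceMs;
        return !bJump || targetTimeMs > requestedTimeMs;
    }

    int main(void) {
        /* Jumping to 1000 ms at ~30 fps: the frame at 980 ms is the last one
         * before the target, so it is queued and later re-encoded as the key
         * frame of the transition or trimmed clip. */
        printf("%d\n", shouldQueueFrame(true, 980.0, 33.3, 0.1, 1000.0));   /* 1 */
        /* A frame at 900 ms is still more than one interval away and is skipped. */
        printf("%d\n", shouldQueueFrame(true, 900.0, 33.3, 0.1, 1000.0));   /* 0 */
        return 0;
    }
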