From 3e1e78ccd5936111f31a709592f11aa7c677c42d Mon Sep 17 00:00:00 2001
From: James Dong
Date: Mon, 4 Jun 2012 19:07:28 -0700
Subject: Fix green or corrupted video frames in the exported movies

At least two use cases are affected: trimming and transition clip
generation. If the starting time of the transition or the trimming
position does not coincide with any existing video frame, the video
frame right before the transition starting position or the trimming
position must be encoded as the first video frame of the transition
or the trimmed video, and it must be encoded as a key frame. Without
this patch, that frame was not saved. In the transcoding phase, since
such a frame could not be found, the existing code did not return an
error and incorrectly used some other frame as the reference key frame
for the remaining video frames of the transition or the trimmed video.
As a result, corrupted or green video frames may appear in the
exported movies.

The fix is to always save the frame right before the trimming position
or the transition starting time, so that a key frame is always encoded
and the correct reference frame is used for the remaining video frames.
In addition, checks are added to guard against using incorrect
reference frames in the future.

Patch was originally contributed by teng.hong@nxp.com

Change-Id: I182784d4c1786224445224634edab3338bf2278f
related-to-bug: 5713461
---
 libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h |  3 +
 libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c  | 85 +++++++++++++++-------
 .../inc/VideoEditorVideoDecoder_internal.h    |  3 +
 .../src/VideoEditorVideoDecoder.cpp           | 34 ++++++---
 4 files changed, 89 insertions(+), 36 deletions(-)

diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
index 4bf2e84..7c500da 100755
--- a/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
@@ -273,6 +273,9 @@
 /**
  * Output file must be 3GPP or MP3 */
 #define M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR    M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0117)
 
+/**
+ * Can not find a valid video frame */
+#define M4VSS3GPP_ERR_NO_VALID_VID_FRAME    M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0118)
 
 #endif /* __M4VSS3GPP_ErrorCodes_H__ */

diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
index 59d57e5..f19f412 100755
--- a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
+++ b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
@@ -20,6 +20,8 @@
  * @note
  ******************************************************************************
 */
+#undef M4OSA_TRACE_LEVEL
+#define M4OSA_TRACE_LEVEL 1
 
 /****************/
 /*** Includes ***/
@@ -491,7 +493,6 @@ M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
             }
 #endif //M4VSS_SUPPORT_OMX_CODECS
-
         }
         else if( M4NO_ERROR != err ) /**< ...or an encoder error */
         {
@@ -655,7 +656,6 @@ M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
             }
 #endif //M4VSS_SUPPORT_OMX_CODECS
-
         }
         else if( M4NO_ERROR != err ) /**< ...or an encoder error */
         {
@@ -1198,7 +1198,7 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
         if ((pC->pC1->isRenderDup == M4OSA_TRUE) ||
                 (M4WAR_VIDEORENDERER_NO_NEW_FRAME == err)) {
             pTmp = pC->yuv1;
-            if (pC->pC1->lastDecodedPlane != M4NO_ERROR) {
+            if (pC->pC1->lastDecodedPlane != M4OSA_NULL) {
                 /* Copy last decoded plane to output plane */
                 memcpy((void *)pTmp[0].pac_data,
                     (void *)pC->pC1->lastDecodedPlane[0].pac_data,
@@ -1209,6 +1209,12 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
                 memcpy((void *)pTmp[2].pac_data,
                     (void *)pC->pC1->lastDecodedPlane[2].pac_data,
                     (pTmp[2].u_height * pTmp[2].u_width));
+            } else {
+                err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
+                M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
+                    err, __FILE__, __LINE__);
+                pC->ewc.VppError = err;
+                return M4NO_ERROR;
             }
             pC->pC1->lastDecodedPlane = pTmp;
         }
@@ -1238,7 +1244,7 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
         if ((pC->pC2->isRenderDup == M4OSA_TRUE) ||
                 (M4WAR_VIDEORENDERER_NO_NEW_FRAME == err)) {
             pTmp = pC->yuv2;
-            if (pC->pC2->lastDecodedPlane != M4NO_ERROR) {
+            if (pC->pC2->lastDecodedPlane != M4OSA_NULL) {
                 /* Copy last decoded plane to output plane */
                 memcpy((void *)pTmp[0].pac_data,
                     (void *)pC->pC2->lastDecodedPlane[0].pac_data,
@@ -1249,6 +1255,12 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
                 memcpy((void *)pTmp[2].pac_data,
                     (void *)pC->pC2->lastDecodedPlane[2].pac_data,
                     (pTmp[2].u_height * pTmp[2].u_width));
+            } else {
+                err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
+                M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
+                    err, __FILE__, __LINE__);
+                pC->ewc.VppError = err;
+                return M4NO_ERROR;
             }
             pC->pC2->lastDecodedPlane = pTmp;
         }
@@ -1505,17 +1517,29 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
         if (M4OSA_NULL != pC->pC1->m_pPreResizeFrame) {
             /**
             * Copy last decoded plane to output plane */
-            memcpy((void *)pC->pC1->m_pPreResizeFrame[0].pac_data,
-                (void *)pC->pC1->lastDecodedPlane[0].pac_data,
-                (pC->pC1->m_pPreResizeFrame[0].u_height * pC->pC1->m_pPreResizeFrame[0].u_width));
-
-            memcpy((void *)pC->pC1->m_pPreResizeFrame[1].pac_data,
-                (void *)pC->pC1->lastDecodedPlane[1].pac_data,
-                (pC->pC1->m_pPreResizeFrame[1].u_height * pC->pC1->m_pPreResizeFrame[1].u_width));
-
-            memcpy((void *)pC->pC1->m_pPreResizeFrame[2].pac_data,
-                (void *)pC->pC1->lastDecodedPlane[2].pac_data,
-                (pC->pC1->m_pPreResizeFrame[2].u_height * pC->pC1->m_pPreResizeFrame[2].u_width));
+            if (pC->pC1->lastDecodedPlane != M4OSA_NULL) {
+
+                memcpy((void *)pC->pC1->m_pPreResizeFrame[0].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[0].pac_data,
+                    (pC->pC1->m_pPreResizeFrame[0].u_height * \
+                     pC->pC1->m_pPreResizeFrame[0].u_width));
+
+                memcpy((void *)pC->pC1->m_pPreResizeFrame[1].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[1].pac_data,
+                    (pC->pC1->m_pPreResizeFrame[1].u_height * \
+                     pC->pC1->m_pPreResizeFrame[1].u_width));
+
+                memcpy((void *)pC->pC1->m_pPreResizeFrame[2].pac_data,
+                    (void *)pC->pC1->lastDecodedPlane[2].pac_data,
+                    (pC->pC1->m_pPreResizeFrame[2].u_height * \
+                     pC->pC1->m_pPreResizeFrame[2].u_width));
+            } else {
+                err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
+                M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
+                    err, __FILE__, __LINE__);
+                pC->ewc.VppError = err;
+                return M4NO_ERROR;
+            }
 
             if(pC->nbActiveEffects > 0) {
             /**
@@ -1587,17 +1611,26 @@ M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
         }
         /**
         * Copy last decoded plane to output plane */
-        memcpy((void *)pLastDecodedFrame[0].pac_data,
-            (void *)pC->pC1->lastDecodedPlane[0].pac_data,
-            (pLastDecodedFrame[0].u_height * pLastDecodedFrame[0].u_width));
-
-        memcpy((void *)pLastDecodedFrame[1].pac_data,
-            (void *)pC->pC1->lastDecodedPlane[1].pac_data,
-            (pLastDecodedFrame[1].u_height * pLastDecodedFrame[1].u_width));
-
-        memcpy((void *)pLastDecodedFrame[2].pac_data,
-            (void *)pC->pC1->lastDecodedPlane[2].pac_data,
-            (pLastDecodedFrame[2].u_height * pLastDecodedFrame[2].u_width));
+        if (pC->pC1->lastDecodedPlane != M4OSA_NULL &&
+            pLastDecodedFrame != M4OSA_NULL) {
+            memcpy((void *)pLastDecodedFrame[0].pac_data,
+                (void *)pC->pC1->lastDecodedPlane[0].pac_data,
+                (pLastDecodedFrame[0].u_height * pLastDecodedFrame[0].u_width));
+
+            memcpy((void *)pLastDecodedFrame[1].pac_data,
+                (void *)pC->pC1->lastDecodedPlane[1].pac_data,
+                (pLastDecodedFrame[1].u_height * pLastDecodedFrame[1].u_width));
+
+            memcpy((void *)pLastDecodedFrame[2].pac_data,
+                (void *)pC->pC1->lastDecodedPlane[2].pac_data,
+                (pLastDecodedFrame[2].u_height * pLastDecodedFrame[2].u_width));
+        } else {
+            err = M4VSS3GPP_ERR_NO_VALID_VID_FRAME;
+            M4OSA_TRACE1_3("Can not find an input frame. Set error 0x%x in %s (%d)",
+                err, __FILE__, __LINE__);
+            pC->ewc.VppError = err;
+            return M4NO_ERROR;
+        }
 
         pTmp = pPlaneOut;
         /**
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
index cca5ee9..6762643 100755
--- a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
@@ -115,6 +115,9 @@ typedef struct {
     ARect mCropRect;  // These are obtained from kKeyCropRect.
 
     I420ColorConverter* mI420ColorConverter;
+    // Time interval between two consecutive/neighboring video frames.
+    M4_MediaTime mFrameIntervalMs;
+
 } VideoEditorVideoDecoder_Context;
 
 } //namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
index aa26252..21d3c30 100755
--- a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
@@ -982,6 +982,11 @@ M4OSA_ERR VideoEditorVideoDecoder_create(M4OSA_Context *pContext,
     pDecShellContext->mLastOutputCts = -1;
     pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
 
+    // Calculate the interval between two video frames.
+    CHECK(pDecShellContext->m_pVideoStreamhandler->m_averageFrameRate > 0);
+    pDecShellContext->mFrameIntervalMs =
+            1000.0 / pDecShellContext->m_pVideoStreamhandler->m_averageFrameRate;
+
     /**
     * StageFright graph building */
@@ -1423,8 +1428,25 @@ M4OSA_ERR VideoEditorVideoDecoder_decode(M4OSA_Context context,
         ALOGV("VideoEditorVideoDecoder_decode,decoded frametime = %lf,size = %d",
             (M4_MediaTime)lFrameTime, pDecoderBuffer->size() );
 
-        // If bJump is false, we need to save every decoded buffer
-        if (!bJump) {
+        /*
+         * We need to save a buffer to a queue if bJump == false. These
+         * buffers have a timestamp >= the target time, *pTime (for instance,
+         * the transition between two videos, or a trimming position inside
+         * one video), since they are part of the transition clip or the
+         * trimmed video.
+         *
+         * If *pTime does not have the same value as any of the existing
+         * video frames, we would like to get the buffer right before *pTime;
+         * in the transcoding phase, this video frame will be encoded as a
+         * key frame and becomes the first video frame of the transition or
+         * the trimmed video to be generated. This buffer must also be queued.
+         *
+         */
+        int64_t targetTimeMs =
+                pDecShellContext->m_lastDecodedCTS +
+                pDecShellContext->mFrameIntervalMs +
+                tolerance;
+        if (!bJump || targetTimeMs > *pTime) {
             lerr = copyBufferToQueue(pDecShellContext, pDecoderBuffer);
             if (lerr != M4NO_ERROR) {
                 goto VIDEOEDITOR_VideoDecode_cleanUP;
@@ -1432,14 +1454,6 @@ M4OSA_ERR VideoEditorVideoDecoder_decode(M4OSA_Context context,
             }
         }
 
-        // If bJump is true, we only need to copy the last buffer
-        if (bJump) {
-            lerr = copyBufferToQueue(pDecShellContext, pDecoderBuffer);
-            if (lerr != M4NO_ERROR) {
-                goto VIDEOEDITOR_VideoDecode_cleanUP;
-            }
-        }
-
         pDecShellContext->mNbOutputFrames++;
         if ( 0 > pDecShellContext->mFirstOutputCts ) {
             pDecShellContext->mFirstOutputCts = *pTime;
--
cgit v1.1
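
Note on the decoder-side change: the standalone sketch below illustrates why the frame immediately before a non-frame-aligned trim or transition time must be queued during a jump. It is a minimal illustration, not the committed code; the names (frameIntervalMs, shouldQueueFrame), the 30 fps stream, the tolerance value, and the 50 ms trim position are hypothetical stand-ins. Only the predicate mirrors the patch's "!bJump || targetTimeMs > *pTime" test.

#include <cstdio>
#include <vector>

// Interval between two consecutive frames, derived from the average frame
// rate, in the spirit of mFrameIntervalMs = 1000.0 / m_averageFrameRate.
static double frameIntervalMs(double averageFrameRate) {
    return 1000.0 / averageFrameRate;
}

// Keep everything when not jumping; when jumping, keep a frame only if the
// *next* frame would already pass the target time, i.e. this frame is the
// last one at or before the trim/transition point.
static bool shouldQueueFrame(bool bJump, double decodedCtsMs, double intervalMs,
                             double toleranceMs, double targetTimeMs) {
    double nextFrameMs = decodedCtsMs + intervalMs + toleranceMs;
    return !bJump || nextFrameMs > targetTimeMs;
}

int main() {
    const double interval = frameIntervalMs(30.0);  // ~33.33 ms between frames
    const double tolerance = interval / 6.0;        // small slack, as an example
    const double trimPositionMs = 50.0;             // not aligned to any frame

    // Frames decoded while seeking towards the trim position.
    std::vector<double> decodedCts = {0.0, interval, 2 * interval};
    for (double cts : decodedCts) {
        bool queued = shouldQueueFrame(true /* bJump */, cts, interval,
                                       tolerance, trimPositionMs);
        std::printf("frame at %6.2f ms -> %s\n", cts,
                    queued ? "queued" : "dropped");
    }
    // The frame at 33.33 ms precedes the 50 ms trim point but is queued,
    // because the following frame (66.67 ms) would overshoot it; this is the
    // frame that gets re-encoded as the key frame, avoiding green or
    // corrupted output.
    return 0;
}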
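
Note on the editing-side checks: the sketch below shows, with simplified stand-in types, the guard pattern the patch adds in M4VSS3GPP_intVPP(): never copy from lastDecodedPlane when no frame has been decoded, and record an error in the writer context instead of silently reusing stale data. The callback still returns "no error"; the caller is expected to inspect the recorded error later. All names (Plane, WriterContext, duplicateLastFrame) are illustrative, and the error value is a placeholder for M4VSS3GPP_ERR_NO_VALID_VID_FRAME, which the real code builds with M4OSA_ERR_CREATE().

#include <cstdint>
#include <cstdio>
#include <cstring>

typedef uint32_t M4OSA_ERR;
static const M4OSA_ERR kNoError = 0;
static const M4OSA_ERR kErrNoValidVidFrame = 0x0118;  // placeholder value

struct Plane { uint8_t* data; size_t size; };
struct WriterContext { M4OSA_ERR vppError; };

// Duplicate the last decoded frame into the output plane, or record the error
// when no frame has been decoded yet (e.g. a trim point before the first frame).
static M4OSA_ERR duplicateLastFrame(WriterContext* ewc,
                                    const Plane* lastDecodedPlane, Plane* out) {
    if (lastDecodedPlane == nullptr) {
        ewc->vppError = kErrNoValidVidFrame;  // surfaced by the edit loop later
        return kNoError;
    }
    std::memcpy(out->data, lastDecodedPlane->data, out->size);
    return kNoError;
}

int main() {
    uint8_t src[4] = {1, 2, 3, 4};
    uint8_t dst[4] = {0, 0, 0, 0};
    Plane last = {src, sizeof(src)};
    Plane out = {dst, sizeof(dst)};
    WriterContext ewc = {kNoError};

    duplicateLastFrame(&ewc, &last, &out);     // normal duplication path
    duplicateLastFrame(&ewc, nullptr, &out);   // missing reference frame
    std::printf("vppError after missing frame: 0x%x\n", ewc.vppError);
    return 0;
}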