-rw-r--r--  include/media/stagefright/MetaData.h                       |   2
-rw-r--r--  include/media/stagefright/OMXCodec.h                       |   3
-rw-r--r--  media/libstagefright/MPEG4Extractor.cpp                    |  50
-rw-r--r--  media/libstagefright/OMXCodec.cpp                          |  74
-rw-r--r--  media/libstagefright/Utils.cpp                             |  50
-rw-r--r--  media/libstagefright/codecs/hevcdec/Android.mk             |  22
-rw-r--r--  media/libstagefright/codecs/hevcdec/SoftHEVC.cpp           | 710
-rw-r--r--  media/libstagefright/codecs/hevcdec/SoftHEVC.h             | 117
-rw-r--r--  media/libstagefright/data/media_codecs_google_video.xml    |   1
-rw-r--r--  media/libstagefright/omx/SoftOMXPlugin.cpp                 |   1
-rw-r--r--  media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp  |   6
11 files changed, 1028 insertions(+), 8 deletions(-)
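In outline, the change has two halves: the container/OMX plumbing learns to recognise the 'hvcC' box (HEVCDecoderConfigurationRecord) well enough to pull out the profile, level, NAL length-field size and parameter-set NAL units, and a new software decoder (SoftHEVC, built on the Ittiam libhevc sources under external/libhevc) is registered as OMX.google.hevc.decoder. As orientation for the parsing hunks in MPEG4Extractor.cpp, OMXCodec.cpp and Utils.cpp below, here is a standalone sketch that walks the same record layout; the names HvccInfo and parseHvcc are illustrative only and do not appear in the patch.

#include <cstddef>
#include <cstdint>
#include <vector>

struct HvccInfo {
    unsigned profile = 0;                         // general_profile_idc
    unsigned level = 0;                           // general_level_idc
    size_t nalLengthSize = 0;                     // 1 + lengthSizeMinusOne
    std::vector<std::vector<uint8_t>> paramSets;  // VPS/SPS/PPS payloads
};

// Walks an hvcC record: 22 fixed bytes, a one-byte array count, then
// arrays of length-prefixed parameter-set NAL units.
static bool parseHvcc(const uint8_t *ptr, size_t size, HvccInfo *out) {
    if (size < 23 || ptr[0] != 1) {               // configurationVersion == 1
        return false;
    }
    out->profile = ptr[1] & 0x1f;                 // low 5 bits of byte 1
    out->level = ptr[12];                         // general_level_idc
    out->nalLengthSize = 1 + (ptr[21] & 3);       // lengthSizeMinusOne in byte 21

    size_t numArrays = ptr[22];
    ptr += 23;
    size -= 23;

    for (size_t i = 0; i < numArrays; ++i) {
        if (size < 3) {
            return false;
        }
        // ptr[0] carries array_completeness and the NAL unit type; the
        // patch, like this sketch, only needs the payloads themselves.
        size_t numNalus = (size_t(ptr[1]) << 8) | ptr[2];
        ptr += 3;
        size -= 3;

        for (size_t j = 0; j < numNalus; ++j) {
            if (size < 2) {
                return false;
            }
            size_t length = (size_t(ptr[0]) << 8) | ptr[1];
            ptr += 2;
            size -= 2;
            if (size < length) {
                return false;
            }
            out->paramSets.emplace_back(ptr, ptr + length);
            ptr += length;
            size -= length;
        }
    }
    return true;
}

OMXCodec::parseHEVCCodecSpecificData and the csd-0 builder in Utils.cpp follow the same walk: the former hands each parameter set to addCodecSpecificData(), the latter prefixes each with a 00 00 00 01 start code before publishing it as "csd-0".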
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h index e862ec3..d38d976 100644 --- a/include/media/stagefright/MetaData.h +++ b/include/media/stagefright/MetaData.h @@ -53,6 +53,7 @@ enum { kKeyESDS = 'esds', // raw data kKeyAACProfile = 'aacp', // int32_t kKeyAVCC = 'avcc', // raw data + kKeyHVCC = 'hvcc', // raw data kKeyD263 = 'd263', // raw data kKeyVorbisInfo = 'vinf', // raw data kKeyVorbisBooks = 'vboo', // raw data @@ -170,6 +171,7 @@ enum { enum { kTypeESDS = 'esds', kTypeAVCC = 'avcc', + kTypeHVCC = 'hvcc', kTypeD263 = 'd263', }; diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h index 5121c17..5590b60 100644 --- a/include/media/stagefright/OMXCodec.h +++ b/include/media/stagefright/OMXCodec.h @@ -352,6 +352,9 @@ private: int64_t getDecodingTimeUs(); + status_t parseHEVCCodecSpecificData( + const void *data, size_t size, + unsigned *profile, unsigned *level); status_t parseAVCCodecSpecificData( const void *data, size_t size, unsigned *profile, unsigned *level); diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp index e07b6aa..76546f3 100644 --- a/media/libstagefright/MPEG4Extractor.cpp +++ b/media/libstagefright/MPEG4Extractor.cpp @@ -95,6 +95,7 @@ private: uint64_t* mCurrentSampleInfoOffsets; bool mIsAVC; + bool mIsHEVC; size_t mNALLengthSize; bool mStarted; @@ -317,6 +318,9 @@ static const char *FourCC2MIME(uint32_t fourcc) { case FOURCC('a', 'v', 'c', '1'): return MEDIA_MIMETYPE_VIDEO_AVC; + case FOURCC('h', 'v', 'c', '1'): + case FOURCC('h', 'e', 'v', '1'): + return MEDIA_MIMETYPE_VIDEO_HEVC; default: CHECK(!"should not be here."); return NULL; @@ -1288,6 +1292,8 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { case FOURCC('H', '2', '6', '3'): case FOURCC('h', '2', '6', '3'): case FOURCC('a', 'v', 'c', '1'): + case FOURCC('h', 'v', 'c', '1'): + case FOURCC('h', 'e', 'v', '1'): { mHasVideo = true; @@ -1580,6 +1586,21 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { break; } + case FOURCC('h', 'v', 'c', 'C'): + { + sp<ABuffer> buffer = new ABuffer(chunk_data_size); + + if (mDataSource->readAt( + data_offset, buffer->data(), chunk_data_size) < chunk_data_size) { + return ERROR_IO; + } + + mLastTrack->meta->setData( + kKeyHVCC, kTypeHVCC, buffer->data(), chunk_data_size); + + *offset += chunk_size; + break; + } case FOURCC('d', '2', '6', '3'): { @@ -2452,6 +2473,11 @@ status_t MPEG4Extractor::verifyTrack(Track *track) { || type != kTypeAVCC) { return ERROR_MALFORMED; } + } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) { + if (!track->meta->findData(kKeyHVCC, &type, &data, &size) + || type != kTypeHVCC) { + return ERROR_MALFORMED; + } } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) { if (!track->meta->findData(kKeyESDS, &type, &data, &size) @@ -2776,6 +2802,7 @@ MPEG4Source::MPEG4Source( mCurrentSampleInfoOffsetsAllocSize(0), mCurrentSampleInfoOffsets(NULL), mIsAVC(false), + mIsHEVC(false), mNALLengthSize(0), mStarted(false), mGroup(NULL), @@ -2800,6 +2827,7 @@ MPEG4Source::MPEG4Source( CHECK(success); mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC); + mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC); if (mIsAVC) { uint32_t type; @@ -2814,6 +2842,18 @@ MPEG4Source::MPEG4Source( // The number of bytes used to encode the length of a NAL unit. 
mNALLengthSize = 1 + (ptr[4] & 3); + } else if (mIsHEVC) { + uint32_t type; + const void *data; + size_t size; + CHECK(format->findData(kKeyHVCC, &type, &data, &size)); + + const uint8_t *ptr = (const uint8_t *)data; + + CHECK(size >= 7); + CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1 + + mNALLengthSize = 1 + (ptr[14 + 7] & 3); } CHECK(format->findInt32(kKeyTrackID, &mTrackId)); @@ -3562,7 +3602,7 @@ status_t MPEG4Source::read( } } - if (!mIsAVC || mWantsNALFragments) { + if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) { if (newBuffer) { ssize_t num_bytes_read = mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size); @@ -3594,7 +3634,7 @@ status_t MPEG4Source::read( ++mCurrentSampleIndex; } - if (!mIsAVC) { + if (!mIsAVC && !mIsHEVC) { *out = mBuffer; mBuffer = NULL; @@ -3837,7 +3877,7 @@ status_t MPEG4Source::fragmentedRead( bufmeta->setData(kKeyCryptoKey, 0, mCryptoKey, 16); } - if (!mIsAVC || mWantsNALFragments) { + if ((!mIsAVC && !mIsHEVC)|| mWantsNALFragments) { if (newBuffer) { ssize_t num_bytes_read = mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size); @@ -3869,7 +3909,7 @@ status_t MPEG4Source::fragmentedRead( ++mCurrentSampleIndex; } - if (!mIsAVC) { + if (!mIsAVC && !mIsHEVC) { *out = mBuffer; mBuffer = NULL; @@ -4043,6 +4083,8 @@ static bool isCompatibleBrand(uint32_t fourcc) { FOURCC('i', 's', 'o', 'm'), FOURCC('i', 's', 'o', '2'), FOURCC('a', 'v', 'c', '1'), + FOURCC('h', 'v', 'c', '1'), + FOURCC('h', 'e', 'v', '1'), FOURCC('3', 'g', 'p', '4'), FOURCC('m', 'p', '4', '1'), FOURCC('m', 'p', '4', '2'), diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp index c028dbf..354712c 100644 --- a/media/libstagefright/OMXCodec.cpp +++ b/media/libstagefright/OMXCodec.cpp @@ -381,6 +381,57 @@ sp<MediaSource> OMXCodec::Create( return NULL; } +status_t OMXCodec::parseHEVCCodecSpecificData( + const void *data, size_t size, + unsigned *profile, unsigned *level) { + const uint8_t *ptr = (const uint8_t *)data; + + // verify minimum size and configurationVersion == 1. 
+ if (size < 7 || ptr[0] != 1) { + return ERROR_MALFORMED; + } + + *profile = (ptr[1] & 31); + *level = ptr[12]; + + ptr += 22; + size -= 22; + + size_t numofArrays = (char)ptr[0]; + ptr += 1; + size -= 1; + size_t j = 0, i = 0; + for (i = 0; i < numofArrays; i++) { + ptr += 1; + size -= 1; + + // Num of nals + size_t numofNals = U16_AT(ptr); + ptr += 2; + size -= 2; + + for (j = 0;j < numofNals;j++) { + if (size < 2) { + return ERROR_MALFORMED; + } + + size_t length = U16_AT(ptr); + + ptr += 2; + size -= 2; + + if (size < length) { + return ERROR_MALFORMED; + } + addCodecSpecificData(ptr, length); + + ptr += length; + size -= length; + } + } + return OK; +} + status_t OMXCodec::parseAVCCodecSpecificData( const void *data, size_t size, unsigned *profile, unsigned *level) { @@ -493,6 +544,20 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) { CODEC_LOGI( "AVC profile = %u (%s), level = %u", profile, AVCProfileToString(profile), level); + } else if (meta->findData(kKeyHVCC, &type, &data, &size)) { + // Parse the HEVCDecoderConfigurationRecord + + unsigned profile, level; + status_t err; + if ((err = parseHEVCCodecSpecificData( + data, size, &profile, &level)) != OK) { + ALOGE("Malformed HEVC codec specific data."); + return err; + } + + CODEC_LOGI( + "HEVC profile = %u , level = %u", + profile, level); } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) { addCodecSpecificData(data, size); @@ -822,6 +887,8 @@ void OMXCodec::setVideoInputFormat( OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused; if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) { compressionFormat = OMX_VIDEO_CodingAVC; + } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) { + compressionFormat = OMX_VIDEO_CodingHEVC; } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) { compressionFormat = OMX_VIDEO_CodingMPEG4; } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) { @@ -1217,6 +1284,8 @@ status_t OMXCodec::setVideoOutputFormat( compressionFormat = OMX_VIDEO_CodingAVC; } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) { compressionFormat = OMX_VIDEO_CodingMPEG4; + } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) { + compressionFormat = OMX_VIDEO_CodingHEVC; } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) { compressionFormat = OMX_VIDEO_CodingH263; } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VP8, mime)) { @@ -1411,6 +1480,8 @@ void OMXCodec::setComponentRole( "audio_decoder.g711alaw", "audio_encoder.g711alaw" }, { MEDIA_MIMETYPE_VIDEO_AVC, "video_decoder.avc", "video_encoder.avc" }, + { MEDIA_MIMETYPE_VIDEO_HEVC, + "video_decoder.hevc", "video_encoder.hevc" }, { MEDIA_MIMETYPE_VIDEO_MPEG4, "video_decoder.mpeg4", "video_encoder.mpeg4" }, { MEDIA_MIMETYPE_VIDEO_H263, @@ -3009,7 +3080,8 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) { size_t size = specific->mSize; - if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mMIME) + if ((!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mMIME) || + !strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mMIME)) && !(mQuirks & kWantsNALFragments)) { static const uint8_t kNALStartCode[4] = { 0x00, 0x00, 0x00, 0x01 }; diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp index 047fac7..7ff31a1 100644 --- a/media/libstagefright/Utils.cpp +++ b/media/libstagefright/Utils.cpp @@ -217,6 +217,56 @@ status_t convertMetaDataToMessage( buffer->meta()->setInt32("csd", true); buffer->meta()->setInt64("timeUs", 0); msg->setBuffer("csd-1", buffer); + } else if (meta->findData(kKeyHVCC, &type, &data, &size)) { + const uint8_t *ptr = 
(const uint8_t *)data; + + CHECK(size >= 7); + CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1 + uint8_t profile = ptr[1] & 31; + uint8_t level = ptr[12]; + ptr += 22; + size -= 22; + + + size_t numofArrays = (char)ptr[0]; + ptr += 1; + size -= 1; + size_t j = 0, i = 0; + + sp<ABuffer> buffer = new ABuffer(1024); + buffer->setRange(0, 0); + + for (i = 0; i < numofArrays; i++) { + ptr += 1; + size -= 1; + + //Num of nals + size_t numofNals = U16_AT(ptr); + + ptr += 2; + size -= 2; + + for (j = 0; j < numofNals; j++) { + CHECK(size >= 2); + size_t length = U16_AT(ptr); + + ptr += 2; + size -= 2; + + CHECK(size >= length); + + memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4); + memcpy(buffer->data() + buffer->size() + 4, ptr, length); + buffer->setRange(0, buffer->size() + 4 + length); + + ptr += length; + size -= length; + } + } + buffer->meta()->setInt32("csd", true); + buffer->meta()->setInt64("timeUs", 0); + msg->setBuffer("csd-0", buffer); + } else if (meta->findData(kKeyESDS, &type, &data, &size)) { ESDS esds((const char *)data, size); CHECK_EQ(esds.InitCheck(), (status_t)OK); diff --git a/media/libstagefright/codecs/hevcdec/Android.mk b/media/libstagefright/codecs/hevcdec/Android.mk new file mode 100644 index 0000000..2fe347b --- /dev/null +++ b/media/libstagefright/codecs/hevcdec/Android.mk @@ -0,0 +1,22 @@ +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_MODULE := libstagefright_soft_hevcdec +LOCAL_MODULE_TAGS := optional + +LOCAL_STATIC_LIBRARIES := libhevcdec +LOCAL_SRC_FILES := SoftHEVC.cpp + +LOCAL_C_INCLUDES := $(TOP)/external/libhevc/decoder +LOCAL_C_INCLUDES += $(TOP)/external/libhevc/common +LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include +LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax + +LOCAL_SHARED_LIBRARIES := libstagefright +LOCAL_SHARED_LIBRARIES += libstagefright_omx +LOCAL_SHARED_LIBRARIES += libstagefright_foundation +LOCAL_SHARED_LIBRARIES += libutils +LOCAL_SHARED_LIBRARIES += liblog + + +include $(BUILD_SHARED_LIBRARY) diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp new file mode 100644 index 0000000..0aae5ed --- /dev/null +++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp @@ -0,0 +1,710 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "SoftHEVC" +#include <utils/Log.h> + +#include "ihevc_typedefs.h" +#include "iv.h" +#include "ivd.h" +#include "ithread.h" +#include "ihevcd_cxa.h" +#include "SoftHEVC.h" + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/MediaDefs.h> +#include <OMX_VideoExt.h> + +namespace android { + +#define componentName "video_decoder.hevc" +#define codingType OMX_VIDEO_CodingHEVC +#define CODEC_MIME_TYPE MEDIA_MIMETYPE_VIDEO_HEVC + +/** Function and structure definitions to keep code similar for each codec */ +#define ivdec_api_function ihevcd_cxa_api_function +#define ivdext_init_ip_t ihevcd_cxa_init_ip_t +#define ivdext_init_op_t ihevcd_cxa_init_op_t +#define ivdext_fill_mem_rec_ip_t ihevcd_cxa_fill_mem_rec_ip_t +#define ivdext_fill_mem_rec_op_t ihevcd_cxa_fill_mem_rec_op_t +#define ivdext_ctl_set_num_cores_ip_t ihevcd_cxa_ctl_set_num_cores_ip_t +#define ivdext_ctl_set_num_cores_op_t ihevcd_cxa_ctl_set_num_cores_op_t + +#define IVDEXT_CMD_CTL_SET_NUM_CORES \ + (IVD_CONTROL_API_COMMAND_TYPE_T)IHEVCD_CXA_CMD_CTL_SET_NUM_CORES + +static const CodecProfileLevel kProfileLevels[] = { + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel1 }, + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel2 }, + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel21 }, + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel3 }, + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel31 }, + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel4 }, + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel41 }, + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel5 }, + { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel51 }, +}; + +SoftHEVC::SoftHEVC( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component) + : SoftVideoDecoderOMXComponent(name, componentName, codingType, + kProfileLevels, ARRAY_SIZE(kProfileLevels), + CODEC_MAX_WIDTH /* width */, CODEC_MAX_HEIGHT /* height */, callbacks, + appData, component) { + initPorts(kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, + CODEC_MIME_TYPE); + + mOmxColorFormat = OMX_COLOR_FormatYUV420Planar; + mStride = mWidth; + + if (OMX_COLOR_FormatYUV420Planar == mOmxColorFormat) { + mIvColorFormat = IV_YUV_420P; + } else if (OMX_COLOR_FormatYUV420SemiPlanar == mOmxColorFormat) { + mIvColorFormat = IV_YUV_420SP_UV; + } + + mInitWidth = mWidth; + mInitHeight = mHeight; + + CHECK_EQ(initDecoder(), (status_t)OK); +} + +SoftHEVC::~SoftHEVC() { + ALOGD("In SoftHEVC::~SoftHEVC"); + CHECK_EQ(deInitDecoder(), (status_t)OK); +} + +static size_t GetCPUCoreCount() { + long cpuCoreCount = 1; +#if defined(_SC_NPROCESSORS_ONLN) + cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN); +#else + // _SC_NPROC_ONLN must be defined... 
+ cpuCoreCount = sysconf(_SC_NPROC_ONLN); +#endif + CHECK(cpuCoreCount >= 1); + ALOGD("Number of CPU cores: %ld", cpuCoreCount); + return (size_t)cpuCoreCount; +} + +status_t SoftHEVC::getVersion() { + ivd_ctl_getversioninfo_ip_t s_ctl_ip; + ivd_ctl_getversioninfo_op_t s_ctl_op; + UWORD8 au1_buf[512]; + IV_API_CALL_STATUS_T status; + + s_ctl_ip.e_cmd = IVD_CMD_VIDEO_CTL; + s_ctl_ip.e_sub_cmd = IVD_CMD_CTL_GETVERSION; + s_ctl_ip.u4_size = sizeof(ivd_ctl_getversioninfo_ip_t); + s_ctl_op.u4_size = sizeof(ivd_ctl_getversioninfo_op_t); + s_ctl_ip.pv_version_buffer = au1_buf; + s_ctl_ip.u4_version_buffer_size = sizeof(au1_buf); + + status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip, + (void *)&s_ctl_op); + + if (status != IV_SUCCESS) { + ALOGE("Error in getting version number: 0x%x", + s_ctl_op.u4_error_code); + } else { + ALOGD("Ittiam decoder version number: %s", + (char *)s_ctl_ip.pv_version_buffer); + } + return OK; +} + +status_t SoftHEVC::setParams(WORD32 stride, IVD_VIDEO_DECODE_MODE_T decMode) { + ivd_ctl_set_config_ip_t s_ctl_ip; + ivd_ctl_set_config_op_t s_ctl_op; + IV_API_CALL_STATUS_T status; + s_ctl_ip.u4_disp_wd = stride; + s_ctl_ip.e_frm_skip_mode = IVD_SKIP_NONE; + + s_ctl_ip.e_frm_out_mode = IVD_DISPLAY_FRAME_OUT; + s_ctl_ip.e_vid_dec_mode = decMode; + s_ctl_ip.e_cmd = IVD_CMD_VIDEO_CTL; + s_ctl_ip.e_sub_cmd = IVD_CMD_CTL_SETPARAMS; + s_ctl_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t); + s_ctl_op.u4_size = sizeof(ivd_ctl_set_config_op_t); + + ALOGD("Set the run-time (dynamic) parameters"); + status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip, + (void *)&s_ctl_op); + + if (status != IV_SUCCESS) { + ALOGE("Error in setting the run-time parameters: 0x%x", + s_ctl_op.u4_error_code); + + return UNKNOWN_ERROR; + } + return OK; +} + +status_t SoftHEVC::resetPlugin() { + mIsInFlush = false; + mReceivedEOS = false; + memset(mTimeStamps, 0, sizeof(mTimeStamps)); + memset(mTimeStampsValid, 0, sizeof(mTimeStampsValid)); + + /* Initialize both start and end times */ + gettimeofday(&mTimeStart, NULL); + gettimeofday(&mTimeEnd, NULL); + + return OK; +} + +status_t SoftHEVC::resetDecoder() { + ivd_ctl_reset_ip_t s_ctl_ip; + ivd_ctl_reset_op_t s_ctl_op; + IV_API_CALL_STATUS_T status; + + s_ctl_ip.e_cmd = IVD_CMD_VIDEO_CTL; + s_ctl_ip.e_sub_cmd = IVD_CMD_CTL_RESET; + s_ctl_ip.u4_size = sizeof(ivd_ctl_reset_ip_t); + s_ctl_op.u4_size = sizeof(ivd_ctl_reset_op_t); + + status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip, + (void *)&s_ctl_op); + if (IV_SUCCESS != status) { + ALOGE("Error in reset: 0x%x", s_ctl_op.u4_error_code); + return UNKNOWN_ERROR; + } + + /* Set the run-time (dynamic) parameters */ + setParams(0, IVD_DECODE_FRAME); + + /* Set number of cores/threads to be used by the codec */ + setNumCores(); + + return OK; +} + +status_t SoftHEVC::setNumCores() { + ivdext_ctl_set_num_cores_ip_t s_set_cores_ip; + ivdext_ctl_set_num_cores_op_t s_set_cores_op; + IV_API_CALL_STATUS_T status; + s_set_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL; + s_set_cores_ip.e_sub_cmd = IVDEXT_CMD_CTL_SET_NUM_CORES; + s_set_cores_ip.u4_num_cores = MIN(mNumCores, CODEC_MAX_NUM_CORES); + s_set_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t); + s_set_cores_op.u4_size = sizeof(ivdext_ctl_set_num_cores_op_t); + ALOGD("Set number of cores to %u", s_set_cores_ip.u4_num_cores); + status = ivdec_api_function(mCodecCtx, (void *)&s_set_cores_ip, + (void *)&s_set_cores_op); + if (IV_SUCCESS != status) { + ALOGE("Error in setting number of cores: 0x%x", + s_set_cores_op.u4_error_code); + return UNKNOWN_ERROR; 
+ } + return OK; +} + +status_t SoftHEVC::setFlushMode() { + IV_API_CALL_STATUS_T status; + ivd_ctl_flush_ip_t s_video_flush_ip; + ivd_ctl_flush_op_t s_video_flush_op; + + s_video_flush_ip.e_cmd = IVD_CMD_VIDEO_CTL; + s_video_flush_ip.e_sub_cmd = IVD_CMD_CTL_FLUSH; + s_video_flush_ip.u4_size = sizeof(ivd_ctl_flush_ip_t); + s_video_flush_op.u4_size = sizeof(ivd_ctl_flush_op_t); + ALOGD("Set the decoder in flush mode "); + + /* Set the decoder in Flush mode, subsequent decode() calls will flush */ + status = ivdec_api_function(mCodecCtx, (void *)&s_video_flush_ip, + (void *)&s_video_flush_op); + + if (status != IV_SUCCESS) { + ALOGE("Error in setting the decoder in flush mode: (%d) 0x%x", status, + s_video_flush_op.u4_error_code); + return UNKNOWN_ERROR; + } + + mIsInFlush = true; + return OK; +} + +status_t SoftHEVC::initDecoder() { + IV_API_CALL_STATUS_T status; + + UWORD32 u4_num_reorder_frames; + UWORD32 u4_num_ref_frames; + UWORD32 u4_share_disp_buf; + WORD32 i4_level; + + mNumCores = GetCPUCoreCount(); + + /* Initialize number of ref and reorder modes (for HEVC) */ + u4_num_reorder_frames = 16; + u4_num_ref_frames = 16; + u4_share_disp_buf = 0; + + if ((mWidth * mHeight) > (1920 * 1088)) { + i4_level = 50; + } else if ((mWidth * mHeight) > (1280 * 720)) { + i4_level = 41; + } else { + i4_level = 31; + } + + { + iv_num_mem_rec_ip_t s_num_mem_rec_ip; + iv_num_mem_rec_op_t s_num_mem_rec_op; + + s_num_mem_rec_ip.u4_size = sizeof(s_num_mem_rec_ip); + s_num_mem_rec_op.u4_size = sizeof(s_num_mem_rec_op); + s_num_mem_rec_ip.e_cmd = IV_CMD_GET_NUM_MEM_REC; + + ALOGV("Get number of mem records"); + status = ivdec_api_function(mCodecCtx, (void*)&s_num_mem_rec_ip, + (void*)&s_num_mem_rec_op); + if (IV_SUCCESS != status) { + ALOGE("Error in getting mem records: 0x%x", + s_num_mem_rec_op.u4_error_code); + return UNKNOWN_ERROR; + } + + mNumMemRecords = s_num_mem_rec_op.u4_num_mem_rec; + } + + mMemRecords = (iv_mem_rec_t*)ivd_aligned_malloc( + 128, mNumMemRecords * sizeof(iv_mem_rec_t)); + if (mMemRecords == NULL) { + ALOGE("Allocation failure"); + return NO_MEMORY; + } + + { + size_t i; + ivdext_fill_mem_rec_ip_t s_fill_mem_ip; + ivdext_fill_mem_rec_op_t s_fill_mem_op; + iv_mem_rec_t *ps_mem_rec; + + s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_size = + sizeof(ivdext_fill_mem_rec_ip_t); + s_fill_mem_ip.i4_level = i4_level; + s_fill_mem_ip.u4_num_reorder_frames = u4_num_reorder_frames; + s_fill_mem_ip.u4_num_ref_frames = u4_num_ref_frames; + s_fill_mem_ip.u4_share_disp_buf = u4_share_disp_buf; + s_fill_mem_ip.u4_num_extra_disp_buf = 0; + s_fill_mem_ip.e_output_format = mIvColorFormat; + + s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.e_cmd = IV_CMD_FILL_NUM_MEM_REC; + s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.pv_mem_rec_location = mMemRecords; + s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_wd = mWidth; + s_fill_mem_ip.s_ivd_fill_mem_rec_ip_t.u4_max_frm_ht = mHeight; + s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_size = + sizeof(ivdext_fill_mem_rec_op_t); + + ps_mem_rec = mMemRecords; + for (i = 0; i < mNumMemRecords; i++) + ps_mem_rec[i].u4_size = sizeof(iv_mem_rec_t); + + status = ivdec_api_function(mCodecCtx, (void *)&s_fill_mem_ip, + (void *)&s_fill_mem_op); + + if (IV_SUCCESS != status) { + ALOGE("Error in filling mem records: 0x%x", + s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_error_code); + return UNKNOWN_ERROR; + } + mNumMemRecords = + s_fill_mem_op.s_ivd_fill_mem_rec_op_t.u4_num_mem_rec_filled; + + ps_mem_rec = mMemRecords; + + for (i = 0; i < mNumMemRecords; i++) { + ps_mem_rec->pv_base = ivd_aligned_malloc( + 
ps_mem_rec->u4_mem_alignment, ps_mem_rec->u4_mem_size); + if (ps_mem_rec->pv_base == NULL) { + ALOGE("Allocation failure for memory record #%zu of size %u", + i, ps_mem_rec->u4_mem_size); + status = IV_FAIL; + return NO_MEMORY; + } + + ps_mem_rec++; + } + } + + /* Initialize the decoder */ + { + ivdext_init_ip_t s_init_ip; + ivdext_init_op_t s_init_op; + + void *dec_fxns = (void *)ivdec_api_function; + + s_init_ip.s_ivd_init_ip_t.u4_size = sizeof(ivdext_init_ip_t); + s_init_ip.s_ivd_init_ip_t.e_cmd = (IVD_API_COMMAND_TYPE_T)IV_CMD_INIT; + s_init_ip.s_ivd_init_ip_t.pv_mem_rec_location = mMemRecords; + s_init_ip.s_ivd_init_ip_t.u4_frm_max_wd = mWidth; + s_init_ip.s_ivd_init_ip_t.u4_frm_max_ht = mHeight; + + s_init_ip.i4_level = i4_level; + s_init_ip.u4_num_reorder_frames = u4_num_reorder_frames; + s_init_ip.u4_num_ref_frames = u4_num_ref_frames; + s_init_ip.u4_share_disp_buf = u4_share_disp_buf; + s_init_ip.u4_num_extra_disp_buf = 0; + + s_init_op.s_ivd_init_op_t.u4_size = sizeof(s_init_op); + + s_init_ip.s_ivd_init_ip_t.u4_num_mem_rec = mNumMemRecords; + s_init_ip.s_ivd_init_ip_t.e_output_format = mIvColorFormat; + + mCodecCtx = (iv_obj_t*)mMemRecords[0].pv_base; + mCodecCtx->pv_fxns = dec_fxns; + mCodecCtx->u4_size = sizeof(iv_obj_t); + + ALOGD("Initializing decoder"); + status = ivdec_api_function(mCodecCtx, (void *)&s_init_ip, + (void *)&s_init_op); + if (status != IV_SUCCESS) { + ALOGE("Error in init: 0x%x", + s_init_op.s_ivd_init_op_t.u4_error_code); + return UNKNOWN_ERROR; + } + } + + /* Reset the plugin state */ + resetPlugin(); + + /* Set the run time (dynamic) parameters */ + setParams(0, IVD_DECODE_FRAME); + + /* Set number of cores/threads to be used by the codec */ + setNumCores(); + + /* Get codec version */ + getVersion(); + + /* Allocate internal picture buffer */ + mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, mStride * mHeight * 3 / 2); + if (NULL == mFlushOutBuffer) { + ALOGE("Could not allocate flushOutputBuffer of size %zu", mStride * mHeight * 3 / 2); + return NO_MEMORY; + } + + return OK; +} + +status_t SoftHEVC::deInitDecoder() { + size_t i; + iv_mem_rec_t *ps_mem_rec; + ps_mem_rec = mMemRecords; + ALOGD("Freeing codec memory"); + for (i = 0; i < mNumMemRecords; i++) { + ivd_aligned_free(ps_mem_rec->pv_base); + ps_mem_rec++; + } + + ivd_aligned_free(mMemRecords); + ivd_aligned_free(mFlushOutBuffer); + return OK; +} + +void SoftHEVC::onReset() { + ALOGD("onReset called"); + SoftVideoDecoderOMXComponent::onReset(); + + resetDecoder(); + resetPlugin(); +} + +void SoftHEVC::onPortFlushCompleted(OMX_U32 portIndex) { + ALOGD("onPortFlushCompleted on port %d", portIndex); + + /* Once the output buffers are flushed, ignore any buffers that are held in decoder */ + if (kOutputPortIndex == portIndex) { + setFlushMode(); + + /* Reset the time stamp arrays */ + memset(mTimeStamps, 0, sizeof(mTimeStamps)); + memset(mTimeStampsValid, 0, sizeof(mTimeStampsValid)); + + while (true) { + ivd_video_decode_ip_t s_dec_ip; + ivd_video_decode_op_t s_dec_op; + IV_API_CALL_STATUS_T status; + size_t sizeY, sizeUV; + + s_dec_ip.e_cmd = IVD_CMD_VIDEO_DECODE; + + s_dec_ip.u4_ts = 0; + s_dec_ip.pv_stream_buffer = NULL; + s_dec_ip.u4_num_Bytes = 0; + + s_dec_ip.u4_size = sizeof(ivd_video_decode_ip_t); + s_dec_op.u4_size = sizeof(ivd_video_decode_op_t); + + sizeY = mStride * mHeight; + sizeUV = sizeY / 4; + s_dec_ip.s_out_buffer.u4_min_out_buf_size[0] = sizeY; + s_dec_ip.s_out_buffer.u4_min_out_buf_size[1] = sizeUV; + s_dec_ip.s_out_buffer.u4_min_out_buf_size[2] = sizeUV; + + 
s_dec_ip.s_out_buffer.pu1_bufs[0] = mFlushOutBuffer; + s_dec_ip.s_out_buffer.pu1_bufs[1] = + s_dec_ip.s_out_buffer.pu1_bufs[0] + sizeY; + s_dec_ip.s_out_buffer.pu1_bufs[2] = + s_dec_ip.s_out_buffer.pu1_bufs[1] + sizeUV; + s_dec_ip.s_out_buffer.u4_num_bufs = 3; + + status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, + (void *)&s_dec_op); + if (0 == s_dec_op.u4_output_present) { + resetPlugin(); + break; + } + } + } +} + +void SoftHEVC::onQueueFilled(OMX_U32 portIndex) { + IV_API_CALL_STATUS_T status; + + UNUSED(portIndex); + + if (mOutputPortSettingsChange != NONE) { + return; + } + + List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex); + List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex); + + /* If input EOS is seen and decoder is not in flush mode, + * set the decoder in flush mode. + * There can be a case where EOS is sent along with last picture data + * In that case, only after decoding that input data, decoder has to be + * put in flush. This case is handled here */ + + if (mReceivedEOS && !mIsInFlush) { + setFlushMode(); + } + + while (outQueue.size() == kNumBuffers) { + BufferInfo *inInfo; + OMX_BUFFERHEADERTYPE *inHeader; + + BufferInfo *outInfo; + OMX_BUFFERHEADERTYPE *outHeader; + size_t timeStampIx; + + inInfo = NULL; + inHeader = NULL; + + if (!mIsInFlush) { + if (!inQueue.empty()) { + inInfo = *inQueue.begin(); + inHeader = inInfo->mHeader; + } else { + break; + } + } + + outInfo = *outQueue.begin(); + outHeader = outInfo->mHeader; + outHeader->nFlags = 0; + outHeader->nTimeStamp = 0; + outHeader->nOffset = 0; + + if (inHeader != NULL && (inHeader->nFlags & OMX_BUFFERFLAG_EOS)) { + ALOGD("EOS seen on input"); + mReceivedEOS = true; + if (inHeader->nFilledLen == 0) { + inQueue.erase(inQueue.begin()); + inInfo->mOwnedByUs = false; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + setFlushMode(); + } + } + + /* Get a free slot in timestamp array to hold input timestamp */ + { + size_t i; + timeStampIx = 0; + for (i = 0; i < MAX_TIME_STAMPS; i++) { + if (!mTimeStampsValid[i]) { + timeStampIx = i; + break; + } + } + if (inHeader != NULL) { + mTimeStampsValid[timeStampIx] = true; + mTimeStamps[timeStampIx] = inHeader->nTimeStamp; + } + } + + { + ivd_video_decode_ip_t s_dec_ip; + ivd_video_decode_op_t s_dec_op; + WORD32 timeDelay, timeTaken; + size_t sizeY, sizeUV; + + s_dec_ip.e_cmd = IVD_CMD_VIDEO_DECODE; + + /* When in flush and after EOS with zero byte input, + * inHeader is set to zero. 
Hence check for non-null */ + if (inHeader != NULL) { + s_dec_ip.u4_ts = timeStampIx; + s_dec_ip.pv_stream_buffer = inHeader->pBuffer + + inHeader->nOffset; + s_dec_ip.u4_num_Bytes = inHeader->nFilledLen; + } else { + s_dec_ip.u4_ts = 0; + s_dec_ip.pv_stream_buffer = NULL; + s_dec_ip.u4_num_Bytes = 0; + } + + s_dec_ip.u4_size = sizeof(ivd_video_decode_ip_t); + s_dec_op.u4_size = sizeof(ivd_video_decode_op_t); + + sizeY = mStride * mHeight; + sizeUV = sizeY / 4; + s_dec_ip.s_out_buffer.u4_min_out_buf_size[0] = sizeY; + s_dec_ip.s_out_buffer.u4_min_out_buf_size[1] = sizeUV; + s_dec_ip.s_out_buffer.u4_min_out_buf_size[2] = sizeUV; + + s_dec_ip.s_out_buffer.pu1_bufs[0] = outHeader->pBuffer; + s_dec_ip.s_out_buffer.pu1_bufs[1] = + s_dec_ip.s_out_buffer.pu1_bufs[0] + sizeY; + s_dec_ip.s_out_buffer.pu1_bufs[2] = + s_dec_ip.s_out_buffer.pu1_bufs[1] + sizeUV; + s_dec_ip.s_out_buffer.u4_num_bufs = 3; + + GETTIME(&mTimeStart, NULL); + /* Compute time elapsed between end of previous decode() + * to start of current decode() */ + TIME_DIFF(mTimeEnd, mTimeStart, timeDelay); + + status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, + (void *)&s_dec_op); + + GETTIME(&mTimeEnd, NULL); + /* Compute time taken for decode() */ + TIME_DIFF(mTimeStart, mTimeEnd, timeTaken); + + ALOGD("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay, + s_dec_op.u4_num_bytes_consumed); + + if ((inHeader != NULL) && (1 != s_dec_op.u4_frame_decoded_flag)) { + /* If the input did not contain picture data, then ignore + * the associated timestamp */ + mTimeStampsValid[timeStampIx] = false; + } + + /* If valid height and width are decoded, + * then look at change in resolution */ + if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) { + uint32_t width = s_dec_op.u4_pic_wd; + uint32_t height = s_dec_op.u4_pic_ht; + + if ((width != mWidth || height != mHeight)) { + mWidth = width; + mHeight = height; + mStride = mWidth; + + /* If width and height are greater than the + * the dimensions used during codec create, then + * delete the current instance and recreate an instance with + * new dimensions */ + /* TODO: The following does not work currently, since the decoder + * currently returns 0 x 0 as width height when it is not supported + * Once the decoder is updated to return actual width and height, + * then this can be validated*/ + + if ((mWidth * mHeight) > (mInitWidth * mInitHeight)) { + status_t ret; + ALOGD("Trying reInit"); + ret = deInitDecoder(); + if (OK != ret) { + // TODO: Handle graceful exit + ALOGE("Create failure"); + return; + } + + mInitWidth = mWidth; + mInitHeight = mHeight; + + ret = initDecoder(); + if (OK != ret) { + // TODO: Handle graceful exit + ALOGE("Create failure"); + return; + } + } + updatePortDefinitions(); + + notify(OMX_EventPortSettingsChanged, 1, 0, NULL); + mOutputPortSettingsChange = AWAITING_DISABLED; + return; + } + } + + if (s_dec_op.u4_output_present) { + outHeader->nFilledLen = (mStride * mHeight * 3) / 2; + + outHeader->nTimeStamp = mTimeStamps[s_dec_op.u4_ts]; + mTimeStampsValid[s_dec_op.u4_ts] = false; + + outInfo->mOwnedByUs = false; + outQueue.erase(outQueue.begin()); + outInfo = NULL; + notifyFillBufferDone(outHeader); + outHeader = NULL; + } else { + /* If in flush mode and no output is returned by the codec, + * then come out of flush mode */ + mIsInFlush = false; + + /* If EOS was recieved on input port and there is no output + * from the codec, then signal EOS on output port */ + if (mReceivedEOS) { + outHeader->nFilledLen = 0; + outHeader->nFlags |= 
OMX_BUFFERFLAG_EOS; + + outInfo->mOwnedByUs = false; + outQueue.erase(outQueue.begin()); + outInfo = NULL; + notifyFillBufferDone(outHeader); + outHeader = NULL; + resetPlugin(); + } + } + } + + // TODO: Handle more than one picture data + if (inHeader != NULL) { + inInfo->mOwnedByUs = false; + inQueue.erase(inQueue.begin()); + inInfo = NULL; + notifyEmptyBufferDone(inHeader); + inHeader = NULL; + } + } +} + +} // namespace android + +android::SoftOMXComponent *createSoftOMXComponent(const char *name, + const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData, + OMX_COMPONENTTYPE **component) { + return new android::SoftHEVC(name, callbacks, appData, component); +} diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.h b/media/libstagefright/codecs/hevcdec/SoftHEVC.h new file mode 100644 index 0000000..20db0e1 --- /dev/null +++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SOFT_HEVC_H_ + +#define SOFT_HEVC_H_ + +#include "SoftVideoDecoderOMXComponent.h" +#include <sys/time.h> + +namespace android { + +#define ivd_aligned_malloc(alignment, size) memalign(alignment, size) +#define ivd_aligned_free(buf) free(buf) + +/** Number of entries in the time-stamp array */ +#define MAX_TIME_STAMPS 64 + +/** Maximum number of cores supported by the codec */ +#define CODEC_MAX_NUM_CORES 4 + +#define CODEC_MAX_WIDTH 1920 + +#define CODEC_MAX_HEIGHT 1088 + +/** Input buffer size */ +#define INPUT_BUF_SIZE (1024 * 1024) + +#define MIN(a, b) ((a) < (b)) ? 
(a) : (b) + +/** Used to remove warnings about unused parameters */ +#define UNUSED(x) ((void)(x)) + +/** Get time */ +#define GETTIME(a, b) gettimeofday(a, b); + +/** Compute difference between start and end */ +#define TIME_DIFF(start, end, diff) \ + diff = ((end.tv_sec - start.tv_sec) * 1000000) + \ + (end.tv_usec - start.tv_usec); + +struct SoftHEVC: public SoftVideoDecoderOMXComponent { + SoftHEVC(const char *name, const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, OMX_COMPONENTTYPE **component); + +protected: + virtual ~SoftHEVC(); + + virtual void onQueueFilled(OMX_U32 portIndex); + virtual void onPortFlushCompleted(OMX_U32 portIndex); + virtual void onReset(); +private: + // Number of input and output buffers + enum { + kNumBuffers = 8 + }; + + iv_obj_t *mCodecCtx; // Codec context + iv_mem_rec_t *mMemRecords; // Memory records requested by the codec + size_t mNumMemRecords; // Number of memory records requested by the codec + + uint32_t mNewWidth; // New width after change in resolution + uint32_t mNewHeight; // New height after change in resolution + uint32_t mInitWidth; // Width used during codec creation + uint32_t mInitHeight; // Height used during codec creation + size_t mStride; // Stride to be used for display buffers + + size_t mNumCores; // Number of cores to be uesd by the codec + + struct timeval mTimeStart; // Time at the start of decode() + struct timeval mTimeEnd; // Time at the end of decode() + + // Internal buffer to be used to flush out the buffers from decoder + uint8_t *mFlushOutBuffer; + + // Status of entries in the timestamp array + bool mTimeStampsValid[MAX_TIME_STAMPS]; + + // Timestamp array - Since codec does not take 64 bit timestamps, + // they are maintained in the plugin + OMX_S64 mTimeStamps[MAX_TIME_STAMPS]; + + OMX_COLOR_FORMATTYPE mOmxColorFormat; // OMX Color format + IV_COLOR_FORMAT_T mIvColorFormat; // Ittiam Color format + + bool mIsInFlush; // codec is flush mode + bool mReceivedEOS; // EOS is receieved on input port + bool mIsAdapting; // plugin in middle of change in resolution + + status_t initDecoder(); + status_t deInitDecoder(); + status_t setFlushMode(); + status_t setParams(WORD32 stride, IVD_VIDEO_DECODE_MODE_T decMode); + status_t getVersion(); + status_t setNumCores(); + status_t resetDecoder(); + status_t resetPlugin(); + + DISALLOW_EVIL_CONSTRUCTORS (SoftHEVC); +}; + +} // namespace android + +#endif // SOFT_HEVC_H_ diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml index 41e0efb..9b930bc 100644 --- a/media/libstagefright/data/media_codecs_google_video.xml +++ b/media/libstagefright/data/media_codecs_google_video.xml @@ -19,6 +19,7 @@ <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" /> <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" /> <MediaCodec name="OMX.google.h264.decoder" type="video/avc" /> + <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc" /> <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" /> <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" /> </Decoders> diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp index 65f5404..9b6958a 100644 --- a/media/libstagefright/omx/SoftOMXPlugin.cpp +++ b/media/libstagefright/omx/SoftOMXPlugin.cpp @@ -42,6 +42,7 @@ static const struct { { "OMX.google.amrwb.encoder", "amrwbenc", "audio_encoder.amrwb" }, { "OMX.google.h264.decoder", "h264dec", "video_decoder.avc" }, { 
"OMX.google.h264.encoder", "h264enc", "video_encoder.avc" }, + { "OMX.google.hevc.decoder", "hevcdec", "video_decoder.hevc" }, { "OMX.google.g711.alaw.decoder", "g711dec", "audio_decoder.g711alaw" }, { "OMX.google.g711.mlaw.decoder", "g711dec", "audio_decoder.g711mlaw" }, { "OMX.google.h263.decoder", "mpeg4dec", "video_decoder.h263" }, diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp index eb9fcf7..1c383f7 100644 --- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp +++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp @@ -183,12 +183,12 @@ OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter( return OMX_ErrorUnsupportedIndex; } - if (index >= mNumProfileLevels) { + if (profileLevel->nProfileIndex >= mNumProfileLevels) { return OMX_ErrorNoMore; } - profileLevel->eProfile = mProfileLevels[index].mProfile; - profileLevel->eLevel = mProfileLevels[index].mLevel; + profileLevel->eProfile = mProfileLevels[profileLevel->nProfileIndex].mProfile; + profileLevel->eLevel = mProfileLevels[profileLevel->nProfileIndex].mLevel; return OMX_ErrorNone; } |