author    Praveen Chavan <pchavan@codeaurora.org>  2014-10-16 11:49:14 -0700
committer Chong Zhang <chz@google.com>             2014-11-12 17:22:36 -0800
commit    1099188151eb63af24ecf542b58d4257bbb8236a (patch)
tree      d1f15d781c76f511465da68c6d334ebcaf8b6eac /media/libstagefright/MediaCodecSource.cpp
parent    868e2f0148b1c491e4fd4839b46d73f6216a058d (diff)
Stagefright: use MediaCodec in async mode for recording
Async mode reduces the number of messages posted between MediaCodec and
MediaCodecSource. This reduces thread wakeups and helps reduce CPU
utilization.

Bug: 18246026
Change-Id: I4b0837f309fdd12e323c1dfa72525f5a31971a03
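Before the hunks, a minimal sketch of the async-callback pattern this patch adopts, condensed from the diff itself. The helper names initEncoderAsync() and onEncoderActivity() are hypothetical stand-ins for the touched portions of initEncoder() and onMessageReceived(); the MediaCodec calls, message keys, and CB_* constants are the ones the patch uses, and the MediaCodecSource members (mEncoder, mReflector, mEncoderActivityNotify, mAvailEncoderInputIndices) are assumed to exist as in the real class. It is not a standalone program.

    #define LOG_TAG "MediaCodecSource"
    #include <utils/Log.h>
    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/ADebug.h>
    #include <media/stagefright/foundation/AMessage.h>

    // Register one AMessage target up front; MediaCodec then posts a message
    // per event instead of MediaCodecSource polling dequeue*Buffer() after a
    // generic requestActivityNotification() ping.
    status_t MediaCodecSource::initEncoderAsync() {           // hypothetical helper
        mEncoderActivityNotify = new AMessage(
                kWhatEncoderActivity, mReflector->id());
        mEncoder->setCallback(mEncoderActivityNotify);        // must precede start()
        return mEncoder->start();
    }

    // Dispatch on the "callbackID" payload; each message names exactly one buffer.
    void MediaCodecSource::onEncoderActivity(const sp<AMessage> &msg) {  // hypothetical
        int32_t cbID;
        CHECK(msg->findInt32("callbackID", &cbID));
        if (cbID == MediaCodec::CB_INPUT_AVAILABLE) {
            int32_t index;
            CHECK(msg->findInt32("index", &index));
            mAvailEncoderInputIndices.push_back(index);       // feed lazily
            feedEncoderInputBuffers();
        } else if (cbID == MediaCodec::CB_OUTPUT_AVAILABLE) {
            int32_t index;
            CHECK(msg->findInt32("index", &index));
            sp<ABuffer> outbuf;
            if (mEncoder->getOutputBuffer(index, &outbuf) == OK && outbuf != NULL) {
                // copy the encoded payload out, then hand the slot back
                mEncoder->releaseOutputBuffer(index);
            }
        } else if (cbID == MediaCodec::CB_ERROR) {
            int32_t err;
            CHECK(msg->findInt32("err", &err));
            ALOGE("encoder reported error 0x%x", err);        // then signal EOS / tear down
        }
    }

The gain claimed in the commit message follows from this shape: each callback identifies a specific buffer, so the source no longer wakes up on a generic activity ping, drains dequeueInputBuffer()/dequeueOutputBuffer() speculatively, and re-arms requestActivityNotification() on every pass.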
Diffstat (limited to 'media/libstagefright/MediaCodecSource.cpp')
-rw-r--r--  media/libstagefright/MediaCodecSource.cpp  269
1 file changed, 105 insertions(+), 164 deletions(-)
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 0fecda8..c26e909 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -422,19 +422,11 @@ status_t MediaCodecSource::initEncoder() {
}
}
- err = mEncoder->start();
-
- if (err != OK) {
- return err;
- }
-
- err = mEncoder->getInputBuffers(&mEncoderInputBuffers);
+ mEncoderActivityNotify = new AMessage(
+ kWhatEncoderActivity, mReflector->id());
+ mEncoder->setCallback(mEncoderActivityNotify);
- if (err != OK) {
- return err;
- }
-
- err = mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
+ err = mEncoder->start();
if (err != OK) {
return err;
@@ -461,14 +453,6 @@ void MediaCodecSource::releaseEncoder() {
mbuf->release();
}
}
-
- for (size_t i = 0; i < mEncoderInputBuffers.size(); ++i) {
- sp<ABuffer> accessUnit = mEncoderInputBuffers.itemAt(i);
- accessUnit->setMediaBufferBase(NULL);
- }
-
- mEncoderInputBuffers.clear();
- mEncoderOutputBuffers.clear();
}
status_t MediaCodecSource::postSynchronouslyAndReturnError(
@@ -539,20 +523,6 @@ void MediaCodecSource::resume(int64_t skipFramesBeforeUs) {
}
}
-void MediaCodecSource::scheduleDoMoreWork() {
- if (mDoMoreWorkPending) {
- return;
- }
-
- mDoMoreWorkPending = true;
-
- if (mEncoderActivityNotify == NULL) {
- mEncoderActivityNotify = new AMessage(
- kWhatEncoderActivity, mReflector->id());
- }
- mEncoder->requestActivityNotification(mEncoderActivityNotify);
-}
-
status_t MediaCodecSource::feedEncoderInputBuffers() {
while (!mInputBufferQueue.empty()
&& !mAvailEncoderInputIndices.empty()) {
@@ -587,16 +557,22 @@ status_t MediaCodecSource::feedEncoderInputBuffers() {
#endif // DEBUG_DRIFT_TIME
}
+ sp<ABuffer> inbuf;
+ status_t err = mEncoder->getInputBuffer(bufferIndex, &inbuf);
+ if (err != OK || inbuf == NULL) {
+ mbuf->release();
+ signalEOS();
+ break;
+ }
+
size = mbuf->size();
- memcpy(mEncoderInputBuffers.itemAt(bufferIndex)->data(),
- mbuf->data(), size);
+ memcpy(inbuf->data(), mbuf->data(), size);
if (mIsVideo) {
// video encoder will release MediaBuffer when done
// with underlying data.
- mEncoderInputBuffers.itemAt(bufferIndex)->setMediaBufferBase(
- mbuf);
+ inbuf->setMediaBufferBase(mbuf);
} else {
mbuf->release();
}
@@ -615,113 +591,6 @@ status_t MediaCodecSource::feedEncoderInputBuffers() {
return OK;
}
-status_t MediaCodecSource::doMoreWork(int32_t numInput, int32_t numOutput) {
- status_t err = OK;
-
- if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
- while (numInput-- > 0) {
- size_t bufferIndex;
- err = mEncoder->dequeueInputBuffer(&bufferIndex);
-
- if (err != OK) {
- break;
- }
-
- mAvailEncoderInputIndices.push_back(bufferIndex);
- }
-
- feedEncoderInputBuffers();
- }
-
- while (numOutput-- > 0) {
- size_t bufferIndex;
- size_t offset;
- size_t size;
- int64_t timeUs;
- uint32_t flags;
- native_handle_t* handle = NULL;
- err = mEncoder->dequeueOutputBuffer(
- &bufferIndex, &offset, &size, &timeUs, &flags);
-
- if (err != OK) {
- if (err == INFO_FORMAT_CHANGED) {
- continue;
- } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
- mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
- continue;
- }
-
- if (err == -EAGAIN) {
- err = OK;
- }
- break;
- }
- if (!(flags & MediaCodec::BUFFER_FLAG_EOS)) {
- sp<ABuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
-
- MediaBuffer *mbuf = new MediaBuffer(outbuf->size());
- memcpy(mbuf->data(), outbuf->data(), outbuf->size());
-
- if (!(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) {
- if (mIsVideo) {
- int64_t decodingTimeUs;
- if (mFlags & FLAG_USE_SURFACE_INPUT) {
- // GraphicBufferSource is supposed to discard samples
- // queued before start, and offset timeUs by start time
- CHECK_GE(timeUs, 0ll);
- // TODO:
- // Decoding time for surface source is unavailable,
- // use presentation time for now. May need to move
- // this logic into MediaCodec.
- decodingTimeUs = timeUs;
- } else {
- CHECK(!mDecodingTimeQueue.empty());
- decodingTimeUs = *(mDecodingTimeQueue.begin());
- mDecodingTimeQueue.erase(mDecodingTimeQueue.begin());
- }
- mbuf->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
-
- ALOGV("[video] time %" PRId64 " us (%.2f secs), dts/pts diff %" PRId64,
- timeUs, timeUs / 1E6, decodingTimeUs - timeUs);
- } else {
- int64_t driftTimeUs = 0;
-#if DEBUG_DRIFT_TIME
- CHECK(!mDriftTimeQueue.empty());
- driftTimeUs = *(mDriftTimeQueue.begin());
- mDriftTimeQueue.erase(mDriftTimeQueue.begin());
- mbuf->meta_data()->setInt64(kKeyDriftTime, driftTimeUs);
-#endif // DEBUG_DRIFT_TIME
- ALOGV("[audio] time %" PRId64 " us (%.2f secs), drift %" PRId64,
- timeUs, timeUs / 1E6, driftTimeUs);
- }
- mbuf->meta_data()->setInt64(kKeyTime, timeUs);
- } else {
- mbuf->meta_data()->setInt32(kKeyIsCodecConfig, true);
- }
- if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
- mbuf->meta_data()->setInt32(kKeyIsSyncFrame, true);
- }
- mbuf->setObserver(this);
- mbuf->add_ref();
-
- {
- Mutex::Autolock autoLock(mOutputBufferLock);
- mOutputBufferQueue.push_back(mbuf);
- mOutputBufferCond.signal();
- }
- }
-
- mEncoder->releaseOutputBuffer(bufferIndex);
-
- if (flags & MediaCodec::BUFFER_FLAG_EOS) {
- err = ERROR_END_OF_STREAM;
- break;
- }
- }
-
- return err;
-}
-
status_t MediaCodecSource::onStart(MetaData *params) {
if (mStopping) {
ALOGE("Failed to start while we're stopping");
@@ -749,7 +618,6 @@ status_t MediaCodecSource::onStart(MetaData *params) {
startTimeUs = -1ll;
}
resume(startTimeUs);
- scheduleDoMoreWork();
} else {
CHECK(mPuller != NULL);
sp<AMessage> notify = new AMessage(
@@ -793,37 +661,110 @@ void MediaCodecSource::onMessageReceived(const sp<AMessage> &msg) {
mInputBufferQueue.push_back(mbuf);
feedEncoderInputBuffers();
- scheduleDoMoreWork();
break;
}
case kWhatEncoderActivity:
{
- mDoMoreWorkPending = false;
-
if (mEncoder == NULL) {
break;
}
- int32_t numInput, numOutput;
+ int32_t cbID;
+ CHECK(msg->findInt32("callbackID", &cbID));
+ if (cbID == MediaCodec::CB_INPUT_AVAILABLE) {
+ int32_t index;
+ CHECK(msg->findInt32("index", &index));
+
+ mAvailEncoderInputIndices.push_back(index);
+ feedEncoderInputBuffers();
+ } else if (cbID == MediaCodec::CB_OUTPUT_AVAILABLE) {
+ int32_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ int32_t flags;
+ native_handle_t* handle = NULL;
+
+ CHECK(msg->findInt32("index", &index));
+ CHECK(msg->findSize("offset", &offset));
+ CHECK(msg->findSize("size", &size));
+ CHECK(msg->findInt64("timeUs", &timeUs));
+ CHECK(msg->findInt32("flags", &flags));
+
+ if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+ mEncoder->releaseOutputBuffer(index);
+ signalEOS();
+ break;
+ }
- if (!msg->findInt32("input-buffers", &numInput)) {
- numInput = INT32_MAX;
- }
- if (!msg->findInt32("output-buffers", &numOutput)) {
- numOutput = INT32_MAX;
- }
+ sp<ABuffer> outbuf;
+ status_t err = mEncoder->getOutputBuffer(index, &outbuf);
+ if (err != OK || outbuf == NULL) {
+ signalEOS();
+ break;
+ }
- status_t err = doMoreWork(numInput, numOutput);
+ MediaBuffer *mbuf = new MediaBuffer(outbuf->size());
+ memcpy(mbuf->data(), outbuf->data(), outbuf->size());
- if (err == OK) {
- scheduleDoMoreWork();
- } else {
- // reached EOS, or error
- signalEOS(err);
- }
+ if (!(flags & MediaCodec::BUFFER_FLAG_CODECCONFIG)) {
+ if (mIsVideo) {
+ int64_t decodingTimeUs;
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ // GraphicBufferSource is supposed to discard samples
+ // queued before start, and offset timeUs by start time
+ CHECK_GE(timeUs, 0ll);
+ // TODO:
+ // Decoding time for surface source is unavailable,
+ // use presentation time for now. May need to move
+ // this logic into MediaCodec.
+ decodingTimeUs = timeUs;
+ } else {
+ CHECK(!mDecodingTimeQueue.empty());
+ decodingTimeUs = *(mDecodingTimeQueue.begin());
+ mDecodingTimeQueue.erase(mDecodingTimeQueue.begin());
+ }
+ mbuf->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
- break;
+ ALOGV("[video] time %" PRId64 " us (%.2f secs), dts/pts diff %" PRId64,
+ timeUs, timeUs / 1E6, decodingTimeUs - timeUs);
+ } else {
+ int64_t driftTimeUs = 0;
+#if DEBUG_DRIFT_TIME
+ CHECK(!mDriftTimeQueue.empty());
+ driftTimeUs = *(mDriftTimeQueue.begin());
+ mDriftTimeQueue.erase(mDriftTimeQueue.begin());
+ mbuf->meta_data()->setInt64(kKeyDriftTime, driftTimeUs);
+#endif // DEBUG_DRIFT_TIME
+ ALOGV("[audio] time %" PRId64 " us (%.2f secs), drift %" PRId64,
+ timeUs, timeUs / 1E6, driftTimeUs);
+ }
+ mbuf->meta_data()->setInt64(kKeyTime, timeUs);
+ } else {
+ mbuf->meta_data()->setInt32(kKeyIsCodecConfig, true);
+ }
+ if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
+ mbuf->meta_data()->setInt32(kKeyIsSyncFrame, true);
+ }
+ mbuf->setObserver(this);
+ mbuf->add_ref();
+
+ {
+ Mutex::Autolock autoLock(mOutputBufferLock);
+ mOutputBufferQueue.push_back(mbuf);
+ mOutputBufferCond.signal();
+ }
+
+ mEncoder->releaseOutputBuffer(index);
+ } else if (cbID == MediaCodec::CB_ERROR) {
+ status_t err;
+ CHECK(msg->findInt32("err", &err));
+ ALOGE("Encoder (%s) reported error : 0x%x",
+ mIsVideo ? "video" : "audio", err);
+ signalEOS();
+ }
+ break;
}
case kWhatStart:
{