Diffstat (limited to 'media')
-rw-r--r--  media/libmedia/AudioEffect.cpp | 11
-rw-r--r--  media/libmedia/AudioTrack.cpp | 119
-rw-r--r--  media/libmedia/IOMX.cpp | 58
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.cpp | 71
-rw-r--r--  media/libmediaplayerservice/MediaPlayerService.h | 3
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.cpp | 15
-rw-r--r--  media/libmediaplayerservice/StagefrightRecorder.h | 3
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp | 12
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp | 119
-rw-r--r--  media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h | 1
-rw-r--r--  media/libstagefright/ACodec.cpp | 149
-rw-r--r--  media/libstagefright/CameraSource.cpp | 17
-rw-r--r--  media/libstagefright/MPEG4Extractor.cpp | 22
-rw-r--r--  media/libstagefright/MPEG4Writer.cpp | 9
-rw-r--r--  media/libstagefright/MediaCodecList.cpp | 102
-rw-r--r--  media/libstagefright/OMXCodec.cpp | 7
-rw-r--r--  media/libstagefright/OggExtractor.cpp | 478
-rw-r--r--  media/libstagefright/SampleTable.cpp | 4
-rwxr-xr-x  media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp | 8
-rw-r--r--  media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp | 39
-rw-r--r--  media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp | 26
-rw-r--r--  media/libstagefright/colorconversion/SoftwareRenderer.cpp | 18
-rw-r--r--  media/libstagefright/http/MediaHTTP.cpp | 5
-rw-r--r--  media/libstagefright/include/OMXNodeInstance.h | 6
-rw-r--r--  media/libstagefright/include/OggExtractor.h | 4
-rw-r--r--  media/libstagefright/mpeg2ts/ATSParser.cpp | 5
-rw-r--r--  media/libstagefright/omx/GraphicBufferSource.cpp | 79
-rw-r--r--  media/libstagefright/omx/GraphicBufferSource.h | 25
-rw-r--r--  media/libstagefright/omx/OMX.cpp | 27
-rw-r--r--  media/libstagefright/omx/OMXNodeInstance.cpp | 23
-rw-r--r--  media/libstagefright/omx/tests/OMXHarness.cpp | 6
-rw-r--r--  media/libstagefright/omx/tests/OMXHarness.h | 2
32 files changed, 1166 insertions(+), 307 deletions(-)
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index bbeb854..ff82544 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -134,12 +134,14 @@ status_t AudioEffect::set(const effect_uuid_t *type,
if (iEffect == 0 || (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS)) {
ALOGE("set(): AudioFlinger could not create effect, status: %d", mStatus);
+ if (iEffect == 0) {
+ mStatus = NO_INIT;
+ }
return mStatus;
}
mEnabled = (volatile int32_t)enabled;
- mIEffect = iEffect;
cblk = iEffect->getCblk();
if (cblk == 0) {
mStatus = NO_INIT;
@@ -147,6 +149,7 @@ status_t AudioEffect::set(const effect_uuid_t *type,
return mStatus;
}
+ mIEffect = iEffect;
mCblkMemory = cblk;
mCblk = static_cast<effect_param_cblk_t*>(cblk->pointer());
int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
@@ -177,11 +180,11 @@ AudioEffect::~AudioEffect()
mIEffect->disconnect();
IInterface::asBinder(mIEffect)->unlinkToDeath(mIEffectClient);
}
+ mIEffect.clear();
+ mCblkMemory.clear();
+ mIEffectClient.clear();
IPCThreadState::self()->flushCommands();
}
- mIEffect.clear();
- mIEffectClient.clear();
- mCblkMemory.clear();
}
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 070baa1..81ae6d7 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -38,11 +38,23 @@ static const int kMaxLoopCountNotifications = 32;
namespace android {
// ---------------------------------------------------------------------------
+// TODO: Move to a separate .h
+
template <typename T>
-const T &min(const T &x, const T &y) {
+static inline const T &min(const T &x, const T &y) {
return x < y ? x : y;
}
+template <typename T>
+static inline const T &max(const T &x, const T &y) {
+ return x > y ? x : y;
+}
+
+static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
+{
+ return ((double)frames * 1000000000) / ((double)sampleRate * speed);
+}
+
static int64_t convertTimespecToUs(const struct timespec &tv)
{
return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
@@ -480,6 +492,8 @@ status_t AudioTrack::set(
mObservedSequence = mSequence;
mInUnderrun = false;
mPreviousTimestampValid = false;
+ mTimestampStartupGlitchReported = false;
+ mRetrogradeMotionReported = false;
return NO_ERROR;
}
@@ -507,6 +521,8 @@ status_t AudioTrack::start()
// reset current position as seen by client to 0
mPosition = 0;
mPreviousTimestampValid = false;
+ mTimestampStartupGlitchReported = false;
+ mRetrogradeMotionReported = false;
// For offloaded tracks, we don't know if the hardware counters are really zero here,
// since the flush is asynchronous and stop may not fully drain.
@@ -1759,7 +1775,7 @@ nsecs_t AudioTrack::processAudioBuffer()
// Cache other fields that will be needed soon
uint32_t sampleRate = mSampleRate;
float speed = mPlaybackRate.mSpeed;
- uint32_t notificationFrames = mNotificationFramesAct;
+ const uint32_t notificationFrames = mNotificationFramesAct;
if (mRefreshRemaining) {
mRefreshRemaining = false;
mRemainingFrames = notificationFrames;
@@ -1797,7 +1813,14 @@ nsecs_t AudioTrack::processAudioBuffer()
mLock.unlock();
+ // get anchor time to account for callbacks.
+ const nsecs_t timeBeforeCallbacks = systemTime();
+
if (waitStreamEnd) {
+ // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
+ // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
+ // (and make sure we don't callback for more data while we're stopping).
+ // This helps with position, marker notifications, and track invalidation.
struct timespec timeout;
timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
timeout.tv_nsec = 0;
@@ -1882,12 +1905,17 @@ nsecs_t AudioTrack::processAudioBuffer()
minFrames = kPoll * notificationFrames;
}
+ // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
+ static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
+ const nsecs_t timeAfterCallbacks = systemTime();
+
// Convert frame units to time units
nsecs_t ns = NS_WHENEVER;
if (minFrames != (uint32_t) ~0) {
- // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
- static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
- ns = ((double)minFrames * 1000000000) / ((double)sampleRate * speed) + kFudgeNs;
+ ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
+ ns -= (timeAfterCallbacks - timeBeforeCallbacks); // account for callback time
+ // TODO: Should we warn if the callback time is too long?
+ if (ns < 0) ns = 0;
}
// If not supplying data by EVENT_MORE_DATA, then we're done
@@ -1895,6 +1923,13 @@ nsecs_t AudioTrack::processAudioBuffer()
return ns;
}
+ // EVENT_MORE_DATA callback handling.
+ // Timing for linear pcm audio data formats can be derived directly from the
+ // buffer fill level.
+ // Timing for compressed data is not directly available from the buffer fill level,
+ // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
+ // to return a certain fill level.
+
struct timespec timeout;
const struct timespec *requested = &ClientProxy::kForever;
if (ns != NS_WHENEVER) {
@@ -1925,12 +1960,15 @@ nsecs_t AudioTrack::processAudioBuffer()
return NS_NEVER;
}
- if (mRetryOnPartialBuffer && !isOffloaded()) {
+ if (mRetryOnPartialBuffer && audio_is_linear_pcm(mFormat)) {
mRetryOnPartialBuffer = false;
if (avail < mRemainingFrames) {
- int64_t myns = ((double)(mRemainingFrames - avail) * 1100000000)
- / ((double)sampleRate * speed);
- if (ns < 0 || myns < ns) {
+ if (ns > 0) { // account for obtain time
+ const nsecs_t timeNow = systemTime();
+ ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
+ }
+ nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
+ if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
ns = myns;
}
return ns;
@@ -1953,7 +1991,42 @@ nsecs_t AudioTrack::processAudioBuffer()
// Keep this thread going to handle timed events and
// still try to get more data in intervals of WAIT_PERIOD_MS
// but don't just loop and block the CPU, so wait
- return WAIT_PERIOD_MS * 1000000LL;
+
+ // mCbf(EVENT_MORE_DATA, ...) might either
+ // (1) Block until it can fill the buffer, returning 0 size on EOS.
+ // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
+ // (3) Return 0 size when no data is available, does not wait for more data.
+ //
+ // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
+ // We try to compute the wait time to avoid a tight sleep-wait cycle,
+ // especially for case (3).
+ //
+ // The decision to support (1) and (2) affect the sizing of mRemainingFrames
+ // and this loop; whereas for case (3) we could simply check once with the full
+ // buffer size and skip the loop entirely.
+
+ nsecs_t myns;
+ if (audio_is_linear_pcm(mFormat)) {
+ // time to wait based on buffer occupancy
+ const nsecs_t datans = mRemainingFrames <= avail ? 0 :
+ framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
+ // audio flinger thread buffer size (TODO: adjust for fast tracks)
+ const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
+ // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
+ myns = datans + (afns / 2);
+ } else {
+ // FIXME: This could ping quite a bit if the buffer isn't full.
+ // Note that when mState is stopping we waitStreamEnd, so it never gets here.
+ myns = kWaitPeriodNs;
+ }
+ if (ns > 0) { // account for obtain and callback time
+ const nsecs_t timeNow = systemTime();
+ ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
+ }
+ if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
+ ns = myns;
+ }
+ return ns;
}
size_t releasedFrames = writtenSize / mFrameSize;
@@ -2149,7 +2222,12 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
}
// Check whether a pending flush or stop has completed, as those commands may
- // be asynchronous or return near finish.
+ // be asynchronous or return near finish or exhibit glitchy behavior.
+ //
+ // Originally this showed up as the first timestamp being a continuation of
+ // the previous song under gapless playback.
+ // However, we sometimes see zero timestamps, then a glitch of
+ // the previous song's position, and then correct timestamps afterwards.
if (mStartUs != 0 && mSampleRate != 0) {
static const int kTimeJitterUs = 100000; // 100 ms
static const int k1SecUs = 1000000;
@@ -2167,16 +2245,29 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
// Verify that the counter can't count faster than the sample rate
- // since the start time. If greater, then that means we have failed
+ // since the start time. If greater, then that means we may have failed
// to completely flush or stop the previous playing track.
- ALOGW("incomplete flush or stop:"
+ ALOGW_IF(!mTimestampStartupGlitchReported,
+ "getTimestamp startup glitch detected"
" deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
(long long)deltaTimeUs, (long long)deltaPositionByUs,
timestamp.mPosition);
+ mTimestampStartupGlitchReported = true;
+ if (previousTimestampValid
+ && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
+ timestamp = mPreviousTimestamp;
+ mPreviousTimestampValid = true;
+ return NO_ERROR;
+ }
return WOULD_BLOCK;
}
+ if (deltaPositionByUs != 0) {
+ mStartUs = 0; // don't check again, we got valid nonzero position.
+ }
+ } else {
+ mStartUs = 0; // don't check again, start time expired.
}
- mStartUs = 0; // no need to check again, start timestamp has either expired or unneeded.
+ mTimestampStartupGlitchReported = false;
}
} else {
// Update the mapping between local consumed (mPosition) and server consumed (mServer)
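
For reference, a minimal standalone sketch of the wait-time arithmetic the AudioTrack changes above introduce, assuming WAIT_PERIOD_MS is 10 ms; the AudioTrack state is collapsed into plain parameters and everything here is illustrative rather than the shipped implementation:

    // Illustrative sketch only; nsecs_t stands in for the Android time type.
    #include <cstdint>

    typedef int64_t nsecs_t;

    static inline nsecs_t framesToNanoseconds(int64_t frames, uint32_t sampleRate, float speed) {
        // frames / (sampleRate * speed) seconds, expressed in nanoseconds
        return (nsecs_t)(((double)frames * 1000000000) / ((double)sampleRate * speed));
    }

    // Sleep roughly until the outstanding frames should have drained, padded by a
    // fixed wait period and reduced by the time already spent inside callbacks.
    static nsecs_t nextPollDelayNs(uint32_t remainingFrames, uint32_t availableFrames,
                                   uint32_t sampleRate, float speed,
                                   nsecs_t callbackElapsedNs) {
        static const nsecs_t kWaitPeriodNs = 10 * 1000000LL;   // assumed WAIT_PERIOD_MS = 10
        const int64_t pendingFrames =
                remainingFrames > availableFrames ? remainingFrames - availableFrames : 0;
        nsecs_t ns = framesToNanoseconds(pendingFrames, sampleRate, speed) + kWaitPeriodNs;
        ns -= callbackElapsedNs;    // account for time consumed by the callbacks themselves
        return ns < 0 ? 0 : ns;
    }
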
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index ca1cdc7..16da65e 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -1077,16 +1077,29 @@ public:
: BpInterface<IOMXObserver>(impl) {
}
- virtual void onMessage(const omx_message &msg) {
+ virtual void onMessages(const std::list<omx_message> &messages) {
Parcel data, reply;
- data.writeInterfaceToken(IOMXObserver::getInterfaceDescriptor());
- data.write(&msg, sizeof(msg));
- if (msg.fenceFd >= 0) {
- data.writeFileDescriptor(msg.fenceFd, true /* takeOwnership */);
+ std::list<omx_message>::const_iterator it = messages.cbegin();
+ bool first = true;
+ while (it != messages.cend()) {
+ const omx_message &msg = *it++;
+ if (first) {
+ data.writeInterfaceToken(IOMXObserver::getInterfaceDescriptor());
+ data.writeInt32(msg.node);
+ first = false;
+ }
+ data.writeInt32(msg.fenceFd >= 0);
+ if (msg.fenceFd >= 0) {
+ data.writeFileDescriptor(msg.fenceFd, true /* takeOwnership */);
+ }
+ data.writeInt32(msg.type);
+ data.write(&msg.u, sizeof(msg.u));
+ ALOGV("onMessage writing message %d, size %zu", msg.type, sizeof(msg));
+ }
+ if (!first) {
+ data.writeInt32(-1); // mark end
+ remote()->transact(OBSERVER_ON_MSG, data, &reply, IBinder::FLAG_ONEWAY);
}
- ALOGV("onMessage writing message %d, size %zu", msg.type, sizeof(msg));
-
- remote()->transact(OBSERVER_ON_MSG, data, &reply, IBinder::FLAG_ONEWAY);
}
};
@@ -1098,19 +1111,28 @@ status_t BnOMXObserver::onTransact(
case OBSERVER_ON_MSG:
{
CHECK_OMX_INTERFACE(IOMXObserver, data, reply);
+ IOMX::node_id node = data.readInt32();
+ std::list<omx_message> messages;
+ status_t err = FAILED_TRANSACTION; // must receive at least one message
+ do {
+ int haveFence = data.readInt32();
+ if (haveFence < 0) { // we use -1 to mark end of messages
+ break;
+ }
+ omx_message msg;
+ msg.node = node;
+ msg.fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
+ msg.type = (typeof(msg.type))data.readInt32();
+ err = data.read(&msg.u, sizeof(msg.u));
+ ALOGV("onTransact reading message %d, size %zu", msg.type, sizeof(msg));
+ messages.push_back(msg);
+ } while (err == OK);
- omx_message msg;
- data.read(&msg, sizeof(msg));
- if (msg.fenceFd >= 0) {
- msg.fenceFd = ::dup(data.readFileDescriptor());
+ if (err == OK) {
+ onMessages(messages);
}
- ALOGV("onTransact reading message %d, size %zu", msg.type, sizeof(msg));
-
- // XXX Could use readInplace maybe?
- onMessage(msg);
-
- return NO_ERROR;
+ return err;
}
default:
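
The reworked observer call above batches all pending OMX messages into a single binder transaction. A simplified sketch of that framing, with a plain integer vector standing in for android::Parcel and the payload union reduced to one word (names here are illustrative, not the real API):

    #include <cstdint>
    #include <list>
    #include <vector>

    struct Msg { int32_t node; int32_t fenceFd; int32_t type; int32_t payload; };

    // Framing: the node id is written once, then each message contributes
    // [haveFence, type, payload]; a trailing -1 marks the end of the batch.
    // (The real transaction also writes the interface token and, when haveFence
    // is nonzero, the fence file descriptor itself.)
    static std::vector<int32_t> frameMessages(const std::list<Msg> &messages) {
        std::vector<int32_t> out;
        bool first = true;
        for (const Msg &m : messages) {
            if (first) { out.push_back(m.node); first = false; }
            out.push_back(m.fenceFd >= 0 ? 1 : 0);
            out.push_back(m.type);
            out.push_back(m.payload);
        }
        if (!out.empty()) out.push_back(-1);    // end-of-messages marker
        return out;
    }
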
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 7c40121..ae869d6 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1482,20 +1482,11 @@ status_t MediaPlayerService::AudioOutput::open(
AudioCallback cb, void *cookie,
audio_output_flags_t flags,
const audio_offload_info_t *offloadInfo,
- bool doNotReconnect)
+ bool doNotReconnect,
+ uint32_t suggestedFrameCount)
{
- mCallback = cb;
- mCallbackCookie = cookie;
-
- // Check argument "bufferCount" against the mininum buffer count
- if (bufferCount < mMinBufferCount) {
- ALOGD("bufferCount (%d) is too small and increased to %d", bufferCount, mMinBufferCount);
- bufferCount = mMinBufferCount;
-
- }
ALOGV("open(%u, %d, 0x%x, 0x%x, %d, %d 0x%x)", sampleRate, channelCount, channelMask,
format, bufferCount, mSessionId, flags);
- size_t frameCount;
// offloading is only supported in callback mode for now.
// offloadInfo must be present if offload flag is set
@@ -1504,20 +1495,36 @@ status_t MediaPlayerService::AudioOutput::open(
return BAD_VALUE;
}
+ // compute frame count for the AudioTrack internal buffer
+ size_t frameCount;
if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
frameCount = 0; // AudioTrack will get frame count from AudioFlinger
} else {
+ // try to estimate the buffer processing fetch size from AudioFlinger.
+ // framesPerBuffer is approximate and generally correct, except when it's not :-).
uint32_t afSampleRate;
size_t afFrameCount;
-
if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
return NO_INIT;
}
if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
return NO_INIT;
}
+ const size_t framesPerBuffer =
+ (unsigned long long)sampleRate * afFrameCount / afSampleRate;
- frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+ if (bufferCount == 0) {
+ // use suggestedFrameCount
+ bufferCount = (suggestedFrameCount + framesPerBuffer - 1) / framesPerBuffer;
+ }
+ // Check argument bufferCount against the minimum buffer count
+ if (bufferCount != 0 && bufferCount < mMinBufferCount) {
+ ALOGV("bufferCount (%d) increased to %d", bufferCount, mMinBufferCount);
+ bufferCount = mMinBufferCount;
+ }
+ // if frameCount is 0, then AudioTrack will get frame count from AudioFlinger
+ // which will be the minimum size permitted.
+ frameCount = bufferCount * framesPerBuffer;
}
if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
@@ -1528,6 +1535,9 @@ status_t MediaPlayerService::AudioOutput::open(
}
}
+ mCallback = cb;
+ mCallbackCookie = cookie;
+
// Check whether we can recycle the track
bool reuse = false;
bool bothOffloaded = false;
@@ -1669,7 +1679,7 @@ status_t MediaPlayerService::AudioOutput::open(
t->setVolume(mLeftVolume, mRightVolume);
mSampleRateHz = sampleRate;
- mFlags = flags;
+ mFlags = t->getFlags(); // we suggest the flags above, but new AudioTrack() may not grant it.
mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
uint32_t pos;
if (t->getPosition(&pos) == OK) {
@@ -1678,7 +1688,9 @@ status_t MediaPlayerService::AudioOutput::open(
mTrack = t;
status_t res = NO_ERROR;
- if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
+ // Note some output devices may give us a direct track even though we don't specify it.
+ // Example: Line application b/17459982.
+ if ((mFlags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0) {
res = t->setPlaybackRate(mPlaybackRate);
if (res == NO_ERROR) {
t->setAuxEffectSendLevel(mSendLevel);
@@ -1744,12 +1756,14 @@ ssize_t MediaPlayerService::AudioOutput::write(const void* buffer, size_t size,
void MediaPlayerService::AudioOutput::stop()
{
ALOGV("stop");
+ mBytesWritten = 0;
if (mTrack != 0) mTrack->stop();
}
void MediaPlayerService::AudioOutput::flush()
{
ALOGV("flush");
+ mBytesWritten = 0;
if (mTrack != 0) mTrack->flush();
}
@@ -1851,20 +1865,23 @@ void MediaPlayerService::AudioOutput::CallbackWrapper(
me, buffer->raw, buffer->size, me->mCallbackCookie,
CB_EVENT_FILL_BUFFER);
- if ((me->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0 &&
- actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
- // We've reached EOS but the audio track is not stopped yet,
- // keep playing silence.
+ // Log when no data is returned from the callback.
+ // (1) We may have no data (especially with network streaming sources).
+ // (2) We may have reached the EOS and the audio track is not stopped yet.
+ // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
+ // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
+ //
+ // This is a benign busy-wait, with the next data request generated 10 ms or more later;
+ // nevertheless for power reasons, we don't want to see too many of these.
- memset(buffer->raw, 0, buffer->size);
- actualSize = buffer->size;
- }
+ ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
+ me->mBytesWritten += actualSize; // benign race with reader.
buffer->size = actualSize;
} break;
-
case AudioTrack::EVENT_STREAM_END:
+ // currently only occurs for offloaded callbacks
ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
(*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
me->mCallbackCookie, CB_EVENT_STREAM_END);
@@ -1877,11 +1894,15 @@ void MediaPlayerService::AudioOutput::CallbackWrapper(
break;
case AudioTrack::EVENT_UNDERRUN:
- // This occurs when there is no data available, typically occurring
+ // This occurs when there is no data available, typically
// when there is a failure to supply data to the AudioTrack. It can also
// occur in non-offloaded mode when the audio device comes out of standby.
//
- // If you see this at the start of playback, there probably was a glitch.
+ // If an AudioTrack underruns it outputs silence. Since this happens suddenly
+ // it may sound like an audible pop or glitch.
+ //
+ // The underrun event is sent once per track underrun; the condition is reset
+ // when more data is sent to the AudioTrack.
ALOGI("callbackwrapper: EVENT_UNDERRUN (discarded)");
break;
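
A worked example of the buffer sizing in open() above, using assumed values (a 44.1 kHz track, a 960-frame AudioFlinger buffer at 48 kHz, a 500 ms suggested sink size, and a minimum buffer count of 4); only the arithmetic is taken from the patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t sampleRate = 44100;
        const size_t   afFrameCount = 960;           // AudioFlinger buffer, in frames
        const uint32_t afSampleRate = 48000;
        const uint32_t suggestedFrameCount = 22050;  // ~500 ms at 44.1 kHz
        const uint32_t minBufferCount = 4;           // stand-in for mMinBufferCount

        const size_t framesPerBuffer =
                (unsigned long long)sampleRate * afFrameCount / afSampleRate;        // 882
        uint32_t bufferCount =
                (suggestedFrameCount + framesPerBuffer - 1) / framesPerBuffer;       // 25
        if (bufferCount < minBufferCount) bufferCount = minBufferCount;
        const size_t frameCount = bufferCount * framesPerBuffer;                     // 22050

        printf("framesPerBuffer=%zu bufferCount=%u frameCount=%zu\n",
               framesPerBuffer, bufferCount, frameCount);
        return 0;
    }
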
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index e9f72b8..7527506 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -98,7 +98,8 @@ class MediaPlayerService : public BnMediaPlayerService
AudioCallback cb, void *cookie,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
const audio_offload_info_t *offloadInfo = NULL,
- bool doNotReconnect = false);
+ bool doNotReconnect = false,
+ uint32_t suggestedFrameCount = 0);
virtual status_t start();
virtual ssize_t write(const void* buffer, size_t size, bool blocking = true);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index e16a4b5..98abe9c 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1194,8 +1194,7 @@ void StagefrightRecorder::clipVideoFrameWidth() {
}
}
-status_t StagefrightRecorder::checkVideoEncoderCapabilities(
- bool *supportsCameraSourceMetaDataMode) {
+status_t StagefrightRecorder::checkVideoEncoderCapabilities() {
/* hardware codecs must support camera source meta data mode */
Vector<CodecCapabilities> codecs;
OMXClient client;
@@ -1207,9 +1206,6 @@ status_t StagefrightRecorder::checkVideoEncoderCapabilities(
mVideoEncoder == VIDEO_ENCODER_VP8 ? MEDIA_MIMETYPE_VIDEO_VP8 :
mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""),
false /* decoder */, true /* hwCodec */, &codecs);
- *supportsCameraSourceMetaDataMode = codecs.size() > 0;
- ALOGV("encoder %s camera source meta-data mode",
- *supportsCameraSourceMetaDataMode ? "supports" : "DOES NOT SUPPORT");
if (!mCaptureTimeLapse) {
// Dont clip for time lapse capture as encoder will have enough
@@ -1418,9 +1414,7 @@ status_t StagefrightRecorder::setupMediaSource(
status_t StagefrightRecorder::setupCameraSource(
sp<CameraSource> *cameraSource) {
status_t err = OK;
- bool encoderSupportsCameraSourceMetaDataMode;
- if ((err = checkVideoEncoderCapabilities(
- &encoderSupportsCameraSourceMetaDataMode)) != OK) {
+ if ((err = checkVideoEncoderCapabilities()) != OK) {
return err;
}
Size videoSize;
@@ -1436,14 +1430,13 @@ status_t StagefrightRecorder::setupCameraSource(
mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate, mPreviewSurface,
- mTimeBetweenTimeLapseFrameCaptureUs,
- encoderSupportsCameraSourceMetaDataMode);
+ mTimeBetweenTimeLapseFrameCaptureUs);
*cameraSource = mCameraSourceTimeLapse;
} else {
*cameraSource = CameraSource::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
videoSize, mFrameRate,
- mPreviewSurface, encoderSupportsCameraSourceMetaDataMode);
+ mPreviewSurface);
}
mCamera.clear();
mCameraProxy.clear();
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 7473f42..8af9278 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -141,8 +141,7 @@ private:
status_t setupRTPRecording();
status_t setupMPEG2TSRecording();
sp<MediaSource> createAudioSource();
- status_t checkVideoEncoderCapabilities(
- bool *supportsCameraSourceMetaDataMode);
+ status_t checkVideoEncoderCapabilities();
status_t checkAudioEncoderCapabilities();
// Generic MediaSource set-up. Returns the appropriate
// source (CameraSource or SurfaceMediaSource)
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 6abc81c..c649c62 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -24,6 +24,7 @@
#include "NuPlayerRenderer.h"
#include "NuPlayerSource.h"
+#include <cutils/properties.h>
#include <media/ICrypto.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -40,6 +41,10 @@
namespace android {
+static inline bool getAudioDeepBufferSetting() {
+ return property_get_bool("media.stagefright.audio.deep", false /* default_value */);
+}
+
NuPlayer::Decoder::Decoder(
const sp<AMessage> &notify,
const sp<Source> &source,
@@ -539,9 +544,10 @@ void NuPlayer::Decoder::handleOutputFormatChange(const sp<AMessage> &format) {
uint32_t flags;
int64_t durationUs;
bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
- if (!hasVideo &&
- mSource->getDuration(&durationUs) == OK &&
- durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
+ if (getAudioDeepBufferSetting() // override regardless of source duration
+ || (!hasVideo
+ && mSource->getDuration(&durationUs) == OK
+ && durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US)) {
flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
} else {
flags = AUDIO_OUTPUT_FLAG_NONE;
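
Condensed restatement of the output-flag decision above, purely for readability; the duration threshold is an assumed placeholder for AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US and the boolean result stands in for choosing AUDIO_OUTPUT_FLAG_DEEP_BUFFER over AUDIO_OUTPUT_FLAG_NONE:

    #include <cstdint>

    static const int64_t kMinDeepBufferDurationUs = 5000000;   // assumed threshold (5 s)

    // The media.stagefright.audio.deep property forces deep buffering; otherwise it is
    // used only for audio-only content whose known duration exceeds the threshold.
    static bool useDeepBuffer(bool deepBufferProperty, bool hasVideo,
                              bool haveDuration, int64_t durationUs) {
        return deepBufferProperty
                || (!hasVideo && haveDuration && durationUs > kMinDeepBufferDurationUs);
    }
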
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 396ead6..eb4e67d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -19,7 +19,7 @@
#include <utils/Log.h>
#include "NuPlayerRenderer.h"
-
+#include <cutils/properties.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -36,6 +36,36 @@
namespace android {
+/*
+ * Example of common configuration settings in shell script form
+
+ #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
+ adb shell setprop audio.offload.disable 1
+
+ #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
+ adb shell setprop audio.offload.video 1
+
+ #Use audio callbacks for PCM data
+ adb shell setprop media.stagefright.audio.cbk 1
+
+ #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
+ adb shell setprop media.stagefright.audio.deep 1
+
+ #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
+ adb shell setprop media.stagefright.audio.sink 1000
+
+ * These configurations take effect for the next track played (not the current track).
+ */
+
+static inline bool getUseAudioCallbackSetting() {
+ return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
+}
+
+static inline int32_t getAudioSinkPcmMsSetting() {
+ return property_get_int32(
+ "media.stagefright.audio.sink", 500 /* default_value */);
+}
+
// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000ll;
@@ -87,6 +117,7 @@ NuPlayer::Renderer::Renderer(
mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
mTotalBuffersQueued(0),
mLastAudioBufferDrained(0),
+ mUseAudioCallback(false),
mWakeLock(new AWakeLock()) {
mMediaClock = new MediaClock;
mPlaybackRate = mPlaybackSettings.mSpeed;
@@ -592,8 +623,7 @@ void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
}
void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
- if (mDrainAudioQueuePending || mSyncQueues || mPaused
- || offloadingAudio()) {
+ if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
return;
}
@@ -642,12 +672,14 @@ size_t NuPlayer::Renderer::AudioSinkCallback(
case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
{
+ ALOGV("AudioSink::CB_EVENT_STREAM_END");
me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
break;
}
case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
{
+ ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
me->notifyAudioTearDown();
break;
}
@@ -659,7 +691,7 @@ size_t NuPlayer::Renderer::AudioSinkCallback(
size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
Mutex::Autolock autoLock(mLock);
- if (!offloadingAudio() || mPaused) {
+ if (!mUseAudioCallback) {
return 0;
}
@@ -667,13 +699,13 @@ size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
size_t sizeCopied = 0;
bool firstEntry = true;
+ QueueEntry *entry; // will be valid after while loop if hasEOS is set.
while (sizeCopied < size && !mAudioQueue.empty()) {
- QueueEntry *entry = &*mAudioQueue.begin();
+ entry = &*mAudioQueue.begin();
if (entry->mBuffer == NULL) { // EOS
hasEOS = true;
mAudioQueue.erase(mAudioQueue.begin());
- entry = NULL;
break;
}
@@ -681,7 +713,7 @@ size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
firstEntry = false;
int64_t mediaTimeUs;
CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
- ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
+ ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
}
@@ -714,10 +746,28 @@ size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
}
+ // for non-offloaded audio, we need to compute the frames written because
+ // there is no EVENT_STREAM_END notification. The frames written gives
+ // an estimate on the pending played out duration.
+ if (!offloadingAudio()) {
+ mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
+ }
+
if (hasEOS) {
(new AMessage(kWhatStopAudioSink, this))->post();
+ // As there is currently no EVENT_STREAM_END callback notification for
+ // non-offloaded audio tracks, we need to post the EOS ourselves.
+ if (!offloadingAudio()) {
+ int64_t postEOSDelayUs = 0;
+ if (mAudioSink->needsTrailingPadding()) {
+ postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
+ }
+ ALOGV("fillAudioBuffer: notifyEOS "
+ "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld",
+ mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
+ notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
+ }
}
-
return sizeCopied;
}
@@ -749,6 +799,7 @@ bool NuPlayer::Renderer::onDrainAudioQueue() {
}
#endif
+ uint32_t prevFramesWritten = mNumFramesWritten;
while (!mAudioQueue.empty()) {
QueueEntry *entry = &*mAudioQueue.begin();
@@ -778,7 +829,8 @@ bool NuPlayer::Renderer::onDrainAudioQueue() {
if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
int64_t mediaTimeUs;
CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
- ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
+ ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
+ mediaTimeUs / 1E6);
onNewAudioMediaTime(mediaTimeUs);
}
@@ -846,7 +898,13 @@ bool NuPlayer::Renderer::onDrainAudioQueue() {
}
mMediaClock->updateMaxTimeMedia(maxTimeMedia);
- return !mAudioQueue.empty();
+ // calculate whether we need to reschedule another write.
+ bool reschedule = !mAudioQueue.empty()
+ && (!mPaused
+ || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
+ //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u",
+ // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
+ return reschedule;
}
int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
@@ -1230,9 +1288,8 @@ void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
++mAudioDrainGeneration;
prepareForMediaRenderingStart_l();
- if (offloadingAudio()) {
- clearAudioFirstAnchorTime_l();
- }
+ // the frame count will be reset after flush.
+ clearAudioFirstAnchorTime_l();
}
mDrainAudioQueuePending = false;
@@ -1240,7 +1297,9 @@ void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
if (offloadingAudio()) {
mAudioSink->pause();
mAudioSink->flush();
- mAudioSink->start();
+ if (!mPaused) {
+ mAudioSink->start();
+ }
} else {
mAudioSink->pause();
mAudioSink->flush();
@@ -1345,7 +1404,7 @@ void NuPlayer::Renderer::onPause() {
{
Mutex::Autolock autoLock(mLock);
- ++mAudioDrainGeneration;
+ // we do not increment audio drain generation so that we fill audio buffer during pause.
++mVideoDrainGeneration;
prepareForMediaRenderingStart_l();
mPaused = true;
@@ -1590,12 +1649,13 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
audioSinkChanged = true;
mAudioSink->close();
+
err = mAudioSink->open(
sampleRate,
numChannels,
(audio_channel_mask_t)channelMask,
audioFormat,
- 8 /* bufferCount */,
+ 0 /* bufferCount - unused */,
&NuPlayer::Renderer::AudioSinkCallback,
this,
(audio_output_flags_t)offloadFlags,
@@ -1613,7 +1673,9 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
// before reaching the hardware.
// TODO
mCurrentOffloadInfo = offloadInfo;
- err = mAudioSink->start();
+ if (!mPaused) { // for preview mode, don't start if paused
+ err = mAudioSink->start();
+ }
ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
}
if (err != OK) {
@@ -1623,6 +1685,7 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
ALOGV("openAudioSink: offload failed");
}
+ mUseAudioCallback = true; // offload mode transfers data through callback
}
}
if (!offloadOnly && !offloadingAudio()) {
@@ -1646,17 +1709,27 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
audioSinkChanged = true;
mAudioSink->close();
mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
+ // Note: It is possible to set up the callback, but not use it to send audio data.
+ // This requires a fix in AudioSink to explicitly specify the transfer mode.
+ mUseAudioCallback = getUseAudioCallbackSetting();
+
+ // Compute the desired buffer size.
+ // For callback mode, the amount of time before wakeup is about half the buffer size.
+ const uint32_t frameCount =
+ (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
+
status_t err = mAudioSink->open(
sampleRate,
numChannels,
(audio_channel_mask_t)channelMask,
AUDIO_FORMAT_PCM_16_BIT,
- 8 /* bufferCount */,
- NULL,
- NULL,
+ 0 /* bufferCount - unused */,
+ mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
+ mUseAudioCallback ? this : NULL,
(audio_output_flags_t)pcmFlags,
NULL,
- true /* doNotReconnect */);
+ true /* doNotReconnect */,
+ frameCount);
if (err == OK) {
err = mAudioSink->setPlaybackRate(mPlaybackSettings);
}
@@ -1666,7 +1739,9 @@ status_t NuPlayer::Renderer::onOpenAudioSink(
return err;
}
mCurrentPcmInfo = info;
- mAudioSink->start();
+ if (!mPaused) { // for preview mode, don't start if paused
+ mAudioSink->start();
+ }
}
if (audioSinkChanged) {
onAudioSinkChanged();
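
Worked example of the PCM sink sizing introduced above, assuming the 500 ms default of media.stagefright.audio.sink and a 44.1 kHz stream (values are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t sampleRate = 44100;
        const int32_t sinkMs = 500;    // default returned by getAudioSinkPcmMsSetting()
        const uint32_t frameCount = (unsigned long long)sampleRate * sinkMs / 1000;   // 22050
        // In callback mode the wakeup interval is roughly half the buffer, ~250 ms here.
        printf("pcm sink frameCount = %u\n", frameCount);
        return 0;
    }
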
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index fbdf5bf..c2fea40 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -194,6 +194,7 @@ private:
int32_t mTotalBuffersQueued;
int32_t mLastAudioBufferDrained;
+ bool mUseAudioCallback;
sp<AWakeLock> mWakeLock;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3a98e8c..172e19c 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -106,6 +106,16 @@ static void InitOMXParams(T *params) {
params->nVersion.s.nStep = 0;
}
+struct MessageList : public RefBase {
+ MessageList() {
+ }
+ std::list<sp<AMessage> > &getList() { return mList; }
+private:
+ std::list<sp<AMessage> > mList;
+
+ DISALLOW_EVIL_CONSTRUCTORS(MessageList);
+};
+
struct CodecObserver : public BnOMXObserver {
CodecObserver() {}
@@ -114,55 +124,65 @@ struct CodecObserver : public BnOMXObserver {
}
// from IOMXObserver
- virtual void onMessage(const omx_message &omx_msg) {
- sp<AMessage> msg = mNotify->dup();
-
- msg->setInt32("type", omx_msg.type);
- msg->setInt32("node", omx_msg.node);
-
- switch (omx_msg.type) {
- case omx_message::EVENT:
- {
- msg->setInt32("event", omx_msg.u.event_data.event);
- msg->setInt32("data1", omx_msg.u.event_data.data1);
- msg->setInt32("data2", omx_msg.u.event_data.data2);
- break;
+ virtual void onMessages(const std::list<omx_message> &messages) {
+ sp<AMessage> notify;
+ bool first = true;
+ sp<MessageList> msgList = new MessageList();
+ for (std::list<omx_message>::const_iterator it = messages.cbegin();
+ it != messages.cend(); ++it) {
+ const omx_message &omx_msg = *it;
+ if (first) {
+ notify = mNotify->dup();
+ notify->setInt32("node", omx_msg.node);
}
- case omx_message::EMPTY_BUFFER_DONE:
- {
- msg->setInt32("buffer", omx_msg.u.buffer_data.buffer);
- msg->setInt32("fence_fd", omx_msg.fenceFd);
- break;
- }
+ sp<AMessage> msg = new AMessage;
+ msg->setInt32("type", omx_msg.type);
+ switch (omx_msg.type) {
+ case omx_message::EVENT:
+ {
+ msg->setInt32("event", omx_msg.u.event_data.event);
+ msg->setInt32("data1", omx_msg.u.event_data.data1);
+ msg->setInt32("data2", omx_msg.u.event_data.data2);
+ break;
+ }
- case omx_message::FILL_BUFFER_DONE:
- {
- msg->setInt32(
- "buffer", omx_msg.u.extended_buffer_data.buffer);
- msg->setInt32(
- "range_offset",
- omx_msg.u.extended_buffer_data.range_offset);
- msg->setInt32(
- "range_length",
- omx_msg.u.extended_buffer_data.range_length);
- msg->setInt32(
- "flags",
- omx_msg.u.extended_buffer_data.flags);
- msg->setInt64(
- "timestamp",
- omx_msg.u.extended_buffer_data.timestamp);
- msg->setInt32(
- "fence_fd", omx_msg.fenceFd);
- break;
- }
+ case omx_message::EMPTY_BUFFER_DONE:
+ {
+ msg->setInt32("buffer", omx_msg.u.buffer_data.buffer);
+ msg->setInt32("fence_fd", omx_msg.fenceFd);
+ break;
+ }
- default:
- ALOGE("Unrecognized message type: %d", omx_msg.type);
- break;
- }
+ case omx_message::FILL_BUFFER_DONE:
+ {
+ msg->setInt32(
+ "buffer", omx_msg.u.extended_buffer_data.buffer);
+ msg->setInt32(
+ "range_offset",
+ omx_msg.u.extended_buffer_data.range_offset);
+ msg->setInt32(
+ "range_length",
+ omx_msg.u.extended_buffer_data.range_length);
+ msg->setInt32(
+ "flags",
+ omx_msg.u.extended_buffer_data.flags);
+ msg->setInt64(
+ "timestamp",
+ omx_msg.u.extended_buffer_data.timestamp);
+ msg->setInt32(
+ "fence_fd", omx_msg.fenceFd);
+ break;
+ }
- msg->post();
+ default:
+ ALOGE("Unrecognized message type: %d", omx_msg.type);
+ break;
+ }
+ msgList->getList().push_back(msg);
+ }
+ notify->setObject("messages", msgList);
+ notify->post();
}
protected:
@@ -200,8 +220,15 @@ protected:
void postFillThisBuffer(BufferInfo *info);
private:
+ // Handles an OMX message. Returns true iff message was handled.
bool onOMXMessage(const sp<AMessage> &msg);
+ // Handles a list of messages. Returns true iff messages were handled.
+ bool onOMXMessageList(const sp<AMessage> &msg);
+
+ // returns true iff this message is for this component and the component is alive
+ bool checkOMXMessage(const sp<AMessage> &msg);
+
bool onOMXEmptyBufferDone(IOMX::buffer_id bufferID, int fenceFd);
bool onOMXFillBufferDone(
@@ -4402,9 +4429,14 @@ bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) {
break;
}
+ case ACodec::kWhatOMXMessageList:
+ {
+ return checkOMXMessage(msg) ? onOMXMessageList(msg) : true;
+ }
+
case ACodec::kWhatOMXMessage:
{
- return onOMXMessage(msg);
+ return checkOMXMessage(msg) ? onOMXMessage(msg) : true;
}
case ACodec::kWhatSetSurface:
@@ -4463,16 +4495,13 @@ bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) {
return true;
}
-bool ACodec::BaseState::onOMXMessage(const sp<AMessage> &msg) {
- int32_t type;
- CHECK(msg->findInt32("type", &type));
-
+bool ACodec::BaseState::checkOMXMessage(const sp<AMessage> &msg) {
// there is a possibility that this is an outstanding message for a
// codec that we have already destroyed
if (mCodec->mNode == 0) {
ALOGI("ignoring message as already freed component: %s",
msg->debugString().c_str());
- return true;
+ return false;
}
IOMX::node_id nodeID;
@@ -4481,6 +4510,24 @@ bool ACodec::BaseState::onOMXMessage(const sp<AMessage> &msg) {
ALOGE("Unexpected message for nodeID: %u, should have been %u", nodeID, mCodec->mNode);
return false;
}
+ return true;
+}
+
+bool ACodec::BaseState::onOMXMessageList(const sp<AMessage> &msg) {
+ sp<RefBase> obj;
+ CHECK(msg->findObject("messages", &obj));
+ sp<MessageList> msgList = static_cast<MessageList *>(obj.get());
+
+ for (std::list<sp<AMessage>>::const_iterator it = msgList->getList().cbegin();
+ it != msgList->getList().cend(); ++it) {
+ onOMXMessage(*it);
+ }
+ return true;
+}
+
+bool ACodec::BaseState::onOMXMessage(const sp<AMessage> &msg) {
+ int32_t type;
+ CHECK(msg->findInt32("type", &type));
switch (type) {
case omx_message::EVENT:
@@ -5316,7 +5363,7 @@ bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
return false;
}
- notify = new AMessage(kWhatOMXMessage, mCodec);
+ notify = new AMessage(kWhatOMXMessageList, mCodec);
observer->setNotificationMessage(notify);
mCodec->mComponentName = componentName;
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 1b788f3..2606e44 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -608,6 +608,16 @@ status_t CameraSource::startCameraRecording() {
}
}
+ err = mCamera->sendCommand(
+ CAMERA_CMD_SET_VIDEO_FORMAT, mEncoderFormat, mEncoderDataSpace);
+
+ // This could happen for CameraHAL1 clients; thus the failure is
+ // not a fatal error
+ if (err != OK) {
+ ALOGW("Failed to set video encoder format/dataspace to %d, %d due to %d",
+ mEncoderFormat, mEncoderDataSpace, err);
+ }
+
err = OK;
if (mCameraFlags & FLAGS_HOT_CAMERA) {
mCamera->unlock();
@@ -645,6 +655,9 @@ status_t CameraSource::start(MetaData *meta) {
mStartTimeUs = 0;
mNumInputBuffers = 0;
+ mEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ mEncoderDataSpace = HAL_DATASPACE_BT709;
+
if (meta) {
int64_t startTimeUs;
if (meta->findInt64(kKeyTime, &startTimeUs)) {
@@ -656,6 +669,10 @@ status_t CameraSource::start(MetaData *meta) {
CHECK_GT(nBuffers, 0);
mNumInputBuffers = nBuffers;
}
+
+ // TODO: Read in format/dataspace from somewhere
+ // Uncomment to test SW encoders until TODO is resolved
+ // mEncoderFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
}
status_t err;
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 65d8a04..62612c7 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -1586,13 +1586,13 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
break;
}
- // ©xyz
+ // \xA9xyz
case FOURCC(0xA9, 'x', 'y', 'z'):
{
*offset += chunk_size;
- // Best case the total data length inside "©xyz" box
- // would be 8, for instance "©xyz" + "\x00\x04\x15\xc7" + "0+0/",
+ // Best case the total data length inside "\xA9xyz" box
+ // would be 8, for instance "\xA9xyz" + "\x00\x04\x15\xc7" + "0+0/",
// where "\x00\x04" is the text string length with value = 4,
// "\0x15\xc7" is the language code = en, and "0+0" is a
// location (string) value with longitude = 0 and latitude = 0.
@@ -3294,16 +3294,24 @@ status_t MPEG4Source::start(MetaData *params) {
mWantsNALFragments = false;
}
- mGroup = new MediaBufferGroup;
-
- int32_t max_size;
- CHECK(mFormat->findInt32(kKeyMaxInputSize, &max_size));
+ int32_t tmp;
+ CHECK(mFormat->findInt32(kKeyMaxInputSize, &tmp));
+ size_t max_size = tmp;
+ // A somewhat arbitrary limit that should be sufficient for 8k video frames
+ // If you see the message below for a valid input stream: increase the limit
+ if (max_size > 64 * 1024 * 1024) {
+ ALOGE("bogus max input size: %zu", max_size);
+ return ERROR_MALFORMED;
+ }
+ mGroup = new MediaBufferGroup;
mGroup->add_buffer(new MediaBuffer(max_size));
mSrcBuffer = new (std::nothrow) uint8_t[max_size];
if (mSrcBuffer == NULL) {
// file probably specified a bad max size
+ delete mGroup;
+ mGroup = NULL;
return ERROR_MALFORMED;
}
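
The max-size handling above treats a value read from the container as untrusted. A generalized sketch of that pattern; the 64 MiB cap matches the patch, everything else (function name, ownership convention) is illustrative:

    #include <cstddef>
    #include <cstdint>
    #include <new>

    static uint8_t *allocateSampleBuffer(int32_t declaredMaxSize, size_t *outSize) {
        if (declaredMaxSize <= 0 || (size_t)declaredMaxSize > 64 * 1024 * 1024) {
            return nullptr;                 // bogus value from the container
        }
        uint8_t *buf = new (std::nothrow) uint8_t[declaredMaxSize];
        if (buf != nullptr) {
            *outSize = (size_t)declaredMaxSize;
        }
        return buf;                         // caller must check for nullptr and delete[]
    }
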
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 95f361e..47f114a 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -113,6 +113,8 @@ private:
mCurrTableEntriesElement(NULL) {
CHECK_GT(mElementCapacity, 0);
CHECK_GT(mEntryCapacity, 0);
+ // Ensure no integer overflow on allocation in add().
+ CHECK_LT(mEntryCapacity, UINT32_MAX / mElementCapacity);
}
// Free the allocated memory.
@@ -385,6 +387,13 @@ MPEG4Writer::MPEG4Writer(int fd)
mStartTimeOffsetMs(-1),
mMetaKeys(new AMessage()) {
addDeviceMeta();
+
+ // Verify mFd is seekable
+ off64_t off = lseek64(mFd, 0, SEEK_SET);
+ if (off < 0) {
+ ALOGE("cannot seek mFd: %s (%d)", strerror(errno), errno);
+ release();
+ }
}
MPEG4Writer::~MPEG4Writer() {
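
The CHECK_LT added above guards the mElementCapacity * mEntryCapacity allocation against 32-bit overflow. The same test in isolation, as an illustrative sketch:

    #include <cstdint>
    #include <limits>

    // Mirrors CHECK_LT(entryCapacity, UINT32_MAX / elementCapacity): conservatively
    // requires the product to fit in uint32_t before it is used to size an allocation.
    static bool capacityFits(uint32_t elementCapacity, uint32_t entryCapacity) {
        if (elementCapacity == 0 || entryCapacity == 0) {
            return false;
        }
        return entryCapacity < std::numeric_limits<uint32_t>::max() / elementCapacity;
    }
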
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index f366b1f..d48ede9 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -46,8 +46,6 @@ const char *kMaxEncoderInputBuffers = "max-video-encoder-input-buffers";
static Mutex sInitMutex;
-static MediaCodecList *gCodecList = NULL;
-
static bool parseBoolean(const char *s) {
if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
return true;
@@ -57,64 +55,76 @@ static bool parseBoolean(const char *s) {
return *s != '\0' && *end == '\0' && res > 0;
}
+static bool isProfilingNeeded() {
+ bool profilingNeeded = true;
+ FILE *resultsFile = fopen(kProfilingResults, "r");
+ if (resultsFile) {
+ AString currentVersion = getProfilingVersionString();
+ size_t currentVersionSize = currentVersion.size();
+ char *versionString = new char[currentVersionSize + 1];
+ fgets(versionString, currentVersionSize + 1, resultsFile);
+ if (strcmp(versionString, currentVersion.c_str()) == 0) {
+ // profiling result up to date
+ profilingNeeded = false;
+ }
+ fclose(resultsFile);
+ delete[] versionString;
+ }
+ return profilingNeeded;
+}
+
// static
sp<IMediaCodecList> MediaCodecList::sCodecList;
// static
-sp<IMediaCodecList> MediaCodecList::getLocalInstance() {
- bool profilingNeeded = false;
+void *MediaCodecList::profilerThreadWrapper(void * /*arg*/) {
+ ALOGV("Enter profilerThreadWrapper.");
+ MediaCodecList *codecList = new MediaCodecList();
+ if (codecList->initCheck() != OK) {
+ ALOGW("Failed to create a new MediaCodecList, skipping codec profiling.");
+ delete codecList;
+ return NULL;
+ }
+
Vector<sp<MediaCodecInfo>> infos;
+ for (size_t i = 0; i < codecList->countCodecs(); ++i) {
+ infos.push_back(codecList->getCodecInfo(i));
+ }
+ ALOGV("Codec profiling started.");
+ profileCodecs(infos);
+ ALOGV("Codec profiling completed.");
+ codecList->parseTopLevelXMLFile(kProfilingResults, true /* ignore_errors */);
{
Mutex::Autolock autoLock(sInitMutex);
+ sCodecList = codecList;
+ }
+ return NULL;
+}
- if (gCodecList == NULL) {
- gCodecList = new MediaCodecList;
- if (gCodecList->initCheck() == OK) {
- sCodecList = gCodecList;
-
- FILE *resultsFile = fopen(kProfilingResults, "r");
- if (resultsFile) {
- AString currentVersion = getProfilingVersionString();
- size_t currentVersionSize = currentVersion.size();
- char *versionString = new char[currentVersionSize];
- fgets(versionString, currentVersionSize, resultsFile);
- if (strncmp(versionString, currentVersion.c_str(), currentVersionSize) != 0) {
- // profiling result out of date
- profilingNeeded = true;
- }
- fclose(resultsFile);
- delete[] versionString;
- } else {
- // profiling results doesn't existed
- profilingNeeded = true;
- }
-
- if (profilingNeeded) {
- for (size_t i = 0; i < gCodecList->countCodecs(); ++i) {
- infos.push_back(gCodecList->getCodecInfo(i));
- }
+// static
+sp<IMediaCodecList> MediaCodecList::getLocalInstance() {
+ Mutex::Autolock autoLock(sInitMutex);
+
+ if (sCodecList == NULL) {
+ MediaCodecList *codecList = new MediaCodecList;
+ if (codecList->initCheck() == OK) {
+ sCodecList = codecList;
+
+ if (isProfilingNeeded()) {
+ ALOGV("Codec profiling needed, will be run in separated thread.");
+ pthread_t profiler;
+ if (pthread_create(&profiler, NULL, profilerThreadWrapper, NULL) != 0) {
+ ALOGW("Failed to create thread for codec profiling.");
}
- } else {
- // failure to initialize may be temporary. retry on next call.
- delete gCodecList;
- gCodecList = NULL;
}
+ } else {
+ // failure to initialize may be temporary. retry on next call.
+ delete codecList;
}
}
- if (profilingNeeded) {
- profileCodecs(infos);
- }
-
- {
- Mutex::Autolock autoLock(sInitMutex);
- if (profilingNeeded) {
- gCodecList->parseTopLevelXMLFile(kProfilingResults, true /* ignore_errors */);
- }
-
- return sCodecList;
- }
+ return sCodecList;
}
static Mutex sRemoteInitMutex;
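
The profiling-results check above also tightens the version comparison: fgets() needs room for the terminating NUL, so the scratch buffer is now currentVersion.size() + 1 bytes and a full strcmp() is used. A standalone sketch of the same check (the results path and version string come from the caller; names are illustrative):

    #include <cstdio>
    #include <cstring>
    #include <string>

    static bool isProfilingNeeded(const std::string &currentVersion, const char *resultsPath) {
        bool profilingNeeded = true;
        FILE *resultsFile = fopen(resultsPath, "r");
        if (resultsFile != nullptr) {
            std::string onDisk(currentVersion.size() + 1, '\0');   // +1 for the NUL
            if (fgets(&onDisk[0], (int)currentVersion.size() + 1, resultsFile) != nullptr
                    && strcmp(onDisk.c_str(), currentVersion.c_str()) == 0) {
                profilingNeeded = false;    // stored results match the current version
            }
            fclose(resultsFile);
        }
        return profilingNeeded;
    }
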
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 927cc6c..96aa808 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -116,12 +116,15 @@ struct OMXCodecObserver : public BnOMXObserver {
}
// from IOMXObserver
- virtual void onMessage(const omx_message &msg) {
+ virtual void onMessages(const std::list<omx_message> &messages) {
sp<OMXCodec> codec = mTarget.promote();
if (codec.get() != NULL) {
Mutex::Autolock autoLock(codec->mLock);
- codec->on_message(msg);
+ for (std::list<omx_message>::const_iterator it = messages.cbegin();
+ it != messages.cend(); ++it) {
+ codec->on_message(*it);
+ }
codec.clear();
}
}
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 4297549..1c663a3 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -21,6 +21,7 @@
#include "include/OggExtractor.h"
#include <cutils/properties.h>
+#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaBuffer.h>
@@ -65,24 +66,28 @@ private:
OggSource &operator=(const OggSource &);
};
-struct MyVorbisExtractor {
- MyVorbisExtractor(const sp<DataSource> &source);
- virtual ~MyVorbisExtractor();
+struct MyOggExtractor {
+ MyOggExtractor(
+ const sp<DataSource> &source,
+ const char *mimeType,
+ size_t numHeaders,
+ int64_t seekPreRollUs);
+ virtual ~MyOggExtractor();
sp<MetaData> getFormat() const;
// Returns an approximate bitrate in bits per second.
- uint64_t approxBitrate();
+ virtual uint64_t approxBitrate() const = 0;
status_t seekToTime(int64_t timeUs);
status_t seekToOffset(off64_t offset);
- status_t readNextPacket(MediaBuffer **buffer, bool conf);
+ virtual status_t readNextPacket(MediaBuffer **buffer) = 0;
status_t init();
sp<MetaData> getFileMetaData() { return mFileMeta; }
-private:
+protected:
struct Page {
uint64_t mGranulePosition;
int32_t mPrevPacketSize;
@@ -102,12 +107,17 @@ private:
sp<DataSource> mSource;
off64_t mOffset;
Page mCurrentPage;
+ uint64_t mCurGranulePosition;
uint64_t mPrevGranulePosition;
size_t mCurrentPageSize;
bool mFirstPacketInPage;
uint64_t mCurrentPageSamples;
size_t mNextLaceIndex;
+ const char *mMimeType;
+ size_t mNumHeaders;
+ int64_t mSeekPreRollUs;
+
off64_t mFirstDataOffset;
vorbis_info mVi;
@@ -121,10 +131,26 @@ private:
ssize_t readPage(off64_t offset, Page *page);
status_t findNextPage(off64_t startOffset, off64_t *pageOffset);
- status_t verifyHeader(
- MediaBuffer *buffer, uint8_t type);
+ virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const = 0;
+
+ // Extract codec format, metadata tags, and various codec specific data;
+ // the format and CSD's are required to setup the decoders for the enclosed media content.
+ //
+ // Valid values for `type` are:
+ // 1 - bitstream identification header
+ // 3 - comment header
+ // 5 - codec setup header (Vorbis only)
+ virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type) = 0;
+
+ // Read the next ogg packet from the underlying data source; optionally
+ // calculate the timestamp for the output packet whilst pretending
+ // that we are parsing an Ogg Vorbis stream.
+ //
+ // *buffer is NULL'ed out immediately upon entry, and if successful a new buffer is allocated;
+ // clients are responsible for releasing the original buffer.
+ status_t _readNextPacket(MediaBuffer **buffer, bool calcVorbisTimestamp);
- int32_t packetBlockSize(MediaBuffer *buffer);
+ int32_t getPacketBlockSize(MediaBuffer *buffer);
void parseFileMetaData();
@@ -132,8 +158,61 @@ private:
void buildTableOfContents();
- MyVorbisExtractor(const MyVorbisExtractor &);
- MyVorbisExtractor &operator=(const MyVorbisExtractor &);
+ MyOggExtractor(const MyOggExtractor &);
+ MyOggExtractor &operator=(const MyOggExtractor &);
+};
+
+struct MyVorbisExtractor : public MyOggExtractor {
+ MyVorbisExtractor(const sp<DataSource> &source)
+ : MyOggExtractor(source,
+ MEDIA_MIMETYPE_AUDIO_VORBIS,
+ /* numHeaders */ 3,
+ /* seekPreRollUs */ 0) {
+ }
+
+ virtual uint64_t approxBitrate() const;
+
+ virtual status_t readNextPacket(MediaBuffer **buffer) {
+ return _readNextPacket(buffer, /* calcVorbisTimestamp = */ true);
+ }
+
+protected:
+ virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const {
+ return granulePos * 1000000ll / mVi.rate;
+ }
+
+ virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type);
+};
+
+struct MyOpusExtractor : public MyOggExtractor {
+ static const int32_t kOpusSampleRate = 48000;
+ static const int64_t kOpusSeekPreRollUs = 80000; // 80 ms
+
+ MyOpusExtractor(const sp<DataSource> &source)
+ : MyOggExtractor(source, MEDIA_MIMETYPE_AUDIO_OPUS, /*numHeaders*/ 2, kOpusSeekPreRollUs),
+ mChannelCount(0),
+ mCodecDelay(0),
+ mStartGranulePosition(-1) {
+ }
+
+ virtual uint64_t approxBitrate() const {
+ return 0;
+ }
+
+ virtual status_t readNextPacket(MediaBuffer **buffer);
+
+protected:
+ virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const;
+ virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type);
+
+private:
+ status_t verifyOpusHeader(MediaBuffer *buffer);
+ status_t verifyOpusComments(MediaBuffer *buffer);
+ uint32_t getNumSamplesInPacket(MediaBuffer *buffer) const;
+
+ uint8_t mChannelCount;
+ uint16_t mCodecDelay;
+ int64_t mStartGranulePosition;
};
static void extractAlbumArt(
@@ -179,13 +258,14 @@ status_t OggSource::read(
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
- if (mExtractor->mImpl->seekToTime(seekTimeUs) != OK) {
- return ERROR_END_OF_STREAM;
+ status_t err = mExtractor->mImpl->seekToTime(seekTimeUs);
+ if (err != OK) {
+ return err;
}
}
MediaBuffer *packet;
- status_t err = mExtractor->mImpl->readNextPacket(&packet, /* conf = */ false);
+ status_t err = mExtractor->mImpl->readNextPacket(&packet);
if (err != OK) {
return err;
@@ -209,14 +289,22 @@ status_t OggSource::read(
////////////////////////////////////////////////////////////////////////////////
-MyVorbisExtractor::MyVorbisExtractor(const sp<DataSource> &source)
+MyOggExtractor::MyOggExtractor(
+ const sp<DataSource> &source,
+ const char *mimeType,
+ size_t numHeaders,
+ int64_t seekPreRollUs)
: mSource(source),
mOffset(0),
+ mCurGranulePosition(0),
mPrevGranulePosition(0),
mCurrentPageSize(0),
mFirstPacketInPage(true),
mCurrentPageSamples(0),
mNextLaceIndex(0),
+ mMimeType(mimeType),
+ mNumHeaders(numHeaders),
+ mSeekPreRollUs(seekPreRollUs),
mFirstDataOffset(-1) {
mCurrentPage.mNumSegments = 0;
@@ -224,16 +312,16 @@ MyVorbisExtractor::MyVorbisExtractor(const sp<DataSource> &source)
vorbis_comment_init(&mVc);
}
-MyVorbisExtractor::~MyVorbisExtractor() {
+MyOggExtractor::~MyOggExtractor() {
vorbis_comment_clear(&mVc);
vorbis_info_clear(&mVi);
}
-sp<MetaData> MyVorbisExtractor::getFormat() const {
+sp<MetaData> MyOggExtractor::getFormat() const {
return mMeta;
}
-status_t MyVorbisExtractor::findNextPage(
+status_t MyOggExtractor::findNextPage(
off64_t startOffset, off64_t *pageOffset) {
*pageOffset = startOffset;
@@ -264,7 +352,7 @@ status_t MyVorbisExtractor::findNextPage(
// it (if any) and return its granule position.
// To do this we back up from the "current" page's offset until we find any
// page preceding it and then scan forward to just before the current page.
-status_t MyVorbisExtractor::findPrevGranulePosition(
+status_t MyOggExtractor::findPrevGranulePosition(
off64_t pageOffset, uint64_t *granulePos) {
*granulePos = 0;
@@ -280,7 +368,11 @@ status_t MyVorbisExtractor::findPrevGranulePosition(
ALOGV("backing up %lld bytes", (long long)(pageOffset - prevGuess));
status_t err = findNextPage(prevGuess, &prevPageOffset);
- if (err != OK) {
+ if (err == ERROR_END_OF_STREAM) {
+ // We are at the last page and didn't back off enough;
+ // back off 5000 bytes more and try again.
+ continue;
+ } else if (err != OK) {
return err;
}
@@ -314,11 +406,20 @@ status_t MyVorbisExtractor::findPrevGranulePosition(
}
}
-status_t MyVorbisExtractor::seekToTime(int64_t timeUs) {
+status_t MyOggExtractor::seekToTime(int64_t timeUs) {
+ timeUs -= mSeekPreRollUs;
+ if (timeUs < 0) {
+ timeUs = 0;
+ }
+
if (mTableOfContents.isEmpty()) {
// Perform approximate seeking based on avg. bitrate.
+ uint64_t bps = approxBitrate();
+ if (bps <= 0) {
+ return INVALID_OPERATION;
+ }
- off64_t pos = timeUs * approxBitrate() / 8000000ll;
+ off64_t pos = timeUs * bps / 8000000ll;
ALOGV("seeking to offset %lld", (long long)pos);
return seekToOffset(pos);
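
When there is no table of contents, the hunk above estimates a byte offset from the average bitrate: bytes = timeUs * bps / 8,000,000. A tiny self-contained sketch of the same arithmetic (the 128 kbps figure is just an example):

    // Rough sketch of the byte-offset estimate used when no table of contents
    // is available; it assumes a constant average bitrate, which is only an
    // approximation for VBR streams.
    #include <cstdint>
    #include <cstdio>

    static int64_t approxSeekOffset(int64_t timeUs, uint64_t bitsPerSecond) {
        if (bitsPerSecond == 0) {
            return -1;  // mirrors the INVALID_OPERATION case in the patch
        }
        // bytes = seconds * bits/s / 8  ==  timeUs * bps / 8,000,000
        return timeUs * (int64_t)bitsPerSecond / 8000000ll;
    }

    int main() {
        // Seeking 60 s into a nominal 128 kbps stream lands near byte 960000.
        printf("%lld\n", (long long)approxSeekOffset(60000000ll, 128000));
        return 0;
    }
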
@@ -353,7 +454,7 @@ status_t MyVorbisExtractor::seekToTime(int64_t timeUs) {
return seekToOffset(entry.mPageOffset);
}
-status_t MyVorbisExtractor::seekToOffset(off64_t offset) {
+status_t MyOggExtractor::seekToOffset(off64_t offset) {
if (mFirstDataOffset >= 0 && offset < mFirstDataOffset) {
// Once we know where the actual audio data starts (past the headers)
// don't ever seek to anywhere before that.
@@ -386,7 +487,7 @@ status_t MyVorbisExtractor::seekToOffset(off64_t offset) {
return OK;
}
-ssize_t MyVorbisExtractor::readPage(off64_t offset, Page *page) {
+ssize_t MyOggExtractor::readPage(off64_t offset, Page *page) {
uint8_t header[27];
ssize_t n;
if ((n = mSource->readAt(offset, header, sizeof(header)))
@@ -457,7 +558,110 @@ ssize_t MyVorbisExtractor::readPage(off64_t offset, Page *page) {
return sizeof(header) + page->mNumSegments + totalSize;
}
-status_t MyVorbisExtractor::readNextPacket(MediaBuffer **out, bool conf) {
+status_t MyOpusExtractor::readNextPacket(MediaBuffer **out) {
+ if (mOffset <= mFirstDataOffset && mStartGranulePosition < 0) {
+ // The first sample might not start at time 0; find out where by subtracting
+ // the number of samples on the first page from the granule position
+ // (position of last complete sample) of the first page. This happens
+ // the first time before we attempt to read a packet from the first page.
+ MediaBuffer *mBuf;
+ uint32_t numSamples = 0;
+ uint64_t curGranulePosition = 0;
+ while (true) {
+ status_t err = _readNextPacket(&mBuf, /* calcVorbisTimestamp = */false);
+ if (err != OK && err != ERROR_END_OF_STREAM) {
+ return err;
+ }
+ // First two pages are header pages.
+ if (err == ERROR_END_OF_STREAM || mCurrentPage.mPageNo > 2) {
+ break;
+ }
+ curGranulePosition = mCurrentPage.mGranulePosition;
+ numSamples += getNumSamplesInPacket(mBuf);
+ mBuf->release();
+ mBuf = NULL;
+ }
+
+ if (curGranulePosition > numSamples) {
+ mStartGranulePosition = curGranulePosition - numSamples;
+ } else {
+ mStartGranulePosition = 0;
+ }
+ seekToOffset(0);
+ }
+
+ status_t err = _readNextPacket(out, /* calcVorbisTimestamp = */false);
+ if (err != OK) {
+ return err;
+ }
+
+ int32_t currentPageSamples;
+ // Calculate timestamps by accumulating durations starting from the first sample of a page;
+ // We assume that we only seek to page boundaries.
+ if ((*out)->meta_data()->findInt32(kKeyValidSamples, &currentPageSamples)) {
+ // first packet in page
+ if (mOffset == mFirstDataOffset) {
+ currentPageSamples -= mStartGranulePosition;
+ (*out)->meta_data()->setInt32(kKeyValidSamples, currentPageSamples);
+ }
+ mCurGranulePosition = mCurrentPage.mGranulePosition - currentPageSamples;
+ }
+
+ int64_t timeUs = getTimeUsOfGranule(mCurGranulePosition);
+ (*out)->meta_data()->setInt64(kKeyTime, timeUs);
+
+ uint32_t frames = getNumSamplesInPacket(*out);
+ mCurGranulePosition += frames;
+ return OK;
+}
+
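
The loop above reads every packet on the first audio page, sums their durations, and subtracts that sum from the page's granule position to recover where the stream really starts. A toy worked example of that subtraction (the numbers are invented):

    // Toy numbers only: shows the subtraction performed above to recover the
    // granule position of the first sample when the stream does not start at 0.
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t firstPageGranule = 16896;    // granule pos of last sample on the page (example)
        uint32_t samplesOnFirstPage = 16384;  // sum of packet durations on that page (example)
        uint64_t startGranule = firstPageGranule > samplesOnFirstPage
                ? firstPageGranule - samplesOnFirstPage : 0;
        printf("stream starts at granule %llu\n", (unsigned long long)startGranule);  // 512
        return 0;
    }
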
+uint32_t MyOpusExtractor::getNumSamplesInPacket(MediaBuffer *buffer) const {
+ if (buffer == NULL || buffer->range_length() < 1) {
+ return 0;
+ }
+
+ uint8_t *data = (uint8_t *)buffer->data() + buffer->range_offset();
+ uint8_t toc = data[0];
+ uint8_t config = (toc >> 3) & 0x1f;
+ uint32_t frameSizesUs[] = {
+ 10000, 20000, 40000, 60000, // 0...3
+ 10000, 20000, 40000, 60000, // 4...7
+ 10000, 20000, 40000, 60000, // 8...11
+ 10000, 20000, // 12...13
+ 10000, 20000, // 14...15
+ 2500, 5000, 10000, 20000, // 16...19
+ 2500, 5000, 10000, 20000, // 20...23
+ 2500, 5000, 10000, 20000, // 24...27
+ 2500, 5000, 10000, 20000 // 28...31
+ };
+ uint32_t frameSizeUs = frameSizesUs[config];
+
+ uint32_t numFrames;
+ uint8_t c = toc & 3;
+ switch (c) {
+ case 0:
+ numFrames = 1;
+ break;
+ case 1:
+ case 2:
+ numFrames = 2;
+ break;
+ case 3:
+ if (buffer->range_length() < 3) {
+ numFrames = 0;
+ } else {
+ numFrames = data[2] & 0x3f;
+ }
+ break;
+ default:
+ TRESPASS();
+ }
+
+ uint32_t numSamples = frameSizeUs * numFrames * kOpusSampleRate / 1000000;
+ return numSamples;
+}
+
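
getNumSamplesInPacket() above decodes the Opus TOC byte: the top five bits select the frame duration and the low two bits select the frame count code. The sketch below is a simplified, standalone version following the RFC 6716 layout rather than the extractor code; note that for code 3 packets the frame-count byte is the byte immediately after the TOC byte:

    // Simplified, standalone duration calculator for a single Opus packet,
    // following the RFC 6716 TOC layout; written for illustration, not taken
    // from the extractor.
    #include <cstdint>
    #include <cstdio>

    static uint32_t opusPacketSamples(const uint8_t *data, size_t len) {
        if (len < 1) return 0;
        static const uint32_t kFrameSizeUs[32] = {
            10000, 20000, 40000, 60000,   // configs 0..3   (SILK NB)
            10000, 20000, 40000, 60000,   // configs 4..7   (SILK MB)
            10000, 20000, 40000, 60000,   // configs 8..11  (SILK WB)
            10000, 20000,                 // configs 12..13 (Hybrid SWB)
            10000, 20000,                 // configs 14..15 (Hybrid FB)
            2500, 5000, 10000, 20000,     // configs 16..19 (CELT NB)
            2500, 5000, 10000, 20000,     // configs 20..23 (CELT WB)
            2500, 5000, 10000, 20000,     // configs 24..27 (CELT SWB)
            2500, 5000, 10000, 20000,     // configs 28..31 (CELT FB)
        };
        uint8_t toc = data[0];
        uint32_t frameUs = kFrameSizeUs[toc >> 3];
        uint32_t numFrames;
        switch (toc & 3) {
            case 0:  numFrames = 1; break;           // one frame
            case 1: case 2: numFrames = 2; break;    // two frames
            default:                                 // code 3: explicit frame count
                numFrames = (len < 2) ? 0 : (data[1] & 0x3f);
                break;
        }
        return (uint32_t)((uint64_t)frameUs * numFrames * 48000 / 1000000);
    }

    int main() {
        // TOC 0xfc: config 31 (CELT fullband, 20 ms), stereo, code 0 -> 960 samples.
        const uint8_t pkt[] = { 0xfc, 0x00 };
        printf("%u samples\n", opusPacketSamples(pkt, sizeof(pkt)));
        return 0;
    }
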
+status_t MyOggExtractor::_readNextPacket(MediaBuffer **out, bool calcVorbisTimestamp) {
*out = NULL;
MediaBuffer *buffer = NULL;
@@ -523,9 +727,8 @@ status_t MyVorbisExtractor::readNextPacket(MediaBuffer **out, bool conf) {
mFirstPacketInPage = false;
}
- // ignore timestamp for configuration packets
- if (!conf) {
- int32_t curBlockSize = packetBlockSize(buffer);
+ if (calcVorbisTimestamp) {
+ int32_t curBlockSize = getPacketBlockSize(buffer);
if (mCurrentPage.mPrevPacketSize < 0) {
mCurrentPage.mPrevPacketSize = curBlockSize;
mCurrentPage.mPrevPacketPos =
@@ -597,43 +800,24 @@ status_t MyVorbisExtractor::readNextPacket(MediaBuffer **out, bool conf) {
}
}
-status_t MyVorbisExtractor::init() {
+status_t MyOggExtractor::init() {
mMeta = new MetaData;
- mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS);
+ mMeta->setCString(kKeyMIMEType, mMimeType);
- MediaBuffer *packet;
status_t err;
- if ((err = readNextPacket(&packet, /* conf = */ true)) != OK) {
- return err;
- }
- ALOGV("read packet of size %zu\n", packet->range_length());
- err = verifyHeader(packet, 1);
- packet->release();
- packet = NULL;
- if (err != OK) {
- return err;
- }
-
- if ((err = readNextPacket(&packet, /* conf = */ true)) != OK) {
- return err;
- }
- ALOGV("read packet of size %zu\n", packet->range_length());
- err = verifyHeader(packet, 3);
- packet->release();
- packet = NULL;
- if (err != OK) {
- return err;
- }
-
- if ((err = readNextPacket(&packet, /* conf = */ true)) != OK) {
- return err;
- }
- ALOGV("read packet of size %zu\n", packet->range_length());
- err = verifyHeader(packet, 5);
- packet->release();
- packet = NULL;
- if (err != OK) {
- return err;
+ MediaBuffer *packet;
+ for (size_t i = 0; i < mNumHeaders; ++i) {
+ // ignore timestamp for configuration packets
+ if ((err = _readNextPacket(&packet, /* calcVorbisTimestamp = */ false)) != OK) {
+ return err;
+ }
+ ALOGV("read packet of size %zu\n", packet->range_length());
+ err = verifyHeader(packet, /* type = */ i * 2 + 1);
+ packet->release();
+ packet = NULL;
+ if (err != OK) {
+ return err;
+ }
}
mFirstDataOffset = mOffset + mCurrentPageSize;
@@ -649,7 +833,7 @@ status_t MyVorbisExtractor::init() {
// we can only approximate using avg. bitrate if seeking to
// the end is too expensive or impossible (live streaming).
- int64_t durationUs = lastGranulePosition * 1000000ll / mVi.rate;
+ int64_t durationUs = getTimeUsOfGranule(lastGranulePosition);
mMeta->setInt64(kKeyDuration, durationUs);
@@ -659,7 +843,7 @@ status_t MyVorbisExtractor::init() {
return OK;
}
-void MyVorbisExtractor::buildTableOfContents() {
+void MyOggExtractor::buildTableOfContents() {
off64_t offset = mFirstDataOffset;
Page page;
ssize_t pageSize;
@@ -670,7 +854,7 @@ void MyVorbisExtractor::buildTableOfContents() {
mTableOfContents.editItemAt(mTableOfContents.size() - 1);
entry.mPageOffset = offset;
- entry.mTimeUs = page.mGranulePosition * 1000000ll / mVi.rate;
+ entry.mTimeUs = getTimeUsOfGranule(page.mGranulePosition);
offset += (size_t)pageSize;
}
@@ -698,7 +882,7 @@ void MyVorbisExtractor::buildTableOfContents() {
}
}
-int32_t MyVorbisExtractor::packetBlockSize(MediaBuffer *buffer) {
+int32_t MyOggExtractor::getPacketBlockSize(MediaBuffer *buffer) {
const uint8_t *data =
(const uint8_t *)buffer->data() + buffer->range_offset();
@@ -727,6 +911,144 @@ int32_t MyVorbisExtractor::packetBlockSize(MediaBuffer *buffer) {
return vorbis_packet_blocksize(&mVi, &pack);
}
+int64_t MyOpusExtractor::getTimeUsOfGranule(uint64_t granulePos) const {
+ uint64_t pcmSamplePosition = 0;
+ if (granulePos > mCodecDelay) {
+ pcmSamplePosition = granulePos - mCodecDelay;
+ }
+ return pcmSamplePosition * 1000000ll / kOpusSampleRate;
+}
+
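
getTimeUsOfGranule() above treats the granule position as a 48 kHz sample count that still includes the encoder pre-skip (mCodecDelay), so the delay is subtracted before converting to microseconds. A tiny worked example with a made-up pre-skip value:

    // Tiny worked example of the conversion above: granule positions count
    // 48 kHz samples and include the encoder pre-skip (codec delay), which is
    // subtracted before converting to microseconds. The delay value is invented.
    #include <cstdint>
    #include <cstdio>

    static int64_t granuleToUs(uint64_t granulePos, uint16_t codecDelay) {
        uint64_t pcm = granulePos > codecDelay ? granulePos - codecDelay : 0;
        return (int64_t)(pcm * 1000000ll / 48000);
    }

    int main() {
        // With a hypothetical pre-skip of 312 samples, granule 48312 maps to 1 s.
        printf("%lld us\n", (long long)granuleToUs(48312, 312));
        return 0;
    }
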
+status_t MyOpusExtractor::verifyHeader(MediaBuffer *buffer, uint8_t type) {
+ switch (type) {
+ // there are actually no header types defined in the Opus spec; we choose 1 and 3 to mean
+ // header and comments such that we can share code with MyVorbisExtractor.
+ case 1:
+ return verifyOpusHeader(buffer);
+ case 3:
+ return verifyOpusComments(buffer);
+ default:
+ return INVALID_OPERATION;
+ }
+}
+
+status_t MyOpusExtractor::verifyOpusHeader(MediaBuffer *buffer) {
+ const size_t kOpusHeaderSize = 19;
+ const uint8_t *data =
+ (const uint8_t *)buffer->data() + buffer->range_offset();
+
+ size_t size = buffer->range_length();
+
+ if (size < kOpusHeaderSize
+ || memcmp(data, "OpusHead", 8)
+ || /* version = */ data[8] != 1) {
+ return ERROR_MALFORMED;
+ }
+
+ mChannelCount = data[9];
+ mCodecDelay = U16LE_AT(&data[10]);
+
+ mMeta->setData(kKeyOpusHeader, 0, data, size);
+ mMeta->setInt32(kKeySampleRate, kOpusSampleRate);
+ mMeta->setInt32(kKeyChannelCount, mChannelCount);
+ mMeta->setInt64(kKeyOpusSeekPreRoll /* ns */, kOpusSeekPreRollUs * 1000 /* = 80 ms*/);
+ mMeta->setInt64(kKeyOpusCodecDelay /* ns */,
+ mCodecDelay /* sample/s */ * 1000000000 / kOpusSampleRate);
+
+ return OK;
+}
+
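
verifyOpusHeader() above checks the 19-byte Opus ID header: the 8-byte "OpusHead" magic, a version byte of 1, the channel count at offset 9 and the little-endian 16-bit pre-skip at offsets 10..11. A standalone sketch of just that parsing, with minimal error handling and an invented sample header:

    // Standalone sketch of the ID header fields the code above relies on.
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    struct OpusIdHeader {
        uint8_t channels;
        uint16_t preSkip;   // decoder samples to discard, i.e. the codec delay
    };

    static bool parseOpusHead(const uint8_t *data, size_t size, OpusIdHeader *out) {
        if (size < 19 || memcmp(data, "OpusHead", 8) != 0 || data[8] != 1) {
            return false;
        }
        out->channels = data[9];
        out->preSkip = (uint16_t)(data[10] | (data[11] << 8));  // little-endian
        return true;
    }

    int main() {
        const uint8_t hdr[19] = { 'O','p','u','s','H','e','a','d', 1, 2, 0x38, 0x01,
                                  0x80, 0xbb, 0, 0, 0, 0, 0 };
        OpusIdHeader h;
        if (parseOpusHead(hdr, sizeof(hdr), &h)) {
            printf("channels=%u preSkip=%u\n", h.channels, h.preSkip);  // 2, 312
        }
        return 0;
    }
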
+status_t MyOpusExtractor::verifyOpusComments(MediaBuffer *buffer) {
+ // add artificial framing bit so we can reuse _vorbis_unpack_comment
+ int32_t commentSize = buffer->range_length() + 1;
+ sp<ABuffer> aBuf = new ABuffer(commentSize);
+ if (aBuf->capacity() <= buffer->range_length()) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t* commentData = aBuf->data();
+ memcpy(commentData,
+ (uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+
+ ogg_buffer buf;
+ buf.data = commentData;
+ buf.size = commentSize;
+ buf.refcount = 1;
+ buf.ptr.owner = NULL;
+
+ ogg_reference ref;
+ ref.buffer = &buf;
+ ref.begin = 0;
+ ref.length = commentSize;
+ ref.next = NULL;
+
+ oggpack_buffer bits;
+ oggpack_readinit(&bits, &ref);
+
+ // skip 'OpusTags'
+ const char *OpusTags = "OpusTags";
+ const int32_t headerLen = strlen(OpusTags);
+ int32_t framingBitOffset = headerLen;
+ for (int i = 0; i < headerLen; ++i) {
+ char chr = oggpack_read(&bits, 8);
+ if (chr != OpusTags[i]) {
+ return ERROR_MALFORMED;
+ }
+ }
+
+ int32_t vendorLen = oggpack_read(&bits, 32);
+ framingBitOffset += 4;
+ if (vendorLen < 0 || vendorLen > commentSize - 8) {
+ return ERROR_MALFORMED;
+ }
+ // skip vendor string
+ framingBitOffset += vendorLen;
+ for (int i = 0; i < vendorLen; ++i) {
+ oggpack_read(&bits, 8);
+ }
+
+ int32_t n = oggpack_read(&bits, 32);
+ framingBitOffset += 4;
+ if (n < 0 || n > ((commentSize - oggpack_bytes(&bits)) >> 2)) {
+ return ERROR_MALFORMED;
+ }
+ for (int i = 0; i < n; ++i) {
+ int32_t len = oggpack_read(&bits, 32);
+ framingBitOffset += 4;
+ if (len < 0 || len > (commentSize - oggpack_bytes(&bits))) {
+ return ERROR_MALFORMED;
+ }
+ framingBitOffset += len;
+ for (int j = 0; j < len; ++j) {
+ oggpack_read(&bits, 8);
+ }
+ }
+ if (framingBitOffset < 0 || framingBitOffset >= commentSize) {
+ return ERROR_MALFORMED;
+ }
+ commentData[framingBitOffset] = 1;
+
+ buf.data = commentData + headerLen;
+ buf.size = commentSize - headerLen;
+ buf.refcount = 1;
+ buf.ptr.owner = NULL;
+
+ ref.buffer = &buf;
+ ref.begin = 0;
+ ref.length = commentSize - headerLen;
+ ref.next = NULL;
+
+ oggpack_readinit(&bits, &ref);
+ int err = _vorbis_unpack_comment(&mVc, &bits);
+ if (0 != err) {
+ return ERROR_MALFORMED;
+ }
+
+ parseFileMetaData();
+ return OK;
+}
+
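
verifyOpusComments() above walks the OpusTags block with Tremolo's bit reader and then plants an artificial Vorbis framing bit so _vorbis_unpack_comment() can be reused. The layout being walked is: the "OpusTags" magic, a little-endian 32-bit vendor-string length plus the vendor string, a 32-bit comment count, then length-prefixed comments. A plain, Tremolo-free sketch of that walk (test data invented):

    // Plain walk over the OpusTags layout traversed above. The returned offset
    // is where the artificial Vorbis framing bit would be placed.
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    static uint32_t readLE32(const uint8_t *p) {
        return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    // Returns the offset just past the last comment, or -1 on malformed data.
    static int64_t opusTagsEndOffset(const uint8_t *data, size_t size) {
        size_t off = 8;                               // skip "OpusTags"
        if (size < off + 4 || memcmp(data, "OpusTags", 8) != 0) return -1;
        uint32_t vendorLen = readLE32(data + off); off += 4;
        if (vendorLen > size - off) return -1;
        off += vendorLen;                             // skip vendor string
        if (size - off < 4) return -1;
        uint32_t count = readLE32(data + off); off += 4;
        for (uint32_t i = 0; i < count; ++i) {
            if (size - off < 4) return -1;
            uint32_t len = readLE32(data + off); off += 4;
            if (len > size - off) return -1;
            off += len;                               // skip "KEY=value"
        }
        return (int64_t)off;
    }

    int main() {
        const uint8_t tags[] = { 'O','p','u','s','T','a','g','s',
                                 3,0,0,0, 'a','b','c',             // vendor "abc"
                                 1,0,0,0, 5,0,0,0, 'A','=','x','y','z' };
        printf("framing bit offset: %lld\n",
               (long long)opusTagsEndOffset(tags, sizeof(tags)));
        return 0;
    }
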
status_t MyVorbisExtractor::verifyHeader(
MediaBuffer *buffer, uint8_t type) {
const uint8_t *data =
@@ -814,7 +1136,7 @@ status_t MyVorbisExtractor::verifyHeader(
return OK;
}
-uint64_t MyVorbisExtractor::approxBitrate() {
+uint64_t MyVorbisExtractor::approxBitrate() const {
if (mVi.bitrate_nominal != 0) {
return mVi.bitrate_nominal;
}
@@ -822,7 +1144,7 @@ uint64_t MyVorbisExtractor::approxBitrate() {
return (mVi.bitrate_lower + mVi.bitrate_upper) / 2;
}
-void MyVorbisExtractor::parseFileMetaData() {
+void MyOggExtractor::parseFileMetaData() {
mFileMeta = new MetaData;
mFileMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_OGG);
@@ -1026,11 +1348,23 @@ OggExtractor::OggExtractor(const sp<DataSource> &source)
: mDataSource(source),
mInitCheck(NO_INIT),
mImpl(NULL) {
- mImpl = new MyVorbisExtractor(mDataSource);
- mInitCheck = mImpl->seekToOffset(0);
+ for (int i = 0; i < 2; ++i) {
+ if (mImpl != NULL) {
+ delete mImpl;
+ }
+ if (i == 0) {
+ mImpl = new MyVorbisExtractor(mDataSource);
+ } else {
+ mImpl = new MyOpusExtractor(mDataSource);
+ }
+ mInitCheck = mImpl->seekToOffset(0);
- if (mInitCheck == OK) {
- mInitCheck = mImpl->init();
+ if (mInitCheck == OK) {
+ mInitCheck = mImpl->init();
+ if (mInitCheck == OK) {
+ break;
+ }
+ }
}
}
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 7f98485..40df34d 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -335,7 +335,7 @@ status_t SampleTable::setTimeToSampleParams(
}
mTimeToSampleCount = U32_AT(&header[4]);
- uint64_t allocSize = mTimeToSampleCount * 2 * (uint64_t)sizeof(uint32_t);
+ uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t);
if (allocSize > SIZE_MAX) {
return ERROR_OUT_OF_RANGE;
}
@@ -383,7 +383,7 @@ status_t SampleTable::setCompositionTimeToSampleParams(
}
mNumCompositionTimeDeltaEntries = numEntries;
- uint64_t allocSize = numEntries * 2 * (uint64_t)sizeof(uint32_t);
+ uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(uint32_t);
if (allocSize > SIZE_MAX) {
return ERROR_OUT_OF_RANGE;
}
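
The two SampleTable changes above move the widening cast onto the entry count. Multiplication associates left to right, so in the old form the 32-bit product count * 2 could wrap before the (uint64_t) cast took effect; casting the count first keeps the whole computation in 64 bits. A small demonstration (assuming a platform where unsigned int is 32 bits):

    // Demonstrates why the cast was moved: '*' associates left to right, so in
    // the old form the 32-bit product count * 2 can wrap before it is widened.
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t count = 0x90000000u;                                  // > UINT32_MAX / 2
        uint64_t oldOrder = count * 2 * (uint64_t)sizeof(uint32_t);    // wraps first
        uint64_t newOrder = (uint64_t)count * 2 * sizeof(uint32_t);    // full 64-bit math
        printf("old: %llu\nnew: %llu\n",
               (unsigned long long)oldOrder, (unsigned long long)newOrder);
        return 0;
    }
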
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 449d195..a00f324 100755
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -634,6 +634,10 @@ OMX_ERRORTYPE SoftAVC::initEncoder() {
}
if (mConversionBuffer == NULL) {
+ if (((uint64_t)mStride * mHeight) > (((uint64_t)INT32_MAX / 3) * 2)) {
+ ALOGE("Buffer size is too big.");
+ return OMX_ErrorUndefined;
+ }
mConversionBuffer = (uint8_t *)malloc(mStride * mHeight * 3 / 2);
if (mConversionBuffer == NULL) {
ALOGE("Allocating conversion buffer failed.");
@@ -679,6 +683,10 @@ OMX_ERRORTYPE SoftAVC::initEncoder() {
}
/* Allocate array to hold memory records */
+ if (mNumMemRecords > SIZE_MAX / sizeof(iv_mem_rec_t)) {
+ ALOGE("requested memory size is too big.");
+ return OMX_ErrorUndefined;
+ }
mMemRecords = (iv_mem_rec_t *)malloc(mNumMemRecords * sizeof(iv_mem_rec_t));
if (NULL == mMemRecords) {
ALOGE("Unable to allocate memory for hold memory records: Size %zu",
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
index 90d7c6b..af19bfe 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
@@ -95,6 +95,11 @@ OSCL_EXPORT_REF Bool PVInitVideoDecoder(VideoDecControls *decCtrl, uint8 *volbuf
#ifdef DEC_INTERNAL_MEMORY_OPT
video->vol = (Vol **) IMEM_VOL;
#else
+ if ((size_t)nLayers > SIZE_MAX / sizeof(Vol *)) {
+ status = PV_FALSE;
+ goto fail;
+ }
+
video->vol = (Vol **) oscl_malloc(nLayers * sizeof(Vol *));
#endif
if (video->vol == NULL) status = PV_FALSE;
@@ -128,6 +133,11 @@ OSCL_EXPORT_REF Bool PVInitVideoDecoder(VideoDecControls *decCtrl, uint8 *volbuf
else oscl_memset(video->prevVop, 0, sizeof(Vop));
video->memoryUsage += (sizeof(Vop) * 2);
+ if ((size_t)nLayers > SIZE_MAX / sizeof(Vop *)) {
+ status = PV_FALSE;
+ goto fail;
+ }
+
video->vopHeader = (Vop **) oscl_malloc(sizeof(Vop *) * nLayers);
#endif
if (video->vopHeader == NULL) status = PV_FALSE;
@@ -277,6 +287,7 @@ OSCL_EXPORT_REF Bool PVInitVideoDecoder(VideoDecControls *decCtrl, uint8 *volbuf
status = PV_FALSE;
}
+fail:
if (status == PV_FALSE) PVCleanUpVideoDecoder(decCtrl);
return status;
@@ -305,6 +316,10 @@ Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLay
video->nMBPerRow * video->nMBPerCol;
}
+ if (((uint64_t)video->width * video->height) > (uint64_t)INT32_MAX / sizeof(PIXEL)) {
+ return PV_FALSE;
+ }
+
size = (int32)sizeof(PIXEL) * video->width * video->height;
#ifdef PV_MEMORY_POOL
decCtrl->size = size;
@@ -320,6 +335,9 @@ Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLay
video->prevVop->uChan = video->prevVop->yChan + size;
video->prevVop->vChan = video->prevVop->uChan + (size >> 2);
#else
+ if (size > INT32_MAX / 3 * 2) {
+ return PV_FALSE;
+ }
video->currVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/
if (video->currVop->yChan == NULL) status = PV_FALSE;
@@ -347,6 +365,10 @@ Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLay
{
oscl_memset(video->prevEnhcVop, 0, sizeof(Vop));
#ifndef PV_MEMORY_POOL
+ if (size > INT32_MAX / 3 * 2) {
+ return PV_FALSE;
+ }
+
video->prevEnhcVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/
if (video->prevEnhcVop->yChan == NULL) status = PV_FALSE;
video->prevEnhcVop->uChan = video->prevEnhcVop->yChan + size;
@@ -403,10 +425,17 @@ Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLay
if (video->acPredFlag == NULL) status = PV_FALSE;
video->memoryUsage += (nTotalMB);
+ if ((size_t)nTotalMB > SIZE_MAX / sizeof(typeDCStore)) {
+ return PV_FALSE;
+ }
video->predDC = (typeDCStore *) oscl_malloc(nTotalMB * sizeof(typeDCStore));
if (video->predDC == NULL) status = PV_FALSE;
video->memoryUsage += (nTotalMB * sizeof(typeDCStore));
+ if (nMBPerRow > INT32_MAX - 1
+ || (size_t)(nMBPerRow + 1) > SIZE_MAX / sizeof(typeDCACStore)) {
+ return PV_FALSE;
+ }
video->predDCAC_col = (typeDCACStore *) oscl_malloc((nMBPerRow + 1) * sizeof(typeDCACStore));
if (video->predDCAC_col == NULL) status = PV_FALSE;
video->memoryUsage += ((nMBPerRow + 1) * sizeof(typeDCACStore));
@@ -422,6 +451,10 @@ Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLay
video->headerInfo.CBP = (uint8 *) oscl_malloc(nTotalMB);
if (video->headerInfo.CBP == NULL) status = PV_FALSE;
video->memoryUsage += nTotalMB;
+
+ if ((size_t)nTotalMB > SIZE_MAX / sizeof(int16)) {
+ return PV_FALSE;
+ }
video->QPMB = (int16 *) oscl_malloc(nTotalMB * sizeof(int16));
if (video->QPMB == NULL) status = PV_FALSE;
video->memoryUsage += (nTotalMB * sizeof(int));
@@ -439,6 +472,9 @@ Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLay
video->memoryUsage += sizeof(MacroBlock);
}
/* Allocating motion vector space */
+ if ((size_t)nTotalMB > SIZE_MAX / (sizeof(MOT) * 4)) {
+ return PV_FALSE;
+ }
video->motX = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB);
if (video->motX == NULL) status = PV_FALSE;
video->motY = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB);
@@ -472,6 +508,9 @@ Bool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLay
}
#else
+ if (nTotalMB > INT32_MAX / 6) {
+ return PV_FALSE;
+ }
video->pstprcTypCur = (uint8 *) oscl_malloc(nTotalMB * 6);
video->memoryUsage += (nTotalMB * 6);
if (video->pstprcTypCur == NULL)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp b/media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp
index 946e3d0..da27377 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp
@@ -610,6 +610,10 @@ OSCL_EXPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encoderControl, Vid
max = temp_w * temp_h;
max_width = ((temp_w + 15) >> 4) << 4;
max_height = ((temp_h + 15) >> 4) << 4;
+ if (((uint64_t)max_width * max_height) > (uint64_t)INT32_MAX
+ || temp_w > INT32_MAX - 15 || temp_h > INT32_MAX - 15) {
+ goto CLEAN_UP;
+ }
nTotalMB = ((max_width * max_height) >> 8);
}
@@ -654,6 +658,9 @@ OSCL_EXPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encoderControl, Vid
/* Allocating motion vector space and interpolation memory*/
+ if ((size_t)nTotalMB > SIZE_MAX / sizeof(MOT *)) {
+ goto CLEAN_UP;
+ }
video->mot = (MOT **)M4VENC_MALLOC(sizeof(MOT *) * nTotalMB);
if (video->mot == NULL) goto CLEAN_UP;
@@ -676,11 +683,17 @@ OSCL_EXPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encoderControl, Vid
/* so that compilers can generate faster code to indexing the */
/* data inside (by using << instead of *). 04/14/2000. */
/* 5/29/01, use decoder lib ACDC prediction memory scheme. */
+ if ((size_t)nTotalMB > SIZE_MAX / sizeof(typeDCStore)) {
+ goto CLEAN_UP;
+ }
video->predDC = (typeDCStore *) M4VENC_MALLOC(nTotalMB * sizeof(typeDCStore));
if (video->predDC == NULL) goto CLEAN_UP;
if (!video->encParams->H263_Enabled)
{
+ if ((size_t)((max_width >> 4) + 1) > SIZE_MAX / sizeof(typeDCACStore)) {
+ goto CLEAN_UP;
+ }
video->predDCAC_col = (typeDCACStore *) M4VENC_MALLOC(((max_width >> 4) + 1) * sizeof(typeDCACStore));
if (video->predDCAC_col == NULL) goto CLEAN_UP;
@@ -688,6 +701,9 @@ OSCL_EXPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encoderControl, Vid
/* the rest will be used for storing horizontal (row) AC coefficients */
video->predDCAC_row = video->predDCAC_col + 1; /* ACDC */
+ if ((size_t)nTotalMB > SIZE_MAX / sizeof(Int)) {
+ goto CLEAN_UP;
+ }
video->acPredFlag = (Int *) M4VENC_MALLOC(nTotalMB * sizeof(Int)); /* Memory for acPredFlag */
if (video->acPredFlag == NULL) goto CLEAN_UP;
}
@@ -741,8 +757,15 @@ OSCL_EXPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encoderControl, Vid
offset = (pitch << 4) + 16;
max_height += 32;
}
+ if (((uint64_t)pitch * max_height) > (uint64_t)INT32_MAX) {
+ goto CLEAN_UP;
+ }
size = pitch * max_height;
+ if (size > INT32_MAX - (size >> 1)
+ || (size_t)(size + (size >> 1)) > SIZE_MAX / sizeof(PIXEL)) {
+ goto CLEAN_UP;
+ }
video->currVop->yChan = (PIXEL *)M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for currVop Y */
if (video->currVop->yChan == NULL) goto CLEAN_UP;
video->currVop->uChan = video->currVop->yChan + size;/* Memory for currVop U */
@@ -841,6 +864,9 @@ OSCL_EXPORT_REF Bool PVInitVideoEncoder(VideoEncControls *encoderControl, Vid
/* /// End /////////////////////////////////////// */
+ if ((size_t)nLayers > SIZE_MAX / sizeof(Vol *)) {
+ goto CLEAN_UP;
+ }
video->vol = (Vol **)M4VENC_MALLOC(nLayers * sizeof(Vol *)); /* Memory for VOL pointers */
/* Memory allocation and Initialization of Vols and writing of headers */
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 21da707..695cfc8 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -197,7 +197,7 @@ void SoftwareRenderer::resetFormatIfChanged(const sp<AMessage> &format) {
}
void SoftwareRenderer::render(
- const void *data, size_t /*size*/, int64_t timestampNs,
+ const void *data, size_t size, int64_t timestampNs,
void* /*platformPrivate*/, const sp<AMessage>& format) {
resetFormatIfChanged(format);
@@ -228,6 +228,9 @@ void SoftwareRenderer::render(
buf->stride, buf->height,
0, 0, mCropWidth - 1, mCropHeight - 1);
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar) {
+ if ((size_t)mWidth * mHeight * 3 / 2 > size) {
+ goto skip_copying;
+ }
const uint8_t *src_y = (const uint8_t *)data;
const uint8_t *src_u =
(const uint8_t *)data + mWidth * mHeight;
@@ -258,6 +261,9 @@ void SoftwareRenderer::render(
}
} else if (mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
|| mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ if ((size_t)mWidth * mHeight * 3 / 2 > size) {
+ goto skip_copying;
+ }
const uint8_t *src_y = (const uint8_t *)data;
const uint8_t *src_uv = (const uint8_t *)data
+ mWidth * (mHeight - mCropTop / 2);
@@ -289,6 +295,9 @@ void SoftwareRenderer::render(
dst_v += dst_c_stride;
}
} else if (mColorFormat == OMX_COLOR_Format24bitRGB888) {
+ if ((size_t)mWidth * mHeight * 3 > size) {
+ goto skip_copying;
+ }
uint8_t* srcPtr = (uint8_t*)data;
uint8_t* dstPtr = (uint8_t*)dst;
@@ -298,6 +307,9 @@ void SoftwareRenderer::render(
dstPtr += buf->stride * 3;
}
} else if (mColorFormat == OMX_COLOR_Format32bitARGB8888) {
+ if ((size_t)mWidth * mHeight * 4 > size) {
+ goto skip_copying;
+ }
uint8_t *srcPtr, *dstPtr;
for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
@@ -312,6 +324,9 @@ void SoftwareRenderer::render(
}
}
} else if (mColorFormat == OMX_COLOR_Format32BitRGBA8888) {
+ if ((size_t)mWidth * mHeight * 4 > size) {
+ goto skip_copying;
+ }
uint8_t* srcPtr = (uint8_t*)data;
uint8_t* dstPtr = (uint8_t*)dst;
@@ -324,6 +339,7 @@ void SoftwareRenderer::render(
LOG_ALWAYS_FATAL("bad color format %#x", mColorFormat);
}
+skip_copying:
CHECK_EQ(0, mapper.unlock(buf->handle));
if ((err = native_window_set_buffers_timestamp(mNativeWindow.get(),
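
The SoftwareRenderer checks above compare the payload size against the smallest buffer that can hold one full frame in the given format: 3/2 bytes per pixel for planar and semi-planar YUV 4:2:0, 3 for 24-bit RGB and 4 for the 32-bit RGBA/ARGB formats. A minimal sketch of that size floor, using a local enum instead of the OMX color-format constants so it stands alone:

    // Minimal sketch of the size floor enforced above.
    #include <cstddef>
    #include <cstdio>

    enum class PixelLayout { Yuv420, Rgb888, Rgba8888 };

    static size_t minFrameBytes(size_t width, size_t height, PixelLayout layout) {
        switch (layout) {
            case PixelLayout::Yuv420:   return width * height * 3 / 2;  // Y plus quarter-res U,V
            case PixelLayout::Rgb888:   return width * height * 3;
            case PixelLayout::Rgba8888: return width * height * 4;
        }
        return 0;
    }

    int main() {
        // A 1280x720 YUV 4:2:0 frame needs at least 1,382,400 bytes.
        printf("%zu\n", minFrameBytes(1280, 720, PixelLayout::Yuv420));
        return 0;
    }
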
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index 2d9b3d4..801ff26 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -53,7 +53,10 @@ status_t MediaHTTP::connect(
if (headers != NULL) {
extHeaders = *headers;
}
- extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
+
+ if (extHeaders.indexOfKey(String8("User-Agent")) < 0) {
+ extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
+ }
bool success = mHTTPConnection->connect(uri, &extHeaders);
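
With the MediaHTTP change above, a caller-supplied User-Agent header now wins; the default is only added when the key is absent. A small sketch of the same policy using std::map as a stand-in for the KeyedVector headers (the default string is illustrative):

    // "Only add a default if the caller did not set one."
    #include <cstdio>
    #include <map>
    #include <string>

    static void addDefaultUserAgent(std::map<std::string, std::string> &headers,
                                    const std::string &defaultUa) {
        // insert() is a no-op when the key already exists, so a caller-provided
        // User-Agent is never overwritten.
        headers.insert({"User-Agent", defaultUa});
    }

    int main() {
        std::map<std::string, std::string> headers = {{"User-Agent", "MyPlayer/1.0"}};
        addDefaultUserAgent(headers, "stagefright/1.2 (Linux;Android)");
        printf("%s\n", headers["User-Agent"].c_str());   // caller's value survives
        return 0;
    }
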
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 76df815..f68e0a9 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -125,6 +125,8 @@ struct OMXNodeInstance {
const void *data,
size_t size);
+ // handles messages and removes them from the list
+ void onMessages(std::list<omx_message> &messages);
void onMessage(const omx_message &msg);
void onObserverDied(OMXMaster *master);
void onGetHandleFailed();
@@ -231,6 +233,10 @@ private:
sp<GraphicBufferSource> getGraphicBufferSource();
void setGraphicBufferSource(const sp<GraphicBufferSource>& bufferSource);
+ // Handles |msg|, and may modify it. Returns true iff completely handled it and
+ // |msg| does not need to be sent to the event listener.
+ bool handleMessage(omx_message &msg);
+
OMXNodeInstance(const OMXNodeInstance &);
OMXNodeInstance &operator=(const OMXNodeInstance &);
};
diff --git a/media/libstagefright/include/OggExtractor.h b/media/libstagefright/include/OggExtractor.h
index e97c8cd..c647cbb 100644
--- a/media/libstagefright/include/OggExtractor.h
+++ b/media/libstagefright/include/OggExtractor.h
@@ -27,7 +27,7 @@ struct AMessage;
class DataSource;
class String8;
-struct MyVorbisExtractor;
+struct MyOggExtractor;
struct OggSource;
struct OggExtractor : public MediaExtractor {
@@ -48,7 +48,7 @@ private:
sp<DataSource> mDataSource;
status_t mInitCheck;
- MyVorbisExtractor *mImpl;
+ MyOggExtractor *mImpl;
OggExtractor(const OggExtractor &);
OggExtractor &operator=(const OggExtractor &);
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 53423ec..db429f6 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -1397,6 +1397,11 @@ status_t ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
unsigned adaptation_field_length = br->getBits(8);
if (adaptation_field_length > 0) {
+ if (adaptation_field_length * 8 > br->numBitsLeft()) {
+ ALOGV("Adaptation field should be included in a single TS packet.");
+ return ERROR_MALFORMED;
+ }
+
unsigned discontinuity_indicator = br->getBits(1);
if (discontinuity_indicator) {
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index d30bba5..ac6bf0d 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -38,6 +38,72 @@ namespace android {
static const bool EXTRA_CHECK = true;
+GraphicBufferSource::PersistentProxyListener::PersistentProxyListener(
+ const wp<IGraphicBufferConsumer> &consumer,
+ const wp<ConsumerListener>& consumerListener) :
+ mConsumerListener(consumerListener),
+ mConsumer(consumer) {}
+
+GraphicBufferSource::PersistentProxyListener::~PersistentProxyListener() {}
+
+void GraphicBufferSource::PersistentProxyListener::onFrameAvailable(
+ const BufferItem& item) {
+ sp<ConsumerListener> listener(mConsumerListener.promote());
+ if (listener != NULL) {
+ listener->onFrameAvailable(item);
+ } else {
+ sp<IGraphicBufferConsumer> consumer(mConsumer.promote());
+ if (consumer == NULL) {
+ return;
+ }
+ BufferItem bi;
+ status_t err = consumer->acquireBuffer(&bi, 0);
+ if (err != OK) {
+ ALOGE("PersistentProxyListener: acquireBuffer failed (%d)", err);
+ return;
+ }
+
+ err = consumer->detachBuffer(bi.mBuf);
+ if (err != OK) {
+ ALOGE("PersistentProxyListener: detachBuffer failed (%d)", err);
+ return;
+ }
+
+ err = consumer->attachBuffer(&bi.mBuf, bi.mGraphicBuffer);
+ if (err != OK) {
+ ALOGE("PersistentProxyListener: attachBuffer failed (%d)", err);
+ return;
+ }
+
+ err = consumer->releaseBuffer(bi.mBuf, 0,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, bi.mFence);
+ if (err != OK) {
+ ALOGE("PersistentProxyListener: releaseBuffer failed (%d)", err);
+ }
+ }
+}
+
+void GraphicBufferSource::PersistentProxyListener::onFrameReplaced(
+ const BufferItem& item) {
+ sp<ConsumerListener> listener(mConsumerListener.promote());
+ if (listener != NULL) {
+ listener->onFrameReplaced(item);
+ }
+}
+
+void GraphicBufferSource::PersistentProxyListener::onBuffersReleased() {
+ sp<ConsumerListener> listener(mConsumerListener.promote());
+ if (listener != NULL) {
+ listener->onBuffersReleased();
+ }
+}
+
+void GraphicBufferSource::PersistentProxyListener::onSidebandStreamChanged() {
+ sp<ConsumerListener> listener(mConsumerListener.promote());
+ if (listener != NULL) {
+ listener->onSidebandStreamChanged();
+ }
+}
GraphicBufferSource::GraphicBufferSource(
OMXNodeInstance* nodeInstance,
@@ -101,7 +167,12 @@ GraphicBufferSource::GraphicBufferSource(
// dropping to 0 at the end of the ctor. Since all we need is a wp<...>
// that's what we create.
wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
- sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
+ sp<IConsumerListener> proxy;
+ if (!mIsPersistent) {
+ proxy = new BufferQueue::ProxyConsumerListener(listener);
+ } else {
+ proxy = new PersistentProxyListener(mConsumer, listener);
+ }
mInitCheck = mConsumer->consumerConnect(proxy, false);
if (mInitCheck != NO_ERROR) {
@@ -312,6 +383,7 @@ void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int f
mConsumer->attachBuffer(&outSlot, mBufferSlot[id]);
mConsumer->releaseBuffer(outSlot, 0,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
+ mBufferSlot[id] = NULL;
} else {
mConsumer->releaseBuffer(id, codecBuffer.mFrameNumber,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
@@ -400,6 +472,7 @@ void GraphicBufferSource::suspend(bool suspend) {
if (mIsPersistent) {
mConsumer->detachBuffer(item.mBuf);
+ mBufferSlot[item.mBuf] = NULL;
mConsumer->attachBuffer(&item.mBuf, item.mGraphicBuffer);
mConsumer->releaseBuffer(item.mBuf, 0,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
@@ -488,6 +561,7 @@ bool GraphicBufferSource::fillCodecBuffer_l() {
ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf);
if (mIsPersistent) {
mConsumer->detachBuffer(item.mBuf);
+ mBufferSlot[item.mBuf] = NULL;
mConsumer->attachBuffer(&item.mBuf, item.mGraphicBuffer);
mConsumer->releaseBuffer(item.mBuf, 0,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
@@ -578,9 +652,9 @@ void GraphicBufferSource::setLatestBuffer_l(
int outSlot;
mConsumer->attachBuffer(&outSlot, mBufferSlot[mLatestBufferId]);
-
mConsumer->releaseBuffer(outSlot, 0,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, mLatestBufferFence);
+ mBufferSlot[mLatestBufferId] = NULL;
} else {
mConsumer->releaseBuffer(
mLatestBufferId, mLatestBufferFrameNum,
@@ -803,6 +877,7 @@ void GraphicBufferSource::onFrameAvailable(const BufferItem& /*item*/) {
if (mIsPersistent) {
mConsumer->detachBuffer(item.mBuf);
+ mBufferSlot[item.mBuf] = NULL;
mConsumer->attachBuffer(&item.mBuf, item.mGraphicBuffer);
mConsumer->releaseBuffer(item.mBuf, 0,
EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
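
The persistent-surface paths in GraphicBufferSource above repeat the same sequence: detach the buffer from our slot, clear the cached GraphicBuffer pointer (the lines added by this patch), attach the buffer back to the queue and release it. A schematic of that ordering in one place; the Consumer interface below is a made-up stand-in for IGraphicBufferConsumer, not the real API:

    #include <cstdio>

    struct Consumer {
        virtual ~Consumer() {}
        virtual int detachBuffer(int slot) = 0;
        virtual int attachBuffer(int *outSlot, const void *gb) = 0;
        virtual int releaseBuffer(int slot, const void *fence) = 0;
    };

    // Returns the buffer to the queue without keeping a reference in our slot table.
    static void recycleToQueue(Consumer &bq, int slot, const void *gb,
                               const void *fence, const void *slotTable[]) {
        bq.detachBuffer(slot);            // take it out of the consumer slot
        slotTable[slot] = nullptr;        // forget our cached GraphicBuffer (added by the patch)
        int newSlot = slot;
        bq.attachBuffer(&newSlot, gb);    // hand the same buffer back to the queue
        bq.releaseBuffer(newSlot, fence); // make it available to the producer again
    }

    struct LoggingConsumer : Consumer {
        int detachBuffer(int s) override { printf("detach %d\n", s); return 0; }
        int attachBuffer(int *s, const void *) override { printf("attach %d\n", *s); return 0; }
        int releaseBuffer(int s, const void *) override { printf("release %d\n", s); return 0; }
    };

    int main() {
        LoggingConsumer bq;
        const void *slots[8] = {};
        int dummyBuffer = 0, dummyFence = 0;
        slots[3] = &dummyBuffer;
        recycleToQueue(bq, 3, &dummyBuffer, &dummyFence, slots);
        return 0;
    }
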
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 555bbec..2a8c218 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -160,6 +160,31 @@ protected:
virtual void onSidebandStreamChanged();
private:
+ // PersistentProxyListener is similar to BufferQueue::ProxyConsumerListener
+    // except that it returns (acquire/detach/re-attach/release) buffers
+    // in onFrameAvailable() if the actual consumer object is no longer valid.
+    //
+    // This class is used in the persistent input surface case to prevent buffer
+    // loss when onFrameAvailable() is received while we don't have a valid
+    // consumer around.
+ class PersistentProxyListener : public BnConsumerListener {
+ public:
+ PersistentProxyListener(
+ const wp<IGraphicBufferConsumer> &consumer,
+ const wp<ConsumerListener>& consumerListener);
+ virtual ~PersistentProxyListener();
+ virtual void onFrameAvailable(const BufferItem& item) override;
+ virtual void onFrameReplaced(const BufferItem& item) override;
+ virtual void onBuffersReleased() override;
+ virtual void onSidebandStreamChanged() override;
+ private:
+ // mConsumerListener is a weak reference to the IConsumerListener.
+ wp<ConsumerListener> mConsumerListener;
+        // mConsumer is a weak reference to the IGraphicBufferConsumer; we use
+        // a weak reference to avoid a circular reference between mConsumer and
+        // this class.
+ wp<IGraphicBufferConsumer> mConsumer;
+ };
+
// Keep track of codec input buffers. They may either be available
// (mGraphicBuffer == NULL) or in use by the codec.
struct CodecBuffer {
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 76217ec..e94adbd 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -61,7 +61,11 @@ private:
struct OMX::CallbackDispatcher : public RefBase {
CallbackDispatcher(OMXNodeInstance *owner);
- void post(const omx_message &msg);
+ // Posts |msg| to the listener's queue. If |realTime| is true, the listener thread is notified
+ // that a new message is available on the queue. Otherwise, the message stays on the queue, but
+ // the listener is not notified of it. It will process this message when a subsequent message
+ // is posted with |realTime| set to true.
+ void post(const omx_message &msg, bool realTime = true);
bool loop();
@@ -74,11 +78,11 @@ private:
OMXNodeInstance *mOwner;
bool mDone;
Condition mQueueChanged;
- List<omx_message> mQueue;
+ std::list<omx_message> mQueue;
sp<CallbackDispatcherThread> mThread;
- void dispatch(const omx_message &msg);
+ void dispatch(std::list<omx_message> &messages);
CallbackDispatcher(const CallbackDispatcher &);
CallbackDispatcher &operator=(const CallbackDispatcher &);
@@ -109,24 +113,26 @@ OMX::CallbackDispatcher::~CallbackDispatcher() {
}
}
-void OMX::CallbackDispatcher::post(const omx_message &msg) {
+void OMX::CallbackDispatcher::post(const omx_message &msg, bool realTime) {
Mutex::Autolock autoLock(mLock);
mQueue.push_back(msg);
- mQueueChanged.signal();
+ if (realTime) {
+ mQueueChanged.signal();
+ }
}
-void OMX::CallbackDispatcher::dispatch(const omx_message &msg) {
+void OMX::CallbackDispatcher::dispatch(std::list<omx_message> &messages) {
if (mOwner == NULL) {
ALOGV("Would have dispatched a message to a node that's already gone.");
return;
}
- mOwner->onMessage(msg);
+ mOwner->onMessages(messages);
}
bool OMX::CallbackDispatcher::loop() {
for (;;) {
- omx_message msg;
+ std::list<omx_message> messages;
{
Mutex::Autolock autoLock(mLock);
@@ -138,11 +144,10 @@ bool OMX::CallbackDispatcher::loop() {
break;
}
- msg = *mQueue.begin();
- mQueue.erase(mQueue.begin());
+ messages.swap(mQueue);
}
- dispatch(msg);
+ dispatch(messages);
}
return false;
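
The CallbackDispatcher changes above batch messages: post() can enqueue without waking the worker when realTime is false, and the loop swaps the whole queue out under the lock before dispatching. A minimal standalone sketch of the same pattern using the C++11 standard library instead of the Android Mutex/Condition classes:

    #include <condition_variable>
    #include <cstdio>
    #include <list>
    #include <mutex>
    #include <string>
    #include <thread>

    class BatchingDispatcher {
    public:
        void post(std::string msg, bool realTime = true) {
            {
                std::lock_guard<std::mutex> lock(mLock);
                mQueue.push_back(std::move(msg));
            }
            if (realTime) {
                mCond.notify_one();   // otherwise the message waits for the next wake-up
            }
        }

        void stop() {
            {
                std::lock_guard<std::mutex> lock(mLock);
                mDone = true;
            }
            mCond.notify_one();
        }

        void loop() {
            for (;;) {
                std::list<std::string> batch;
                {
                    std::unique_lock<std::mutex> lock(mLock);
                    mCond.wait(lock, [this] { return mDone || !mQueue.empty(); });
                    if (mDone && mQueue.empty()) {
                        return;
                    }
                    batch.swap(mQueue);   // take everything that has accumulated
                }
                for (const std::string &m : batch) {
                    printf("dispatch: %s\n", m.c_str());
                }
            }
        }

    private:
        std::mutex mLock;
        std::condition_variable mCond;
        std::list<std::string> mQueue;
        bool mDone = false;
    };

    int main() {
        BatchingDispatcher d;
        std::thread t([&d] { d.loop(); });
        d.post("EMPTY_BUFFER_DONE", /* realTime = */ false);  // queued silently
        d.post("FILL_BUFFER_DONE");                           // wakes the worker, both dispatched
        d.stop();
        t.join();
        return 0;
    }
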
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 9e399f9..7e92da8 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -1357,7 +1357,7 @@ status_t OMXNodeInstance::setInternalOption(
}
}
-void OMXNodeInstance::onMessage(const omx_message &msg) {
+bool OMXNodeInstance::handleMessage(omx_message &msg) {
const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
if (msg.type == omx_message::FILL_BUFFER_DONE) {
@@ -1384,10 +1384,7 @@ void OMXNodeInstance::onMessage(const omx_message &msg) {
// fix up the buffer info (especially timestamp) if needed
bufferSource->codecBufferFilled(buffer);
- omx_message newMsg = msg;
- newMsg.u.extended_buffer_data.timestamp = buffer->nTimeStamp;
- mObserver->onMessage(newMsg);
- return;
+ msg.u.extended_buffer_data.timestamp = buffer->nTimeStamp;
}
} else if (msg.type == omx_message::EMPTY_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
@@ -1408,11 +1405,23 @@ void OMXNodeInstance::onMessage(const omx_message &msg) {
// know that anyone asked to have the buffer emptied and will
// be very confused.
bufferSource->codecBufferEmptied(buffer, msg.fenceFd);
- return;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void OMXNodeInstance::onMessages(std::list<omx_message> &messages) {
+ for (std::list<omx_message>::iterator it = messages.begin(); it != messages.end(); ) {
+ if (handleMessage(*it)) {
+ messages.erase(it++);
+ } else {
+ ++it;
}
}
- mObserver->onMessage(msg);
+ mObserver->onMessages(messages);
}
void OMXNodeInstance::onObserverDied(OMXMaster *master) {
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 294b2ed..644b6ed 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -64,9 +64,11 @@ status_t Harness::initOMX() {
return mOMX != 0 ? OK : NO_INIT;
}
-void Harness::onMessage(const omx_message &msg) {
+void Harness::onMessages(const std::list<omx_message> &messages) {
Mutex::Autolock autoLock(mLock);
- mMessageQueue.push_back(msg);
+ for (std::list<omx_message>::const_iterator it = messages.cbegin(); it != messages.cend(); ) {
+ mMessageQueue.push_back(*it++);
+ }
mMessageAddedCondition.signal();
}
diff --git a/media/libstagefright/omx/tests/OMXHarness.h b/media/libstagefright/omx/tests/OMXHarness.h
index bb8fd0c..1ebf3aa 100644
--- a/media/libstagefright/omx/tests/OMXHarness.h
+++ b/media/libstagefright/omx/tests/OMXHarness.h
@@ -74,7 +74,7 @@ struct Harness : public BnOMXObserver {
status_t testAll();
- virtual void onMessage(const omx_message &msg);
+ virtual void onMessages(const std::list<omx_message> &messages);
protected:
virtual ~Harness();