Diffstat (limited to 'media/libmedia/AudioTrack.cpp')
-rw-r--r--    media/libmedia/AudioTrack.cpp    359
1 file changed, 182 insertions(+), 177 deletions(-)
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 735db5c..ce30c62 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -33,11 +33,16 @@
#define WAIT_PERIOD_MS 10
#define WAIT_STREAM_END_TIMEOUT_SEC 120
-
+static const int kMaxLoopCountNotifications = 32;
namespace android {
// ---------------------------------------------------------------------------
+template <typename T>
+const T &min(const T &x, const T &y) {
+ return x < y ? x : y;
+}
+
static int64_t convertTimespecToUs(const struct timespec &tv)
{
return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
@@ -61,12 +66,11 @@ status_t AudioTrack::getMinFrameCount(
return BAD_VALUE;
}
- // FIXME merge with similar code in createTrack_l(), except we're missing
- // some information here that is available in createTrack_l():
+ // FIXME handle in server, like createTrack_l(), possible missing info:
// audio_io_handle_t output
// audio_format_t format
// audio_channel_mask_t channelMask
- // audio_output_flags_t flags
+ // audio_output_flags_t flags (FAST)
uint32_t afSampleRate;
status_t status;
status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
@@ -96,16 +100,16 @@ status_t AudioTrack::getMinFrameCount(
minBufCount = 2;
}
- *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
- afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
- // The formula above should always produce a non-zero value, but return an error
- // in the unlikely event that it does not, as that's part of the API contract.
+ *frameCount = minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
+ // The formula above should always produce a non-zero value under normal circumstances:
+ // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
+ // Return error in the unlikely event that it does not, as that's part of the API contract.
if (*frameCount == 0) {
- ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
+ ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
streamType, sampleRate);
return BAD_VALUE;
}
- ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
+ ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%u, afSampleRate=%u, afLatency=%u",
*frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
return NO_ERROR;
}
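
The rewritten calculation above folds the sample-rate scaling into a sourceFramesNeeded() helper defined outside this file. A minimal sketch of what such a helper plausibly computes, with the exact rounding/padding terms treated as an assumption rather than the real definition:

    #include <stddef.h>
    #include <stdint.h>

    // Sketch only: approximate shape of sourceFramesNeeded() as called above.
    // The real helper lives outside AudioTrack.cpp; the two "+1" padding terms
    // (rounding headroom plus resampler interpolation) are an assumption here.
    static inline size_t sourceFramesNeeded(
            uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate)
    {
        if (srcSampleRate == dstSampleRate || srcSampleRate == 0) {
            // no resampling (or unspecified client rate): 1:1 with the sink
            return dstFramesRequired;
        }
        // scale by the rate ratio, then pad for rounding and interpolation
        return size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
    }

With sampleRate, afFrameCount and afSampleRate passed as srcSampleRate, dstFramesRequired and dstSampleRate, this reduces to the deleted inline formula whenever the two rates match.
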
@@ -199,8 +203,8 @@ AudioTrack::~AudioTrack()
mCblkMemory.clear();
mSharedBuffer.clear();
IPCThreadState::self()->flushCommands();
- ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
- IPCThreadState::self()->getCallingPid(), mClientPid);
+ ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
+ mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
}
}
@@ -225,9 +229,9 @@ status_t AudioTrack::set(
const audio_attributes_t* pAttributes)
{
ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
- "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
+ "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
- sessionId, transferType);
+ sessionId, transferType, uid, pid);
switch (transferType) {
case TRANSFER_DEFAULT:
@@ -270,8 +274,6 @@ status_t AudioTrack::set(
ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
- AutoMutex lock(mLock);
-
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
ALOGE("Track already in use");
@@ -295,6 +297,9 @@ status_t AudioTrack::set(
ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
mStreamType = AUDIO_STREAM_DEFAULT;
+ if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+ }
}
// these below should probably come from the audioFlinger too...
@@ -317,12 +322,6 @@ status_t AudioTrack::set(
uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
mChannelCount = channelCount;
- // AudioFlinger does not currently support 8-bit data in shared memory
- if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
- ALOGE("8-bit data in shared memory is not supported");
- return BAD_VALUE;
- }
-
// force direct flag if format is not linear PCM
// or offload was requested
if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
@@ -346,12 +345,9 @@ status_t AudioTrack::set(
} else {
mFrameSize = sizeof(uint8_t);
}
- mFrameSizeAF = mFrameSize;
} else {
ALOG_ASSERT(audio_is_linear_pcm(format));
mFrameSize = channelCount * audio_bytes_per_sample(format);
- mFrameSizeAF = channelCount * audio_bytes_per_sample(
- format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
// createTrack will return an error if PCM format is not supported by server,
// so no need to check for specific PCM formats here
}
@@ -403,6 +399,7 @@ status_t AudioTrack::set(
if (cbf != NULL) {
mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
+ // thread begins in paused state, and will not reference us until start()
}
// create the IAudioTrack
@@ -420,7 +417,10 @@ status_t AudioTrack::set(
mStatus = NO_ERROR;
mState = STATE_STOPPED;
mUserData = user;
- mLoopPeriod = 0;
+ mLoopCount = 0;
+ mLoopStart = 0;
+ mLoopEnd = 0;
+ mLoopCountNotified = 0;
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
@@ -531,14 +531,12 @@ void AudioTrack::stop()
// the playback head position will reset to 0, so if a marker is set, we need
// to activate it again
mMarkerReached = false;
-#if 0
- // Force flush if a shared buffer is used otherwise audioflinger
- // will not stop before end of buffer is reached.
- // It may be needed to make sure that we stop playback, likely in case looping is on.
+
if (mSharedBuffer != 0) {
- flush_l();
+ // clear buffer position and loop count.
+ mStaticProxy->setBufferPositionAndLoop(0 /* position */,
+ 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
}
-#endif
sp<AudioTrackThread> t = mAudioTrackThread;
if (t != 0) {
@@ -669,14 +667,18 @@ void AudioTrack::getAuxEffectSendLevel(float* level) const
status_t AudioTrack::setSampleRate(uint32_t rate)
{
- if (mIsTimed || isOffloadedOrDirect()) {
+ AutoMutex lock(mLock);
+ if (rate == mSampleRate) {
+ return NO_ERROR;
+ }
+ if (mIsTimed || isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
return INVALID_OPERATION;
}
-
- AutoMutex lock(mLock);
if (mOutput == AUDIO_IO_HANDLE_NONE) {
return NO_INIT;
}
+ // NOTE: it is theoretically possible, but highly unlikely, that a device change
+ // could mean a previously allowed sampling rate is no longer allowed.
uint32_t afSamplingRate;
if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
return NO_INIT;
@@ -740,10 +742,15 @@ status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount
void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
- // Setting the loop will reset next notification update period (like setPosition).
- mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
- mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
+ // We do not update the periodic notification point.
+ // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
+ mLoopCount = loopCount;
+ mLoopEnd = loopEnd;
+ mLoopStart = loopStart;
+ mLoopCountNotified = loopCount;
mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
+
+ // Waking the AudioTrackThread is not needed as this cannot be called when active.
}
status_t AudioTrack::setMarkerPosition(uint32_t marker)
@@ -757,6 +764,10 @@ status_t AudioTrack::setMarkerPosition(uint32_t marker)
mMarkerPosition = marker;
mMarkerReached = false;
+ sp<AudioTrackThread> t = mAudioTrackThread;
+ if (t != 0) {
+ t->wake();
+ }
return NO_ERROR;
}
@@ -786,6 +797,10 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
mNewPosition = updateAndGetPosition_l() + updatePeriod;
mUpdatePeriod = updatePeriod;
+ sp<AudioTrackThread> t = mAudioTrackThread;
+ if (t != 0) {
+ t->wake();
+ }
return NO_ERROR;
}
@@ -823,12 +838,11 @@ status_t AudioTrack::setPosition(uint32_t position)
if (mState == STATE_ACTIVE) {
return INVALID_OPERATION;
}
+ // After setting the position, use full update period before notification.
mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
- mLoopPeriod = 0;
- // FIXME Check whether loops and setting position are incompatible in old code.
- // If we use setLoop for both purposes we lose the capability to set the position while looping.
- mStaticProxy->setLoop(position, mFrameCount, 0);
+ mStaticProxy->setBufferPosition(position);
+ // Waking the AudioTrackThread is not needed as this cannot be called when active.
return NO_ERROR;
}
@@ -893,10 +907,18 @@ status_t AudioTrack::reload()
return INVALID_OPERATION;
}
mNewPosition = mUpdatePeriod;
- mLoopPeriod = 0;
- // FIXME The new code cannot reload while keeping a loop specified.
- // Need to check how the old code handled this, and whether it's a significant change.
- mStaticProxy->setLoop(0, mFrameCount, 0);
+ (void) updateAndGetPosition_l();
+ mPosition = 0;
+#if 0
+ // The documentation is not clear on the behavior of reload() and the restoration
+ // of loop count. Historically we have not restored loop count, start, end,
+ // but it makes sense if one desires to repeat playing a particular sound.
+ if (mLoopCount != 0) {
+ mLoopCountNotified = mLoopCount;
+ mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
+ }
+#endif
+ mStaticProxy->setBufferPosition(0);
return NO_ERROR;
}
@@ -945,9 +967,9 @@ status_t AudioTrack::createTrack_l()
if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
- ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
+ ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
" channel mask %#x, flags %#x",
- streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
+ mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
return BAD_VALUE;
}
{
@@ -962,6 +984,7 @@ status_t AudioTrack::createTrack_l()
ALOGE("getLatency(%d) failed status %d", output, status);
goto release;
}
+ ALOGV("createTrack_l() output %d afLatency %u", output, afLatency);
size_t afFrameCount;
status = AudioSystem::getFrameCount(output, &afFrameCount);
@@ -986,23 +1009,23 @@ status_t AudioTrack::createTrack_l()
// use case 1: shared buffer
(mSharedBuffer != 0) ||
// use case 2: callback transfer mode
- (mTransfer == TRANSFER_CALLBACK)) &&
+ (mTransfer == TRANSFER_CALLBACK) ||
+ // use case 3: obtain/release mode
+ (mTransfer == TRANSFER_OBTAIN)) &&
// matching sample rate
(mSampleRate == afSampleRate))) {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
+ ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, output %u Hz",
+ mTransfer, mSampleRate, afSampleRate);
// once denied, do not request again if IAudioTrack is re-created
mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
}
- ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
// The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
// n = 1 fast track with single buffering; nBuffering is ignored
// n = 2 fast track with double buffering
- // n = 2 normal track, no sample rate conversion
- // n = 3 normal track, with sample rate conversion
- // (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
- // n > 3 very high latency or very small notification interval; nBuffering is ignored
- const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
+ // n = 2 normal track, (including those with sample rate conversion)
+ // n >= 3 very high latency or very small notification interval (unused).
+ const uint32_t nBuffering = 2;
mNotificationFramesAct = mNotificationFramesReq;
@@ -1019,12 +1042,12 @@ status_t AudioTrack::createTrack_l()
mNotificationFramesAct = frameCount;
}
} else if (mSharedBuffer != 0) {
-
- // Ensure that buffer alignment matches channel count
- // 8-bit data in shared memory is not currently supported by AudioFlinger
- size_t alignment = audio_bytes_per_sample(
- mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
+ // FIXME: Ensure client side memory buffers need
+ // not have additional alignment beyond sample
+ // (e.g. 16 bit stereo accessed as 32 bit frame).
+ size_t alignment = audio_bytes_per_sample(mFormat);
if (alignment & 1) {
+ // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
alignment = 1;
}
if (mChannelCount > 1) {
@@ -1042,40 +1065,10 @@ status_t AudioTrack::createTrack_l()
// there's no frameCount parameter.
// But when initializing a shared buffer AudioTrack via set(),
// there _is_ a frameCount parameter. We silently ignore it.
- frameCount = mSharedBuffer->size() / mFrameSizeAF;
-
- } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
-
- // FIXME move these calculations and associated checks to server
-
- // Ensure that buffer depth covers at least audio hardware latency
- uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
- ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
- afFrameCount, minBufCount, afSampleRate, afLatency);
- if (minBufCount <= nBuffering) {
- minBufCount = nBuffering;
- }
-
- size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
- ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
- ", afLatency=%d",
- minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
-
- if (frameCount == 0) {
- frameCount = minFrameCount;
- } else if (frameCount < minFrameCount) {
- // not ALOGW because it happens all the time when playing key clicks over A2DP
- ALOGV("Minimum buffer size corrected from %zu to %zu",
- frameCount, minFrameCount);
- frameCount = minFrameCount;
- }
- // Make sure that application is notified with sufficient margin before underrun
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
- mNotificationFramesAct = frameCount/nBuffering;
- }
-
+ frameCount = mSharedBuffer->size() / mFrameSize;
} else {
- // For fast tracks, the frame count calculations and checks are done by server
+ // For fast and normal streaming tracks,
+ // the frame count calculations and checks are done by server
}
IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
@@ -1101,12 +1094,10 @@ status_t AudioTrack::createTrack_l()
size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
// but we will still need the original value also
+ int originalSessionId = mSessionId;
sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
mSampleRate,
- // AudioFlinger only sees 16-bit PCM
- mFormat == AUDIO_FORMAT_PCM_8_BIT &&
- !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
- AUDIO_FORMAT_PCM_16_BIT : mFormat,
+ mFormat,
mChannelMask,
&temp,
&trackFlags,
@@ -1116,6 +1107,8 @@ status_t AudioTrack::createTrack_l()
&mSessionId,
mClientUid,
&status);
+ ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
+ "session ID changed from %d to %d", originalSessionId, mSessionId);
if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create track, status: %d", status);
@@ -1161,23 +1154,10 @@ status_t AudioTrack::createTrack_l()
if (trackFlags & IAudioFlinger::TRACK_FAST) {
ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
mAwaitBoost = true;
- if (mSharedBuffer == 0) {
- // Theoretically double-buffering is not required for fast tracks,
- // due to tighter scheduling. But in practice, to accommodate kernels with
- // scheduling jitter, and apps with computation jitter, we use double-buffering.
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
- mNotificationFramesAct = frameCount/nBuffering;
- }
- }
} else {
ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
// once denied, do not request again if IAudioTrack is re-created
mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
- if (mSharedBuffer == 0) {
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
- mNotificationFramesAct = frameCount/nBuffering;
- }
- }
}
}
if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
@@ -1200,6 +1180,16 @@ status_t AudioTrack::createTrack_l()
//return NO_INIT;
}
}
+ // Make sure that application is notified with sufficient margin before underrun
+ if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
+ // Theoretically double-buffering is not required for fast tracks,
+ // due to tighter scheduling. But in practice, to accommodate kernels with
+ // scheduling jitter, and apps with computation jitter, we use double-buffering
+ // for fast tracks just like normal streaming tracks.
+ if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
+ mNotificationFramesAct = frameCount / nBuffering;
+ }
+ }
// We retain a copy of the I/O handle, but don't own the reference
mOutput = output;
@@ -1211,9 +1201,13 @@ status_t AudioTrack::createTrack_l()
// address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
void* buffers;
if (mSharedBuffer == 0) {
- buffers = (char*)cblk + sizeof(audio_track_cblk_t);
+ buffers = cblk + 1;
} else {
buffers = mSharedBuffer->pointer();
+ if (buffers == NULL) {
+ ALOGE("Could not get buffer pointer");
+ return NO_INIT;
+ }
}
mAudioTrack->attachAuxEffect(mAuxEffectId);
@@ -1230,9 +1224,9 @@ status_t AudioTrack::createTrack_l()
// update proxy
if (mSharedBuffer == 0) {
mStaticProxy.clear();
- mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+ mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
} else {
- mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+ mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
mProxy = mStaticProxy;
}
@@ -1258,7 +1252,7 @@ release:
return status;
}
-status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
{
if (audioBuffer == NULL) {
return BAD_VALUE;
@@ -1285,7 +1279,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
ALOGE("%s invalid waitCount %d", __func__, waitCount);
requested = NULL;
}
- return obtainBuffer(audioBuffer, requested);
+ return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
}
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
@@ -1352,7 +1346,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *re
} while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
audioBuffer->frameCount = buffer.mFrameCount;
- audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
+ audioBuffer->size = buffer.mFrameCount * mFrameSize;
audioBuffer->raw = buffer.mRaw;
if (nonContig != NULL) {
*nonContig = buffer.mNonContig;
@@ -1360,13 +1354,14 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *re
return status;
}
-void AudioTrack::releaseBuffer(Buffer* audioBuffer)
+void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
{
+ // FIXME add error checking on mode, by adding an internal version
if (mTransfer == TRANSFER_SHARED) {
return;
}
- size_t stepCount = audioBuffer->size / mFrameSizeAF;
+ size_t stepCount = audioBuffer->size / mFrameSize;
if (stepCount == 0) {
return;
}
@@ -1431,15 +1426,8 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
return ssize_t(err);
}
- size_t toWrite;
- if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
- // Divide capacity by 2 to take expansion into account
- toWrite = audioBuffer.size >> 1;
- memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
- } else {
- toWrite = audioBuffer.size;
- memcpy(audioBuffer.i8, buffer, toWrite);
- }
+ size_t toWrite = audioBuffer.size;
+ memcpy(audioBuffer.i8, buffer, toWrite);
buffer = ((const char *) buffer) + toWrite;
userSize -= toWrite;
written += toWrite;
@@ -1559,9 +1547,8 @@ nsecs_t AudioTrack::processAudioBuffer()
// that the upper layers can recreate the track
if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
status_t status = restoreTrack_l("processAudioBuffer");
- mLock.unlock();
- // Run again immediately, but with a new IAudioTrack
- return 0;
+ // after restoration, continue below to make sure that the loop and buffer events
+ // are notified because they have been cleared from mCblk->mFlags above.
}
}
@@ -1610,7 +1597,6 @@ nsecs_t AudioTrack::processAudioBuffer()
}
// Cache other fields that will be needed soon
- uint32_t loopPeriod = mLoopPeriod;
uint32_t sampleRate = mSampleRate;
uint32_t notificationFrames = mNotificationFramesAct;
if (mRefreshRemaining) {
@@ -1622,8 +1608,30 @@ nsecs_t AudioTrack::processAudioBuffer()
uint32_t sequence = mSequence;
sp<AudioTrackClientProxy> proxy = mProxy;
+ // Determine the number of new loop callback(s) that will be needed, while locked.
+ int loopCountNotifications = 0;
+ uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
+
+ if (mLoopCount > 0) {
+ int loopCount;
+ size_t bufferPosition;
+ mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
+ loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
+ loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
+ mLoopCountNotified = loopCount; // discard any excess notifications
+ } else if (mLoopCount < 0) {
+ // FIXME: We're not accurate with notification count and position with infinite looping
+ // since loopCount from server side will always return -1 (we could decrement it).
+ size_t bufferPosition = mStaticProxy->getBufferPosition();
+ loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
+ loopPeriod = mLoopEnd - bufferPosition;
+ } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
+ size_t bufferPosition = mStaticProxy->getBufferPosition();
+ loopPeriod = mFrameCount - bufferPosition;
+ }
+
// These fields don't need to be cached, because they are assigned only by set():
- // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
+ // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
// mFlags is also assigned by createTrack_l(), but not the bit we care about.
mLock.unlock();
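
The loop-notification bookkeeping added above replaces the old single CBLK_LOOP_CYCLE check, so EVENT_LOOP_END callbacks are no longer lost when several loop cycles complete between processAudioBuffer() passes. A minimal standalone sketch of the counting rule (pendingLoopNotifications() is a hypothetical name used only for illustration):

    static const int kMaxLoopCountNotifications = 32; // same cap as the patch

    // Given the remaining loop count last reported to the app and the server's
    // current remaining loop count, return how many EVENT_LOOP_END callbacks
    // to deliver in this pass; anything beyond the cap is simply discarded.
    static int pendingLoopNotifications(int lastNotified, int current)
    {
        int pending = lastNotified - current;      // loops completed since the last pass
        if (pending > kMaxLoopCountNotifications) {
            pending = kMaxLoopCountNotifications;  // discard any excess notifications
        }
        return pending > 0 ? pending : 0;
    }
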
@@ -1662,10 +1670,9 @@ nsecs_t AudioTrack::processAudioBuffer()
if (newUnderrun) {
mCbf(EVENT_UNDERRUN, mUserData, NULL);
}
- // FIXME we will miss loops if loop cycle was signaled several times since last call
- // to processAudioBuffer()
- if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
+ while (loopCountNotifications > 0) {
mCbf(EVENT_LOOP_END, mUserData, NULL);
+ --loopCountNotifications;
}
if (flags & CBLK_BUFFER_END) {
mCbf(EVENT_BUFFER_END, mUserData, NULL);
@@ -1701,10 +1708,11 @@ nsecs_t AudioTrack::processAudioBuffer()
minFrames = markerPosition - position;
}
if (loopPeriod > 0 && loopPeriod < minFrames) {
+ // loopPeriod is already adjusted for actual position.
minFrames = loopPeriod;
}
- if (updatePeriod > 0 && updatePeriod < minFrames) {
- minFrames = updatePeriod;
+ if (updatePeriod > 0) {
+ minFrames = min(minFrames, uint32_t(newPosition - position));
}
// If > 0, poll periodically to recover from a stuck server. A good value is 2.
@@ -1767,13 +1775,6 @@ nsecs_t AudioTrack::processAudioBuffer()
}
}
- // Divide buffer size by 2 to take into account the expansion
- // due to 8 to 16 bit conversion: the callback must fill only half
- // of the destination buffer
- if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
- audioBuffer.size >>= 1;
- }
-
size_t reqSize = audioBuffer.size;
mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
size_t writtenSize = audioBuffer.size;
@@ -1793,13 +1794,7 @@ nsecs_t AudioTrack::processAudioBuffer()
return WAIT_PERIOD_MS * 1000000LL;
}
- if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
- // 8 to 16 bit conversion, note that source and destination are the same address
- memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
- audioBuffer.size <<= 1;
- }
-
- size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
+ size_t releasedFrames = writtenSize / mFrameSize;
audioBuffer.frameCount = releasedFrames;
mRemainingFrames -= releasedFrames;
if (misalignment >= releasedFrames) {
@@ -1844,7 +1839,6 @@ status_t AudioTrack::restoreTrack_l(const char *from)
ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
++mSequence;
- status_t result;
// refresh the audio configuration cache in this process to make sure we get new
// output parameters and new IAudioFlinger in createTrack_l()
@@ -1856,39 +1850,39 @@ status_t AudioTrack::restoreTrack_l(const char *from)
}
// save the old static buffer position
- size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
+ size_t bufferPosition = 0;
+ int loopCount = 0;
+ if (mStaticProxy != 0) {
+ mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
+ }
// If a new IAudioTrack is successfully created, createTrack_l() will modify the
// following member variables: mAudioTrack, mCblkMemory and mCblk.
// It will also delete the strong references on previous IAudioTrack and IMemory.
// If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
- result = createTrack_l();
+ status_t result = createTrack_l();
// take the frames that will be lost by track recreation into account in saved position
+ // For streaming tracks, this is the amount we obtained from the user/client
+ // (not the number actually consumed at the server - those are already lost).
(void) updateAndGetPosition_l();
- mPosition = mReleased;
+ if (mStaticProxy == 0) {
+ mPosition = mReleased;
+ }
if (result == NO_ERROR) {
- // continue playback from last known position, but
- // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
- if (mStaticProxy != NULL) {
- mLoopPeriod = 0;
- mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
- }
- // FIXME How do we simulate the fact that all frames present in the buffer at the time of
- // track destruction have been played? This is critical for SoundPool implementation
- // This must be broken, and needs to be tested/debugged.
-#if 0
- // restore write index and set other indexes to reflect empty buffer status
- if (!strcmp(from, "start")) {
- // Make sure that a client relying on callback events indicating underrun or
- // the actual amount of audio frames played (e.g SoundPool) receives them.
- if (mSharedBuffer == 0) {
- // restart playback even if buffer is not completely filled.
- android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
+ // Continue playback from last known position and restore loop.
+ if (mStaticProxy != 0) {
+ if (loopCount != 0) {
+ mStaticProxy->setBufferPositionAndLoop(bufferPosition,
+ mLoopStart, mLoopEnd, loopCount);
+ } else {
+ mStaticProxy->setBufferPosition(bufferPosition);
+ if (bufferPosition == mFrameCount) {
+ ALOGD("restoring track at end of static buffer");
+ }
}
}
-#endif
if (mState == STATE_ACTIVE) {
result = mAudioTrack->start();
}
@@ -2148,8 +2142,8 @@ bool AudioTrack::AudioTrackThread::threadLoop()
case NS_NEVER:
return false;
case NS_WHENEVER:
- // FIXME increase poll interval, or make event-driven
- ns = 1000000000LL;
+ // Event driven: call wake() when callback notifications conditions change.
+ ns = INT64_MAX;
// fall through
default:
LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
@@ -2182,6 +2176,17 @@ void AudioTrack::AudioTrackThread::resume()
}
}
+void AudioTrack::AudioTrackThread::wake()
+{
+ AutoMutex _l(mMyLock);
+ if (!mPaused && mPausedInt && mPausedNs > 0) {
+ // audio track is active and internally paused with timeout.
+ mIgnoreNextPausedInt = true;
+ mPausedInt = false;
+ mMyCond.signal();
+ }
+}
+
void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
{
AutoMutex _l(mMyLock);
@@ -2189,4 +2194,4 @@ void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
mPausedNs = ns;
}
-}; // namespace android
+} // namespace android