Diffstat (limited to 'media/libmedia/AudioTrack.cpp')
-rw-r--r--  media/libmedia/AudioTrack.cpp  451
1 file changed, 241 insertions, 210 deletions
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 3f3a88c..aaaa3f1 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -44,9 +44,6 @@ status_t AudioTrack::getMinFrameCount(
return BAD_VALUE;
}
- // default to 0 in case of error
- *frameCount = 0;
-
// FIXME merge with similar code in createTrack_l(), except we're missing
// some information here that is available in createTrack_l():
// audio_io_handle_t output
@@ -54,16 +51,26 @@ status_t AudioTrack::getMinFrameCount(
// audio_channel_mask_t channelMask
// audio_output_flags_t flags
uint32_t afSampleRate;
- if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
- return NO_INIT;
+ status_t status;
+ status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
+ if (status != NO_ERROR) {
+ ALOGE("Unable to query output sample rate for stream type %d; status %d",
+ streamType, status);
+ return status;
}
size_t afFrameCount;
- if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
- return NO_INIT;
+ status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
+ if (status != NO_ERROR) {
+ ALOGE("Unable to query output frame count for stream type %d; status %d",
+ streamType, status);
+ return status;
}
uint32_t afLatency;
- if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
- return NO_INIT;
+ status = AudioSystem::getOutputLatency(&afLatency, streamType);
+ if (status != NO_ERROR) {
+ ALOGE("Unable to query output latency for stream type %d; status %d",
+ streamType, status);
+ return status;
}
// Ensure that buffer depth covers at least audio hardware latency
@@ -74,6 +81,13 @@ status_t AudioTrack::getMinFrameCount(
*frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
afFrameCount * minBufCount * sampleRate / afSampleRate;
+ // The formula above should always produce a non-zero value, but return an error
+ // in the unlikely event that it does not, as that's part of the API contract.
+ if (*frameCount == 0) {
+ ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
+ streamType, sampleRate);
+ return BAD_VALUE;
+ }
ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
*frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
return NO_ERROR;
@@ -95,15 +109,16 @@ AudioTrack::AudioTrack(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
audio_output_flags_t flags,
callback_t cbf,
void* user,
- int notificationFrames,
+ uint32_t notificationFrames,
int sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid)
+ int uid,
+ pid_t pid)
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -113,7 +128,7 @@ AudioTrack::AudioTrack(
mStatus = set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
- offloadInfo, uid);
+ offloadInfo, uid, pid);
}
AudioTrack::AudioTrack(
@@ -125,11 +140,12 @@ AudioTrack::AudioTrack(
audio_output_flags_t flags,
callback_t cbf,
void* user,
- int notificationFrames,
+ uint32_t notificationFrames,
int sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid)
+ int uid,
+ pid_t pid)
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -138,7 +154,8 @@ AudioTrack::AudioTrack(
{
mStatus = set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
- sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
+ sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
+ uid, pid);
}
AudioTrack::~AudioTrack()
@@ -157,7 +174,9 @@ AudioTrack::~AudioTrack()
mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
mAudioTrack.clear();
IPCThreadState::self()->flushCommands();
- AudioSystem::releaseAudioSessionId(mSessionId);
+ ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
+ IPCThreadState::self()->getCallingPid(), mClientPid);
+ AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
}
}
@@ -166,18 +185,24 @@ status_t AudioTrack::set(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCountInt,
+ size_t frameCount,
audio_output_flags_t flags,
callback_t cbf,
void* user,
- int notificationFrames,
+ uint32_t notificationFrames,
const sp<IMemory>& sharedBuffer,
bool threadCanCallJava,
int sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid)
+ int uid,
+ pid_t pid)
{
+ ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
+ "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
+ streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
+ sessionId, transferType);
+
switch (transferType) {
case TRANSFER_DEFAULT:
if (sharedBuffer != 0) {
@@ -211,15 +236,9 @@ status_t AudioTrack::set(
ALOGE("Invalid transfer type %d", transferType);
return BAD_VALUE;
}
+ mSharedBuffer = sharedBuffer;
mTransfer = transferType;
- // FIXME "int" here is legacy and will be replaced by size_t later
- if (frameCountInt < 0) {
- ALOGE("Invalid frame count %d", frameCountInt);
- return BAD_VALUE;
- }
- size_t frameCount = frameCountInt;
-
ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
sharedBuffer->size());
@@ -233,19 +252,24 @@ status_t AudioTrack::set(
return INVALID_OPERATION;
}
- mOutput = 0;
-
// handle default values first.
if (streamType == AUDIO_STREAM_DEFAULT) {
streamType = AUDIO_STREAM_MUSIC;
}
+ if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
+ ALOGE("Invalid stream type %d", streamType);
+ return BAD_VALUE;
+ }
+ mStreamType = streamType;
+ status_t status;
if (sampleRate == 0) {
- uint32_t afSampleRate;
- if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
- return NO_INIT;
+ status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
+ if (status != NO_ERROR) {
+ ALOGE("Could not get output sample rate for stream type %d; status %d",
+ streamType, status);
+ return status;
}
- sampleRate = afSampleRate;
}
mSampleRate = sampleRate;
@@ -253,15 +277,21 @@ status_t AudioTrack::set(
if (format == AUDIO_FORMAT_DEFAULT) {
format = AUDIO_FORMAT_PCM_16_BIT;
}
- if (channelMask == 0) {
- channelMask = AUDIO_CHANNEL_OUT_STEREO;
- }
// validate parameters
if (!audio_is_valid_format(format)) {
- ALOGE("Invalid format %d", format);
+ ALOGE("Invalid format %#x", format);
return BAD_VALUE;
}
+ mFormat = format;
+
+ if (!audio_is_output_channel(channelMask)) {
+ ALOGE("Invalid channel mask %#x", channelMask);
+ return BAD_VALUE;
+ }
+ mChannelMask = channelMask;
+ uint32_t channelCount = popcount(channelMask);
+ mChannelCount = channelCount;
// AudioFlinger does not currently support 8-bit data in shared memory
if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
@@ -285,46 +315,52 @@ status_t AudioTrack::set(
flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
}
- if (!audio_is_output_channel(channelMask)) {
- ALOGE("Invalid channel mask %#x", channelMask);
- return BAD_VALUE;
- }
- mChannelMask = channelMask;
- uint32_t channelCount = popcount(channelMask);
- mChannelCount = channelCount;
-
- if (audio_is_linear_pcm(format)) {
+ if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
+ if (audio_is_linear_pcm(format)) {
+ mFrameSize = channelCount * audio_bytes_per_sample(format);
+ } else {
+ mFrameSize = sizeof(uint8_t);
+ }
+ mFrameSizeAF = mFrameSize;
+ } else {
+ ALOG_ASSERT(audio_is_linear_pcm(format));
mFrameSize = channelCount * audio_bytes_per_sample(format);
- mFrameSizeAF = channelCount * sizeof(int16_t);
+ mFrameSizeAF = channelCount * audio_bytes_per_sample(
+ format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
+ // createTrack will return an error if PCM format is not supported by server,
+ // so no need to check for specific PCM formats here
+ }
+
+ // Make copy of input parameter offloadInfo so that in the future:
+ // (a) createTrack_l doesn't need it as an input parameter
+ // (b) we can support re-creation of offloaded tracks
+ if (offloadInfo != NULL) {
+ mOffloadInfoCopy = *offloadInfo;
+ mOffloadInfo = &mOffloadInfoCopy;
} else {
- mFrameSize = sizeof(uint8_t);
- mFrameSizeAF = sizeof(uint8_t);
- }
-
- audio_io_handle_t output = AudioSystem::getOutput(
- streamType,
- sampleRate, format, channelMask,
- flags,
- offloadInfo);
-
- if (output == 0) {
- ALOGE("Could not get audio output for stream type %d", streamType);
- return BAD_VALUE;
+ mOffloadInfo = NULL;
}
- mVolume[LEFT] = 1.0f;
- mVolume[RIGHT] = 1.0f;
+ mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
+ mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
mSendLevel = 0.0f;
- mFrameCount = frameCount;
+ // mFrameCount is initialized in createTrack_l
mReqFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
mNotificationFramesAct = 0;
mSessionId = sessionId;
- if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
+ int callingpid = IPCThreadState::self()->getCallingPid();
+ int mypid = getpid();
+ if (uid == -1 || (callingpid != mypid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
+ if (pid == -1 || (callingpid != mypid)) {
+ mClientPid = callingpid;
+ } else {
+ mClientPid = pid;
+ }
mAuxEffectId = 0;
mFlags = flags;
mCbf = cbf;
@@ -335,14 +371,7 @@ status_t AudioTrack::set(
}
// create the IAudioTrack
- status_t status = createTrack_l(streamType,
- sampleRate,
- format,
- frameCount,
- flags,
- sharedBuffer,
- output,
- 0 /*epoch*/);
+ status = createTrack_l(0 /*epoch*/);
if (status != NO_ERROR) {
if (mAudioTrackThread != 0) {
@@ -350,17 +379,10 @@ status_t AudioTrack::set(
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
- //Use of direct and offloaded output streams is ref counted by audio policy manager.
- // As getOutput was called above and resulted in an output stream to be opened,
- // we need to release it.
- AudioSystem::releaseOutput(output);
return status;
}
mStatus = NO_ERROR;
- mStreamType = streamType;
- mFormat = format;
- mSharedBuffer = sharedBuffer;
mState = STATE_STOPPED;
mUserData = user;
mLoopPeriod = 0;
@@ -368,11 +390,10 @@ status_t AudioTrack::set(
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
- AudioSystem::acquireAudioSessionId(mSessionId);
+ AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
mSequence = 1;
mObservedSequence = mSequence;
mInUnderrun = false;
- mOutput = output;
return NO_ERROR;
}
@@ -448,12 +469,11 @@ status_t AudioTrack::start()
void AudioTrack::stop()
{
AutoMutex lock(mLock);
- // FIXME pause then stop should not be a nop
- if (mState != STATE_ACTIVE) {
+ if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
return;
}
- if (isOffloaded()) {
+ if (isOffloaded_l()) {
mState = STATE_STOPPING;
} else {
mState = STATE_STOPPED;
@@ -475,7 +495,7 @@ void AudioTrack::stop()
sp<AudioTrackThread> t = mAudioTrackThread;
if (t != 0) {
- if (!isOffloaded()) {
+ if (!isOffloaded_l()) {
t->pause();
}
} else {
@@ -513,7 +533,7 @@ void AudioTrack::flush_l()
mRefreshRemaining = true;
mState = STATE_FLUSHED;
- if (isOffloaded()) {
+ if (isOffloaded_l()) {
mProxy->interrupt();
}
mProxy->flush();
@@ -533,8 +553,8 @@ void AudioTrack::pause()
mProxy->interrupt();
mAudioTrack->pause();
- if (isOffloaded()) {
- if (mOutput != 0) {
+ if (isOffloaded_l()) {
+ if (mOutput != AUDIO_IO_HANDLE_NONE) {
uint32_t halFrames;
// OffloadThread sends HAL pause in its threadLoop.. time saved
// here can be slightly off
@@ -551,12 +571,12 @@ status_t AudioTrack::setVolume(float left, float right)
}
AutoMutex lock(mLock);
- mVolume[LEFT] = left;
- mVolume[RIGHT] = right;
+ mVolume[AUDIO_INTERLEAVE_LEFT] = left;
+ mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
- if (isOffloaded()) {
+ if (isOffloaded_l()) {
mAudioTrack->signal();
}
return NO_ERROR;
@@ -620,8 +640,8 @@ uint32_t AudioTrack::getSampleRate() const
// sample rate can be updated during playback by the offloaded decoder so we need to
// query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
- if (isOffloaded()) {
- if (mOutput != 0) {
+ if (isOffloaded_l()) {
+ if (mOutput != AUDIO_IO_HANDLE_NONE) {
uint32_t sampleRate = 0;
status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
if (status == NO_ERROR) {
@@ -704,6 +724,7 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
AutoMutex lock(mLock);
mNewPosition = mProxy->getPosition() + updatePeriod;
mUpdatePeriod = updatePeriod;
+
return NO_ERROR;
}
@@ -757,7 +778,7 @@ status_t AudioTrack::getPosition(uint32_t *position) const
}
AutoMutex lock(mLock);
- if (isOffloaded()) {
+ if (isOffloaded_l()) {
uint32_t dspFrames = 0;
if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
@@ -766,7 +787,7 @@ status_t AudioTrack::getPosition(uint32_t *position) const
return NO_ERROR;
}
- if (mOutput != 0) {
+ if (mOutput != AUDIO_IO_HANDLE_NONE) {
uint32_t halFrames;
AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
}
@@ -812,23 +833,12 @@ status_t AudioTrack::reload()
return NO_ERROR;
}
-audio_io_handle_t AudioTrack::getOutput()
+audio_io_handle_t AudioTrack::getOutput() const
{
AutoMutex lock(mLock);
return mOutput;
}
-// must be called with mLock held
-audio_io_handle_t AudioTrack::getOutput_l()
-{
- if (mOutput) {
- return mOutput;
- } else {
- return AudioSystem::getOutput(mStreamType,
- mSampleRate, mFormat, mChannelMask, mFlags);
- }
-}
-
status_t AudioTrack::attachAuxEffect(int effectId)
{
AutoMutex lock(mLock);
@@ -842,15 +852,7 @@ status_t AudioTrack::attachAuxEffect(int effectId)
// -------------------------------------------------------------------------
// must be called with mLock held
-status_t AudioTrack::createTrack_l(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- size_t frameCount,
- audio_output_flags_t flags,
- const sp<IMemory>& sharedBuffer,
- audio_io_handle_t output,
- size_t epoch)
+status_t AudioTrack::createTrack_l(size_t epoch)
{
status_t status;
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -859,50 +861,57 @@ status_t AudioTrack::createTrack_l(
return NO_INIT;
}
+ audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat,
+ mChannelMask, mFlags, mOffloadInfo);
+ if (output == AUDIO_IO_HANDLE_NONE) {
+ ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, "
+ "channel mask %#x, flags %#x",
+ mStreamType, mSampleRate, mFormat, mChannelMask, mFlags);
+ return BAD_VALUE;
+ }
+ {
+ // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
+ // we must release it ourselves if anything goes wrong.
+
// Not all of these values are needed under all conditions, but it is easier to get them all
uint32_t afLatency;
- status = AudioSystem::getLatency(output, streamType, &afLatency);
+ status = AudioSystem::getLatency(output, &afLatency);
if (status != NO_ERROR) {
ALOGE("getLatency(%d) failed status %d", output, status);
- return NO_INIT;
+ goto release;
}
size_t afFrameCount;
- status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
+ status = AudioSystem::getFrameCount(output, mStreamType, &afFrameCount);
if (status != NO_ERROR) {
- ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
- return NO_INIT;
+ ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, mStreamType, status);
+ goto release;
}
uint32_t afSampleRate;
- status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
+ status = AudioSystem::getSamplingRate(output, mStreamType, &afSampleRate);
if (status != NO_ERROR) {
- ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
- return NO_INIT;
+ ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, mStreamType, status);
+ goto release;
}
// Client decides whether the track is TIMED (see below), but can only express a preference
// for FAST. Server will perform additional tests.
- if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
+ if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
// either of these use cases:
// use case 1: shared buffer
- (sharedBuffer != 0) ||
- // use case 2: callback handler
- (mCbf != NULL))) {
+ (mSharedBuffer != 0) ||
+ // use case 2: callback transfer mode
+ (mTransfer == TRANSFER_CALLBACK)) &&
+ // matching sample rate
+ (mSampleRate == afSampleRate))) {
ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
// once denied, do not request again if IAudioTrack is re-created
- flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
- mFlags = flags;
+ mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
}
ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
- if ((flags & AUDIO_OUTPUT_FLAG_FAST) && sampleRate != afSampleRate) {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client due to mismatching sample rate (%d vs %d)",
- sampleRate, afSampleRate);
- flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
- }
-
// The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
// n = 1 fast track with single buffering; nBuffering is ignored
// n = 2 fast track with double buffering
@@ -910,43 +919,49 @@ status_t AudioTrack::createTrack_l(
// n = 3 normal track, with sample rate conversion
// (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
// n > 3 very high latency or very small notification interval; nBuffering is ignored
- const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
+ const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
mNotificationFramesAct = mNotificationFramesReq;
- if (!audio_is_linear_pcm(format)) {
- if (sharedBuffer != 0) {
+ size_t frameCount = mReqFrameCount;
+ if (!audio_is_linear_pcm(mFormat)) {
+ if (mSharedBuffer != 0) {
// Same comment as below about ignoring frameCount parameter for set()
- frameCount = sharedBuffer->size();
+ frameCount = mSharedBuffer->size();
} else if (frameCount == 0) {
frameCount = afFrameCount;
}
if (mNotificationFramesAct != frameCount) {
mNotificationFramesAct = frameCount;
}
- } else if (sharedBuffer != 0) {
+ } else if (mSharedBuffer != 0) {
// Ensure that buffer alignment matches channel count
// 8-bit data in shared memory is not currently supported by AudioFlinger
- size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
+ size_t alignment = audio_bytes_per_sample(
+ mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
+ if (alignment & 1) {
+ alignment = 1;
+ }
if (mChannelCount > 1) {
// More than 2 channels does not require stronger alignment than stereo
alignment <<= 1;
}
- if (((uintptr_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
+ if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
ALOGE("Invalid buffer alignment: address %p, channel count %u",
- sharedBuffer->pointer(), mChannelCount);
- return BAD_VALUE;
+ mSharedBuffer->pointer(), mChannelCount);
+ status = BAD_VALUE;
+ goto release;
}
// When initializing a shared buffer AudioTrack via constructors,
// there's no frameCount parameter.
// But when initializing a shared buffer AudioTrack via set(),
// there _is_ a frameCount parameter. We silently ignore it.
- frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
+ frameCount = mSharedBuffer->size() / mFrameSizeAF;
- } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
+ } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
// FIXME move these calculations and associated checks to server
@@ -958,10 +973,10 @@ status_t AudioTrack::createTrack_l(
minBufCount = nBuffering;
}
- size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
+ size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
", afLatency=%d",
- minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
+ minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
if (frameCount == 0) {
frameCount = minFrameCount;
@@ -986,52 +1001,65 @@ status_t AudioTrack::createTrack_l(
}
pid_t tid = -1;
- if (flags & AUDIO_OUTPUT_FLAG_FAST) {
+ if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
trackFlags |= IAudioFlinger::TRACK_FAST;
if (mAudioTrackThread != 0) {
tid = mAudioTrackThread->getTid();
}
}
- if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
}
- sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
- sampleRate,
+ size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
+ // but we will still need the original value also
+ sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
+ mSampleRate,
// AudioFlinger only sees 16-bit PCM
- format == AUDIO_FORMAT_PCM_8_BIT ?
- AUDIO_FORMAT_PCM_16_BIT : format,
+ mFormat == AUDIO_FORMAT_PCM_8_BIT &&
+ !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
+ AUDIO_FORMAT_PCM_16_BIT : mFormat,
mChannelMask,
- frameCount,
+ &temp,
&trackFlags,
- sharedBuffer,
+ mSharedBuffer,
output,
tid,
&mSessionId,
- mName,
mClientUid,
&status);
- if (track == 0) {
+ if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create track, status: %d", status);
- return status;
+ goto release;
}
+ ALOG_ASSERT(track != 0);
+
+ // AudioFlinger now owns the reference to the I/O handle,
+ // so we are no longer responsible for releasing it.
+
sp<IMemory> iMem = track->getCblk();
if (iMem == 0) {
ALOGE("Could not get control block");
return NO_INIT;
}
+ void *iMemPointer = iMem->pointer();
+ if (iMemPointer == NULL) {
+ ALOGE("Could not get control block pointer");
+ return NO_INIT;
+ }
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
mDeathNotifier.clear();
}
mAudioTrack = track;
+
mCblkMemory = iMem;
- audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
+ audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
mCblk = cblk;
- size_t temp = cblk->frameCount_;
+ // note that temp is the (possibly revised) value of frameCount
if (temp < frameCount || (frameCount == 0 && temp == 0)) {
// In current design, AudioTrack client checks and ensures frame count validity before
// passing it to AudioFlinger so AudioFlinger should not return a different value except
@@ -1039,12 +1067,13 @@ status_t AudioTrack::createTrack_l(
ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
}
frameCount = temp;
+
mAwaitBoost = false;
- if (flags & AUDIO_OUTPUT_FLAG_FAST) {
+ if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
if (trackFlags & IAudioFlinger::TRACK_FAST) {
ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
mAwaitBoost = true;
- if (sharedBuffer == 0) {
+ if (mSharedBuffer == 0) {
// Theoretically double-buffering is not required for fast tracks,
// due to tighter scheduling. But in practice, to accommodate kernels with
// scheduling jitter, and apps with computation jitter, we use double-buffering.
@@ -1055,26 +1084,27 @@ status_t AudioTrack::createTrack_l(
} else {
ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
// once denied, do not request again if IAudioTrack is re-created
- flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
- mFlags = flags;
- if (sharedBuffer == 0) {
+ mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
+ if (mSharedBuffer == 0) {
if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
mNotificationFramesAct = frameCount/nBuffering;
}
}
}
}
- if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
} else {
ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
- flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- mFlags = flags;
- return NO_INIT;
+ mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ // FIXME This is a warning, not an error, so don't return error status
+ //return NO_INIT;
}
}
+ // We retain a copy of the I/O handle, but don't own the reference
+ mOutput = output;
mRefreshRemaining = true;
// Starting address of buffers in shared memory. If there is a shared buffer, buffers
@@ -1082,15 +1112,16 @@ status_t AudioTrack::createTrack_l(
// immediately after the control block. This address is for the mapping within client
// address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
void* buffers;
- if (sharedBuffer == 0) {
+ if (mSharedBuffer == 0) {
buffers = (char*)cblk + sizeof(audio_track_cblk_t);
} else {
- buffers = sharedBuffer->pointer();
+ buffers = mSharedBuffer->pointer();
}
mAudioTrack->attachAuxEffect(mAuxEffectId);
// FIXME don't believe this lie
- mLatency = afLatency + (1000*frameCount) / sampleRate;
+ mLatency = afLatency + (1000*frameCount) / mSampleRate;
+
mFrameCount = frameCount;
// If IAudioTrack is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
@@ -1099,15 +1130,15 @@ status_t AudioTrack::createTrack_l(
}
// update proxy
- if (sharedBuffer == 0) {
+ if (mSharedBuffer == 0) {
mStaticProxy.clear();
mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
} else {
mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
mProxy = mStaticProxy;
}
- mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
- uint16_t(mVolume[LEFT] * 0x1000));
+ mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[AUDIO_INTERLEAVE_RIGHT] * 0x1000)) << 16) |
+ uint16_t(mVolume[AUDIO_INTERLEAVE_LEFT] * 0x1000));
mProxy->setSendLevel(mSendLevel);
mProxy->setSampleRate(mSampleRate);
mProxy->setEpoch(epoch);
@@ -1117,6 +1148,14 @@ status_t AudioTrack::createTrack_l(
mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
return NO_ERROR;
+ }
+
+release:
+ AudioSystem::releaseOutput(output);
+ if (status == NO_ERROR) {
+ status = NO_INIT;
+ }
+ return status;
}
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
@@ -1244,8 +1283,7 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer)
if (mState == STATE_ACTIVE) {
audio_track_cblk_t* cblk = mCblk;
if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
- ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
- this, mName.string());
+ ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
// FIXME ignoring status
mAudioTrack->start();
}
@@ -1254,7 +1292,7 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer)
// -------------------------------------------------------------------------
-ssize_t AudioTrack::write(const void* buffer, size_t userSize)
+ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
if (mTransfer != TRANSFER_SYNC || mIsTimed) {
return INVALID_OPERATION;
@@ -1273,7 +1311,8 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
while (userSize >= mFrameSize) {
audioBuffer.frameCount = userSize / mFrameSize;
- status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
+ status_t err = obtainBuffer(&audioBuffer,
+ blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
if (err < 0) {
if (written > 0) {
break;
@@ -1369,7 +1408,7 @@ status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
// -------------------------------------------------------------------------
-nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
+nsecs_t AudioTrack::processAudioBuffer()
{
// Currently the AudioTrack thread is not created if there are no callbacks.
// Would it ever make sense to run the thread, even without callbacks?
@@ -1407,7 +1446,7 @@ nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
// for offloaded tracks restoreTrack_l() will just update the sequence and clear
// AudioSystem cache. We should not exit here but after calling the callback so
// that the upper layers can recreate the track
- if (!isOffloaded() || (mSequence == mObservedSequence)) {
+ if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
status_t status = restoreTrack_l("processAudioBuffer");
mLock.unlock();
// Run again immediately, but with a new IAudioTrack
@@ -1462,7 +1501,7 @@ nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
// Cache other fields that will be needed soon
uint32_t loopPeriod = mLoopPeriod;
uint32_t sampleRate = mSampleRate;
- size_t notificationFrames = mNotificationFramesAct;
+ uint32_t notificationFrames = mNotificationFramesAct;
if (mRefreshRemaining) {
mRefreshRemaining = false;
mRemainingFrames = notificationFrames;
@@ -1626,7 +1665,6 @@ nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
size_t reqSize = audioBuffer.size;
mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
size_t writtenSize = audioBuffer.size;
- size_t writtenFrames = writtenSize / mFrameSize;
// Sanity check on returned size
if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
@@ -1692,22 +1730,19 @@ nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
status_t AudioTrack::restoreTrack_l(const char *from)
{
ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
- isOffloaded() ? "Offloaded" : "PCM", from);
+ isOffloaded_l() ? "Offloaded" : "PCM", from);
++mSequence;
status_t result;
// refresh the audio configuration cache in this process to make sure we get new
- // output parameters in getOutput_l() and createTrack_l()
+ // output parameters in createTrack_l()
AudioSystem::clearAudioConfigCache();
- if (isOffloaded()) {
+ if (isOffloaded_l()) {
+ // FIXME re-creation of offloaded tracks is not yet implemented
return DEAD_OBJECT;
}
- // force new output query from audio policy manager;
- mOutput = 0;
- audio_io_handle_t output = getOutput_l();
-
// if the new IAudioTrack is created, createTrack_l() will modify the
// following member variables: mAudioTrack, mCblkMemory and mCblk.
// It will also delete the strong references on previous IAudioTrack and IMemory
@@ -1715,14 +1750,7 @@ status_t AudioTrack::restoreTrack_l(const char *from)
// take the frames that will be lost by track recreation into account in saved position
size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
- result = createTrack_l(mStreamType,
- mSampleRate,
- mFormat,
- mReqFrameCount, // so that frame count never goes down
- mFlags,
- mSharedBuffer,
- output,
- position /*epoch*/);
+ result = createTrack_l(position /*epoch*/);
if (result == NO_ERROR) {
// continue playback from last known position, but
@@ -1750,10 +1778,6 @@ status_t AudioTrack::restoreTrack_l(const char *from)
}
}
if (result != NO_ERROR) {
- //Use of direct and offloaded output streams is ref counted by audio policy manager.
- // As getOutput was called above and resulted in an output stream to be opened,
- // we need to release it.
- AudioSystem::releaseOutput(output);
ALOGW("restoreTrack_l() failed status %d", result);
mState = STATE_STOPPED;
}
@@ -1786,14 +1810,21 @@ status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
String8 AudioTrack::getParameters(const String8& keys)
{
- if (mOutput) {
- return AudioSystem::getParameters(mOutput, keys);
+ audio_io_handle_t output = getOutput();
+ if (output != AUDIO_IO_HANDLE_NONE) {
+ return AudioSystem::getParameters(output, keys);
} else {
return String8::empty();
}
}
-status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
+bool AudioTrack::isOffloaded() const
+{
+ AutoMutex lock(mLock);
+ return isOffloaded_l();
+}
+
+status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
{
const size_t SIZE = 256;
@@ -1802,7 +1833,7 @@ status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
result.append(" AudioTrack::dump\n");
snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType,
- mVolume[0], mVolume[1]);
+ mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
result.append(buffer);
snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%zu)\n", mFormat,
mChannelCount, mFrameCount);
@@ -1823,7 +1854,7 @@ uint32_t AudioTrack::getUnderrunFrames() const
// =========================================================================
-void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
+void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
{
sp<AudioTrack> audioTrack = mAudioTrack.promote();
if (audioTrack != 0) {
@@ -1867,7 +1898,7 @@ bool AudioTrack::AudioTrackThread::threadLoop()
return true;
}
}
- nsecs_t ns = mReceiver.processAudioBuffer(this);
+ nsecs_t ns = mReceiver.processAudioBuffer();
switch (ns) {
case 0:
return true;