-rw-r--r--  include/media/AudioResamplerPublic.h                   13
-rw-r--r--  media/libmedia/AudioTrack.cpp                          82
-rw-r--r--  services/audioflinger/AudioMixer.cpp                   50
-rw-r--r--  services/audioflinger/AudioResamplerFirProcessNeon.h   80
-rw-r--r--  services/audioflinger/Threads.cpp                      31
5 files changed, 132 insertions(+), 124 deletions(-)
diff --git a/include/media/AudioResamplerPublic.h b/include/media/AudioResamplerPublic.h
index 97847a0..b705efa 100644
--- a/include/media/AudioResamplerPublic.h
+++ b/include/media/AudioResamplerPublic.h
@@ -26,4 +26,17 @@
// TODO: replace with an API
#define AUDIO_RESAMPLER_DOWN_RATIO_MAX 256
+// Returns the source frames needed to resample to destination frames. This is not a precise
+// value and depends on the resampler (and possibly how it handles rounding internally).
+// Nevertheless, this should be an upper bound on the requirements of the resampler.
+// If srcSampleRate and dstSampleRate are equal, then it returns destination frames, which
+// may not be true if the resampler is asynchronous.
+static inline size_t sourceFramesNeeded(
+ uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
+ // +1 for rounding - always do this even if matched ratio (resampler may use phases not ratio)
+ // +1 for additional sample needed for interpolation
+ return srcSampleRate == dstSampleRate ? dstFramesRequired :
+ size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
+}
+
#endif // ANDROID_AUDIO_RESAMPLER_PUBLIC_H
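
For reference, a minimal standalone sketch of how the new sourceFramesNeeded() helper behaves; the sample rates and frame counts below are illustrative, not taken from the patch.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Local copy of the helper added above, so the sketch compiles on its own.
    static inline size_t sourceFramesNeeded(
            uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
        // +1 for rounding, +1 for the extra sample needed for interpolation
        return srcSampleRate == dstSampleRate ? dstFramesRequired :
                size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
    }

    int main() {
        // 44.1 kHz source feeding a 960-frame mix buffer at 48 kHz:
        // 960 * 44100 / 48000 = 882, plus 1 + 1 = 884 source frames.
        printf("%zu\n", sourceFramesNeeded(44100, 960, 48000));   // 884
        // Matched rates return the destination count unchanged.
        printf("%zu\n", sourceFramesNeeded(48000, 960, 48000));   // 960
        return 0;
    }
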
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index d4bacc0..1d5fc95 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -66,12 +66,11 @@ status_t AudioTrack::getMinFrameCount(
return BAD_VALUE;
}
- // FIXME merge with similar code in createTrack_l(), except we're missing
- // some information here that is available in createTrack_l():
+ // FIXME handle in server, like createTrack_l(), possible missing info:
// audio_io_handle_t output
// audio_format_t format
// audio_channel_mask_t channelMask
- // audio_output_flags_t flags
+ // audio_output_flags_t flags (FAST)
uint32_t afSampleRate;
status_t status;
status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
@@ -101,16 +100,16 @@ status_t AudioTrack::getMinFrameCount(
minBufCount = 2;
}
- *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
- afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
- // The formula above should always produce a non-zero value, but return an error
- // in the unlikely event that it does not, as that's part of the API contract.
+ *frameCount = minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
+ // The formula above should always produce a non-zero value under normal circumstances:
+ // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
+ // Return error in the unlikely event that it does not, as that's part of the API contract.
if (*frameCount == 0) {
- ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
+ ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
streamType, sampleRate);
return BAD_VALUE;
}
- ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
+ ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%u, afSampleRate=%u, afLatency=%u",
*frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
return NO_ERROR;
}
@@ -1015,11 +1014,9 @@ status_t AudioTrack::createTrack_l()
// The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
// n = 1 fast track with single buffering; nBuffering is ignored
// n = 2 fast track with double buffering
- // n = 2 normal track, no sample rate conversion
- // n = 3 normal track, with sample rate conversion
- // (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
- // n > 3 very high latency or very small notification interval; nBuffering is ignored
- const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
+ // n = 2 normal track (including those with sample rate conversion)
+ // n >= 3 very high latency or very small notification interval (unused).
+ const uint32_t nBuffering = 2;
mNotificationFramesAct = mNotificationFramesReq;
@@ -1060,39 +1057,9 @@ status_t AudioTrack::createTrack_l()
// But when initializing a shared buffer AudioTrack via set(),
// there _is_ a frameCount parameter. We silently ignore it.
frameCount = mSharedBuffer->size() / mFrameSize;
-
- } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
-
- // FIXME move these calculations and associated checks to server
-
- // Ensure that buffer depth covers at least audio hardware latency
- uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
- ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
- afFrameCount, minBufCount, afSampleRate, afLatency);
- if (minBufCount <= nBuffering) {
- minBufCount = nBuffering;
- }
-
- size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
- ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
- ", afLatency=%d",
- minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
-
- if (frameCount == 0) {
- frameCount = minFrameCount;
- } else if (frameCount < minFrameCount) {
- // not ALOGW because it happens all the time when playing key clicks over A2DP
- ALOGV("Minimum buffer size corrected from %zu to %zu",
- frameCount, minFrameCount);
- frameCount = minFrameCount;
- }
- // Make sure that application is notified with sufficient margin before underrun
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
- mNotificationFramesAct = frameCount/nBuffering;
- }
-
} else {
- // For fast tracks, the frame count calculations and checks are done by server
+ // For fast and normal streaming tracks,
+ // the frame count calculations and checks are done by server
}
IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
@@ -1175,23 +1142,10 @@ status_t AudioTrack::createTrack_l()
if (trackFlags & IAudioFlinger::TRACK_FAST) {
ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
mAwaitBoost = true;
- if (mSharedBuffer == 0) {
- // Theoretically double-buffering is not required for fast tracks,
- // due to tighter scheduling. But in practice, to accommodate kernels with
- // scheduling jitter, and apps with computation jitter, we use double-buffering.
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
- mNotificationFramesAct = frameCount/nBuffering;
- }
- }
} else {
ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
// once denied, do not request again if IAudioTrack is re-created
mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
- if (mSharedBuffer == 0) {
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
- mNotificationFramesAct = frameCount/nBuffering;
- }
- }
}
}
if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
@@ -1214,6 +1168,16 @@ status_t AudioTrack::createTrack_l()
//return NO_INIT;
}
}
+ // Make sure that application is notified with sufficient margin before underrun
+ if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
+ // Theoretically double-buffering is not required for fast tracks,
+ // due to tighter scheduling. But in practice, to accommodate kernels with
+ // scheduling jitter, and apps with computation jitter, we use double-buffering
+ // for fast tracks just like normal streaming tracks.
+ if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
+ mNotificationFramesAct = frameCount / nBuffering;
+ }
+ }
// We retain a copy of the I/O handle, but don't own the reference
mOutput = output;
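
With nBuffering now fixed at 2 for both fast and normal streaming tracks, the block above clamps the notification period to half the client buffer regardless of whether the FAST flag was granted. A minimal sketch of that clamp follows; the function name and the frame counts in the usage note are assumptions for illustration only.

    #include <cstddef>
    #include <cstdint>

    // Mirrors the margin check above: notify at least twice per buffer fill.
    static size_t clampNotificationFrames(size_t notificationFramesReq, size_t frameCount) {
        const uint32_t nBuffering = 2;   // double buffering, fast and normal alike
        size_t notificationFramesAct = notificationFramesReq;
        if (notificationFramesAct == 0 || notificationFramesAct > frameCount / nBuffering) {
            notificationFramesAct = frameCount / nBuffering;
        }
        return notificationFramesAct;
    }

    // e.g. a 3536-frame client buffer with no explicit request is notified every 1768 frames:
    // clampNotificationFrames(0, 3536) == 1768
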
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 0d4b358..836f550 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -341,11 +341,46 @@ AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputC
ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
this, format, inputChannelMask, outputChannelMask,
mInputChannels, mOutputChannels);
- // TODO: consider channel representation in index array formulation
- // We ignore channel representation, and just use the bits.
- memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
- audio_channel_mask_get_bits(outputChannelMask),
- audio_channel_mask_get_bits(inputChannelMask));
+
+ const audio_channel_representation_t inputRepresentation =
+ audio_channel_mask_get_representation(inputChannelMask);
+ const audio_channel_representation_t outputRepresentation =
+ audio_channel_mask_get_representation(outputChannelMask);
+ const uint32_t inputBits = audio_channel_mask_get_bits(inputChannelMask);
+ const uint32_t outputBits = audio_channel_mask_get_bits(outputChannelMask);
+
+ switch (inputRepresentation) {
+ case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+ switch (outputRepresentation) {
+ case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+ memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
+ outputBits, inputBits);
+ return;
+ case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+ // TODO: output channel index mask not currently allowed
+ // fall through
+ default:
+ break;
+ }
+ break;
+ case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+ switch (outputRepresentation) {
+ case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+ memcpy_by_index_array_initialization_src_index(mIdxAry, ARRAY_SIZE(mIdxAry),
+ outputBits, inputBits);
+ return;
+ case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+ // TODO: output channel index mask not currently allowed
+ // fall through
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ LOG_ALWAYS_FATAL("invalid channel mask conversion from %#x to %#x",
+ inputChannelMask, outputChannelMask);
}
void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
@@ -605,7 +640,10 @@ status_t AudioMixer::track_t::prepareForDownmix()
&& mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
return NO_ERROR;
}
- if (DownmixerBufferProvider::isMultichannelCapable()) {
+ // DownmixerBufferProvider is only used for position masks.
+ if (audio_channel_mask_get_representation(channelMask)
+ == AUDIO_CHANNEL_REPRESENTATION_POSITION
+ && DownmixerBufferProvider::isMultichannelCapable()) {
DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(channelMask,
mMixerChannelMask,
AUDIO_FORMAT_PCM_16_BIT /* TODO: use mMixerInFormat, now only PCM 16 */,
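
The switch added to RemixBufferProvider only chooses how the channel index array is initialized; the per-frame copy itself is index-driven. The sketch below illustrates that idea with a hand-built index array and a plain loop rather than the memcpy_by_index_array_* helpers, so it makes no claims about their exact signatures.

    #include <cstddef>
    #include <cstdint>

    // Illustrative index-array remix: idxAry[outCh] names the source channel to copy,
    // or -1 to zero-fill that output channel. Concept only, not AOSP's helper.
    static void remixFrames(int16_t* dst, const int16_t* src, size_t frames,
                            const int8_t* idxAry, size_t inChannels, size_t outChannels) {
        for (size_t f = 0; f < frames; ++f) {
            for (size_t o = 0; o < outChannels; ++o) {
                const int8_t i = idxAry[o];
                dst[f * outChannels + o] = (i >= 0) ? src[f * inChannels + i] : 0;
            }
        }
    }

    // Example: a two-channel index-mask input remixed to stereo by straight passthrough.
    // const int8_t idx[2] = { 0, 1 };
    // remixFrames(stereoOut, indexIn, frameCount, idx, 2 /*in*/, 2 /*out*/);
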
diff --git a/services/audioflinger/AudioResamplerFirProcessNeon.h b/services/audioflinger/AudioResamplerFirProcessNeon.h
index d4fa7ad..29ff179 100644
--- a/services/audioflinger/AudioResamplerFirProcessNeon.h
+++ b/services/audioflinger/AudioResamplerFirProcessNeon.h
@@ -115,13 +115,13 @@ inline void ProcessL<2, 16>(int32_t* const out,
"1: \n"
- "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples
- "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples
+ "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo frames
+ "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo frames
"vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
"vld1.16 {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs
- "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive
- "vrev64.16 q3, q3 \n"// (0 combines+) reverse right positive
+ "vrev64.16 q2, q2 \n"// (1) reverse 8 samples of positive left
+ "vrev64.16 q3, q3 \n"// (0 combines+) reverse positive right
"vmlal.s16 q0, d4, d17 \n"// (1) multiply (reversed) samples left
"vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed) samples left
@@ -247,8 +247,8 @@ inline void Process<2, 16>(int32_t* const out,
"1: \n"
- "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples
- "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples
+ "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo frames
+ "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo frames
"vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
"vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation
"vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs
@@ -260,8 +260,8 @@ inline void Process<2, 16>(int32_t* const out,
"vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs
"vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs
- "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive
- "vrev64.16 q3, q3 \n"// (1) reverse 8 frames of the right positive
+ "vrev64.16 q2, q2 \n"// (1) reverse 8 samples of positive left
+ "vrev64.16 q3, q3 \n"// (1) reverse 8 samples of positive right
"vadd.s16 q8, q8, q9 \n"// (1+1d) interpolate (step3) 1st set
"vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set
@@ -323,7 +323,7 @@ inline void ProcessL<1, 16>(int32_t* const out,
"vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
"vld1.32 {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
- "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side
+ "vrev64.16 q2, q2 \n"// reverse 8 samples of the positive side
"vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
"vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
@@ -331,10 +331,10 @@ inline void ProcessL<1, 16>(int32_t* const out,
"vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits
"vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples
"vadd.s32 q0, q0, q12 \n"// accumulate result
"vadd.s32 q13, q13, q14 \n"// accumulate result
@@ -380,13 +380,13 @@ inline void ProcessL<2, 16>(int32_t* const out,
"1: \n"
- "vld2.16 {q2, q3}, [%[sP]] \n"// load 4 16-bits stereo samples
- "vld2.16 {q5, q6}, [%[sN]]! \n"// load 4 16-bits stereo samples
- "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs
- "vld1.32 {q10, q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs
+ "vld2.16 {q2, q3}, [%[sP]] \n"// load 8 16-bits stereo frames
+ "vld2.16 {q5, q6}, [%[sN]]! \n"// load 8 16-bits stereo frames
+ "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
- "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side
- "vrev64.16 q3, q3 \n"// reverse 8 frames of the positive side
+ "vrev64.16 q2, q2 \n"// reverse 8 samples of positive left
+ "vrev64.16 q3, q3 \n"// reverse 8 samples of positive right
"vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
"vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
@@ -394,15 +394,15 @@ inline void ProcessL<2, 16>(int32_t* const out,
"vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits
"vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by coef
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by coef
"vadd.s32 q0, q0, q12 \n"// accumulate result
"vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q0, q0, q15 \n"// (+1) accumulate result
- "vadd.s32 q0, q0, q13 \n"// (+1) accumulate result
+ "vadd.s32 q0, q0, q15 \n"// accumulate result
+ "vadd.s32 q0, q0, q13 \n"// accumulate result
"vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits
"vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits
@@ -410,15 +410,15 @@ inline void ProcessL<2, 16>(int32_t* const out,
"vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits
"vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by coef
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by coef
"vadd.s32 q4, q4, q12 \n"// accumulate result
"vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q4, q4, q15 \n"// (+1) accumulate result
- "vadd.s32 q4, q4, q13 \n"// (+1) accumulate result
+ "vadd.s32 q4, q4, q15 \n"// accumulate result
+ "vadd.s32 q4, q4, q13 \n"// accumulate result
"subs %[count], %[count], #8 \n"// update loop counter
"sub %[sP], %[sP], #32 \n"// move pointer to next set of samples
@@ -485,7 +485,7 @@ inline void Process<1, 16>(int32_t* const out,
"vadd.s32 q10, q10, q14 \n"// interpolate (step3)
"vadd.s32 q11, q11, q15 \n"// interpolate (step3)
- "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side
+ "vrev64.16 q2, q2 \n"// reverse 8 samples of the positive side
"vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
"vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
@@ -549,8 +549,8 @@ inline void Process<2, 16>(int32_t* const out,
"1: \n"
- "vld2.16 {q2, q3}, [%[sP]] \n"// load 4 16-bits stereo samples
- "vld2.16 {q5, q6}, [%[sN]]! \n"// load 4 16-bits stereo samples
+ "vld2.16 {q2, q3}, [%[sP]] \n"// load 8 16-bits stereo frames
+ "vld2.16 {q5, q6}, [%[sN]]! \n"// load 8 16-bits stereo frames
"vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
"vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs
"vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs
@@ -571,8 +571,8 @@ inline void Process<2, 16>(int32_t* const out,
"vadd.s32 q10, q10, q14 \n"// interpolate (step3)
"vadd.s32 q11, q11, q15 \n"// interpolate (step3)
- "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side
- "vrev64.16 q3, q3 \n"// reverse 8 frames of the positive side
+ "vrev64.16 q2, q2 \n"// reverse 8 samples of positive left
+ "vrev64.16 q3, q3 \n"// reverse 8 samples of positive right
"vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
"vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
@@ -587,8 +587,8 @@ inline void Process<2, 16>(int32_t* const out,
"vadd.s32 q0, q0, q12 \n"// accumulate result
"vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q0, q0, q15 \n"// (+1) accumulate result
- "vadd.s32 q0, q0, q13 \n"// (+1) accumulate result
+ "vadd.s32 q0, q0, q15 \n"// accumulate result
+ "vadd.s32 q0, q0, q13 \n"// accumulate result
"vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits
"vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits
@@ -603,8 +603,8 @@ inline void Process<2, 16>(int32_t* const out,
"vadd.s32 q4, q4, q12 \n"// accumulate result
"vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q4, q4, q15 \n"// (+1) accumulate result
- "vadd.s32 q4, q4, q13 \n"// (+1) accumulate result
+ "vadd.s32 q4, q4, q15 \n"// accumulate result
+ "vadd.s32 q4, q4, q13 \n"// accumulate result
"subs %[count], %[count], #8 \n"// update loop counter
"sub %[sP], %[sP], #32 \n"// move pointer to next set of samples
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 384bd25..40ab0af 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -174,18 +174,6 @@ static int sFastTrackMultiplier = kFastTrackMultiplier;
// and that all "fast" AudioRecord clients read from. In either case, the size can be small.
static const size_t kRecordThreadReadOnlyHeapSize = 0x2000;
-// Returns the source frames needed to resample to destination frames. This is not a precise
-// value and depends on the resampler (and possibly how it handles rounding internally).
-// If srcSampleRate and dstSampleRate are equal, then it returns destination frames, which
-// may not be a true if the resampler is asynchronous.
-static inline size_t sourceFramesNeeded(
- uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
- // +1 for rounding - always do this even if matched ratio
- // +1 for additional sample needed for interpolation
- return srcSampleRate == dstSampleRate ? dstFramesRequired :
- size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
-}
-
// ----------------------------------------------------------------------------
static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;
@@ -1497,20 +1485,25 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac
audio_is_linear_pcm(format),
channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
*flags &= ~IAudioFlinger::TRACK_FAST;
- // For compatibility with AudioTrack calculation, buffer depth is forced
- // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
- // This is probably too conservative, but legacy application code may depend on it.
- // If you change this calculation, also review the start threshold which is related.
+ }
+ }
+ // For normal PCM streaming tracks, update minimum frame count.
+ // For compatibility with AudioTrack calculation, buffer depth is forced
+ // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
+ // This is probably too conservative, but legacy application code may depend on it.
+ // If you change this calculation, also review the start threshold which is related.
+ if (!(*flags & IAudioFlinger::TRACK_FAST)
+ && audio_is_linear_pcm(format) && sharedBuffer == 0) {
uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
if (minBufCount < 2) {
minBufCount = 2;
}
- size_t minFrameCount = mNormalFrameCount * minBufCount;
- if (frameCount < minFrameCount) {
+ size_t minFrameCount =
+ minBufCount * sourceFramesNeeded(sampleRate, mNormalFrameCount, mSampleRate);
+ if (frameCount < minFrameCount) { // including frameCount == 0
frameCount = minFrameCount;
}
- }
}
*pFrameCount = frameCount;
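
A worked sketch of the server-side minimum that now replaces the client-side calculation, under assumed values (mNormalFrameCount = 960, mSampleRate = 48000 Hz, HAL latency = 80 ms, client sampleRate = 44100 Hz); all numbers are illustrative.

    #include <cstddef>
    #include <cstdint>

    static size_t serverMinFrameCount(uint32_t sampleRate, size_t normalFrameCount,
                                      uint32_t mixerSampleRate, uint32_t latencyMs) {
        // Cover the hardware latency with whole mixer buffers, but never fewer than two.
        uint32_t minBufCount = latencyMs / ((1000 * normalFrameCount) / mixerSampleRate);
        if (minBufCount < 2) {
            minBufCount = 2;
        }
        // sourceFramesNeeded() as added to AudioResamplerPublic.h above.
        size_t perBuffer = (sampleRate == mixerSampleRate) ? normalFrameCount :
                size_t((uint64_t)normalFrameCount * sampleRate / mixerSampleRate + 1 + 1);
        return minBufCount * perBuffer;
    }

    // serverMinFrameCount(44100, 960, 48000, 80) -> minBufCount = 80 / 20 = 4,
    // perBuffer = 882 + 2 = 884, minimum = 4 * 884 = 3536 frames.
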