Diffstat (limited to 'services/audioflinger')
-rw-r--r--  services/audioflinger/Android.mk                 |   4
-rw-r--r--  services/audioflinger/AudioBufferProvider.cpp    |  28
-rw-r--r--  services/audioflinger/AudioBufferProvider.h      |  11
-rw-r--r--  services/audioflinger/AudioFlinger.cpp           | 722
-rw-r--r--  services/audioflinger/AudioFlinger.h             | 131
-rw-r--r--  services/audioflinger/AudioMixer.cpp             |  61
-rw-r--r--  services/audioflinger/AudioMixer.h               |  25
-rw-r--r--  services/audioflinger/AudioResampler.cpp         |  33
-rw-r--r--  services/audioflinger/AudioResampler.h           |   8
-rw-r--r--  services/audioflinger/AudioResamplerCubic.cpp    |  11
-rw-r--r--  services/audioflinger/AudioResamplerSinc.cpp     |   4
11 files changed, 956 insertions(+), 82 deletions(-)
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index fa49592..fd0609a 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -8,12 +8,14 @@ LOCAL_SRC_FILES:= \
AudioResampler.cpp.arm \
AudioResamplerSinc.cpp.arm \
AudioResamplerCubic.cpp.arm \
- AudioPolicyService.cpp
+ AudioPolicyService.cpp \
+ AudioBufferProvider.cpp
LOCAL_C_INCLUDES := \
system/media/audio_effects/include
LOCAL_SHARED_LIBRARIES := \
+ libcommon_time_client \
libcutils \
libutils \
libbinder \
diff --git a/services/audioflinger/AudioBufferProvider.cpp b/services/audioflinger/AudioBufferProvider.cpp
new file mode 100644
index 0000000..678fd58
--- /dev/null
+++ b/services/audioflinger/AudioBufferProvider.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef __STRICT_ANSI__
+#define __STDINT_LIMITS
+#define __STDC_LIMIT_MACROS
+#include <stdint.h>
+
+#include "AudioBufferProvider.h"
+
+namespace android {
+
+const int64_t AudioBufferProvider::kInvalidPTS = INT64_MAX;
+
+}; // namespace android
diff --git a/services/audioflinger/AudioBufferProvider.h b/services/audioflinger/AudioBufferProvider.h
index 81c5c39..62ad6bd 100644
--- a/services/audioflinger/AudioBufferProvider.h
+++ b/services/audioflinger/AudioBufferProvider.h
@@ -38,8 +38,15 @@ public:
};
virtual ~AudioBufferProvider() {}
-
- virtual status_t getNextBuffer(Buffer* buffer) = 0;
+
+ // value representing an invalid presentation timestamp
+ static const int64_t kInvalidPTS;
+
+ // pts is the local time when the next sample yielded by getNextBuffer
+ // will be rendered.
+ // Pass kInvalidPTS if the PTS is unknown or not applicable.
+ virtual status_t getNextBuffer(Buffer* buffer, int64_t pts) = 0;
+
virtual void releaseBuffer(Buffer* buffer) = 0;
};
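
The interface change above means every AudioBufferProvider implementation now accepts a presentation timestamp on getNextBuffer(). A minimal stand-alone sketch of the new contract follows; the SilenceProvider class and its stand-in types are hypothetical and not part of this patch. A provider with no timing requirements simply ignores the pts argument, and a caller without a timestamp passes kInvalidPTS:

#include <cstdint>
#include <cstddef>
#include <cstring>

typedef int status_t;                          // stand-in for Android's status_t
static const status_t NO_ERROR = 0;
static const int64_t kInvalidPTS = INT64_MAX;  // mirrors AudioBufferProvider::kInvalidPTS

struct Buffer {                                // stand-in for AudioBufferProvider::Buffer
    void*  raw;
    size_t frameCount;
};

class SilenceProvider {
public:
    SilenceProvider() { memset(mSilence, 0, sizeof(mSilence)); }

    // pts is the local time at which the first returned sample will be
    // rendered; a provider with no timing requirements simply ignores it,
    // and callers with no timestamp pass kInvalidPTS.
    status_t getNextBuffer(Buffer* buffer, int64_t pts) {
        (void)pts;
        if (buffer->frameCount > kFrames)
            buffer->frameCount = kFrames;
        buffer->raw = mSilence;
        return NO_ERROR;
    }

    void releaseBuffer(Buffer* buffer) {
        buffer->raw = 0;
        buffer->frameCount = 0;
    }

private:
    static const size_t kFrames = 256;
    int16_t mSilence[kFrames * 2];             // one block of stereo 16-bit silence
};

The DirectOutputThread and RecordThread call sites later in this patch follow exactly that second pattern, passing AudioBufferProvider::kInvalidPTS wherever no next-write timestamp is available.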
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 05d6d72..2e7449e 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -58,6 +58,9 @@
#include <powermanager/PowerManager.h>
// #define DEBUG_CPU_USAGE 10 // log statistics every n wall clock seconds
+#include <common_time/cc_helper.h>
+#include <common_time/local_clock.h>
+
// ----------------------------------------------------------------------------
@@ -66,7 +69,6 @@ namespace android {
static const char* kDeadlockedString = "AudioFlinger may be deadlocked\n";
static const char* kHardwareLockedString = "Hardware lock is taken\n";
-//static const nsecs_t kStandbyTimeInNsecs = seconds(3);
static const float MAX_GAIN = 4096.0f;
static const float MAX_GAIN_INT = 0x1000;
@@ -94,6 +96,9 @@ static const uint32_t kMinThreadSleepTimeUs = 5000;
// maximum divider applied to the active sleep time in the mixer thread loop
static const uint32_t kMaxThreadSleepTimeShift = 2;
+nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
+bool AudioFlinger::mStaticInitDone = false;
+Mutex AudioFlinger::mStaticInitLock;
// ----------------------------------------------------------------------------
@@ -158,11 +163,37 @@ static const char *audio_interfaces[] = {
AudioFlinger::AudioFlinger()
: BnAudioFlinger(),
- mPrimaryHardwareDev(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1),
- mBtNrecIsOff(false)
+ mPrimaryHardwareDev(0), mMasterVolume(1.0f), mMasterVolumeSW(1.0f),
+ mMasterVolumeSupportLvl(MVS_NONE), mMasterMute(false), mNextUniqueId(1),
+ mBtNrecIsOff(false)
{
}
+bool AudioFlinger::doStaticInit() {
+ if (!mStaticInitDone) {
+ Mutex::Autolock _l(mStaticInitLock);
+
+ if (mStaticInitDone)
+ return true;
+
+ char val_str[PROPERTY_VALUE_MAX] = { 0 };
+ if (property_get("ro.audio.flinger_standbytime_ms", val_str, NULL) >= 0) {
+ uint32_t int_val;
+ if (1 == sscanf(val_str, "%u", &int_val)) {
+ mStandbyTimeInNsecs = milliseconds(int_val);
+ LOGI("Using %u mSec as standby time.", int_val);
+ } else {
+ mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
+ LOGI("Using default %u mSec as standby time.",
+ (uint32_t)(mStandbyTimeInNsecs / 1000000));
+ }
+ }
+
+ mStaticInitDone = true;
+ }
+
+ return mStaticInitDone;
+}
void AudioFlinger::onFirstRef()
{
int rc = 0;
@@ -172,6 +203,10 @@ void AudioFlinger::onFirstRef()
/* TODO: move all this work into an Init() function */
mHardwareStatus = AUDIO_HW_IDLE;
+ // Make sure all static variables are set up. Right now, this is limited to
+ // the standby time parameter.
+ doStaticInit();
+
for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
const hw_module_t *mod;
audio_hw_device_t *dev;
@@ -198,6 +233,33 @@ void AudioFlinger::onFirstRef()
return;
}
+ // Determine the level of master volume support the primary audio HAL has,
+ // and set the initial master volume at the same time.
+ float initialVolume = 1.0;
+ mMasterVolumeSupportLvl = MVS_NONE;
+ if (0 == mPrimaryHardwareDev->init_check(mPrimaryHardwareDev)) {
+ AutoMutex lock(mHardwareLock);
+ audio_hw_device_t *dev = mPrimaryHardwareDev;
+
+ mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+ if ((NULL != dev->get_master_volume) &&
+ (NO_ERROR == dev->get_master_volume(dev, &initialVolume))) {
+ mMasterVolumeSupportLvl = MVS_FULL;
+ } else {
+ mMasterVolumeSupportLvl = MVS_SETONLY;
+ initialVolume = 1.0;
+ }
+
+ mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+ if ((NULL == dev->set_master_volume) ||
+ (NO_ERROR != dev->set_master_volume(dev, initialVolume))) {
+ mMasterVolumeSupportLvl = MVS_NONE;
+ }
+ mHardwareStatus = AUDIO_HW_INIT;
+ }
+
+ // Set the mode for each audio HAL, and try to set the initial volume (if
+ // supported) for all of the non-primary audio HALs.
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
audio_hw_device_t *dev = mAudioHwDevs[i];
@@ -209,11 +271,22 @@ void AudioFlinger::onFirstRef()
mMode = AUDIO_MODE_NORMAL;
mHardwareStatus = AUDIO_HW_SET_MODE;
dev->set_mode(dev, mMode);
- mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
- dev->set_master_volume(dev, 1.0f);
- mHardwareStatus = AUDIO_HW_IDLE;
+
+ if ((dev != mPrimaryHardwareDev) &&
+ (NULL != dev->set_master_volume)) {
+ mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+ dev->set_master_volume(dev, initialVolume);
+ }
+
+ mHardwareStatus = AUDIO_HW_INIT;
}
}
+
+ mMasterVolumeSW = (MVS_NONE == mMasterVolumeSupportLvl)
+ ? initialVolume
+ : 1.0;
+ mMasterVolume = initialVolume;
+ mHardwareStatus = AUDIO_HW_IDLE;
}
status_t AudioFlinger::initCheck() const
@@ -292,7 +365,10 @@ status_t AudioFlinger::dumpInternals(int fd, const Vector<String16>& args)
String8 result;
int hardwareStatus = mHardwareStatus;
- snprintf(buffer, SIZE, "Hardware status: %d\n", hardwareStatus);
+ snprintf(buffer, SIZE, "Hardware status: %d\n"
+ "Standby Time mSec: %u\n",
+ hardwareStatus,
+ (uint32_t)(mStandbyTimeInNsecs / 1000000));
result.append(buffer);
write(fd, result.string(), result.size());
return NO_ERROR;
@@ -384,6 +460,7 @@ sp<IAudioTrack> AudioFlinger::createTrack(
uint32_t flags,
const sp<IMemory>& sharedBuffer,
int output,
+ bool isTimed,
int *sessionId,
status_t *status)
{
@@ -447,7 +524,7 @@ sp<IAudioTrack> AudioFlinger::createTrack(
LOGV("createTrack() lSessionId: %d", lSessionId);
track = thread->createTrack_l(client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, lSessionId, &lStatus);
+ channelMask, frameCount, sharedBuffer, lSessionId, isTimed, &lStatus);
// move effect chain to this output thread if an effect on same session was waiting
// for a track to be created
@@ -540,20 +617,29 @@ status_t AudioFlinger::setMasterVolume(float value)
return PERMISSION_DENIED;
}
+ float swmv = value;
+
// when hw supports master volume, don't scale in sw mixer
- { // scope for the lock
- AutoMutex lock(mHardwareLock);
- mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
- if (mPrimaryHardwareDev->set_master_volume(mPrimaryHardwareDev, value) == NO_ERROR) {
- value = 1.0f;
+ if (MVS_NONE != mMasterVolumeSupportLvl) {
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ AutoMutex lock(mHardwareLock);
+ audio_hw_device_t *dev = mAudioHwDevs[i];
+
+ mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+ if (NULL != dev->set_master_volume) {
+ dev->set_master_volume(dev, value);
+ }
+ mHardwareStatus = AUDIO_HW_IDLE;
}
- mHardwareStatus = AUDIO_HW_IDLE;
+
+ swmv = 1.0;
}
Mutex::Autolock _l(mLock);
- mMasterVolume = value;
+ mMasterVolume = value;
+ mMasterVolumeSW = swmv;
for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
- mPlaybackThreads.valueAt(i)->setMasterVolume(value);
+ mPlaybackThreads.valueAt(i)->setMasterVolume(swmv);
return NO_ERROR;
}
@@ -641,9 +727,27 @@ status_t AudioFlinger::setMasterMute(bool muted)
float AudioFlinger::masterVolume() const
{
+ if (MVS_FULL == mMasterVolumeSupportLvl) {
+ float ret_val;
+ AutoMutex lock(mHardwareLock);
+
+ mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+ assert(NULL != mPrimaryHardwareDev);
+ assert(NULL != mPrimaryHardwareDev->get_master_volume);
+
+ mPrimaryHardwareDev->get_master_volume(mPrimaryHardwareDev, &ret_val);
+ mHardwareStatus = AUDIO_HW_IDLE;
+ return ret_val;
+ }
+
return mMasterVolume;
}
+float AudioFlinger::masterVolumeSW() const
+{
+ return mMasterVolumeSW;
+}
+
bool AudioFlinger::masterMute() const
{
return mMasterMute;
@@ -820,7 +924,7 @@ String8 AudioFlinger::getParameters(int ioHandle, const String8& keys)
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
audio_hw_device_t *dev = mAudioHwDevs[i];
char *s = dev->get_parameters(dev, keys.string());
- out_s8 += String8(s);
+ out_s8 += String8(s ? s : "");
free(s);
}
return out_s8;
@@ -1375,7 +1479,8 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge
readOutputParameters();
- mMasterVolume = mAudioFlinger->masterVolume();
+ mMasterVolume = mAudioFlinger->masterVolumeSW();
+
mMasterMute = mAudioFlinger->masterMute();
for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
@@ -1486,6 +1591,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTra
int frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
+ bool isTimed,
status_t *status)
{
sp<Track> track;
@@ -1535,9 +1641,14 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTra
}
}
- track = new Track(this, client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, sessionId);
- if (track->getCblk() == NULL || track->name() < 0) {
+ if (!isTimed) {
+ track = new Track(this, client, streamType, sampleRate, format,
+ channelMask, frameCount, sharedBuffer, sessionId);
+ } else {
+ track = TimedTrack::create(this, client, streamType, sampleRate, format,
+ channelMask, frameCount, sharedBuffer, sessionId);
+ }
+ if (track == NULL || track->getCblk() == NULL || track->name() < 0) {
lStatus = NO_MEMORY;
goto Exit;
}
@@ -1874,6 +1985,10 @@ bool AudioFlinger::MixerThread::threadLoop()
acquireWakeLock();
+ LocalClock lc;
+ uint64_t localTimeFreq;
+ localTimeFreq = lc.getLocalFreq();
+
while (!exitPending())
{
#ifdef DEBUG_CPU_USAGE
@@ -1956,7 +2071,7 @@ bool AudioFlinger::MixerThread::threadLoop()
}
}
- standbyTime = systemTime() + kStandbyTimeInNsecs;
+ standbyTime = systemTime() + mStandbyTimeInNsecs;
sleepTime = idleSleepTime;
sleepTimeShift = 0;
continue;
@@ -1972,8 +2087,21 @@ bool AudioFlinger::MixerThread::threadLoop()
}
if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
+ // obtain the presentation timestamp of the next output buffer
+ int64_t pts;
+ status_t status = INVALID_OPERATION;
+
+ if (NULL != mOutput->stream->get_next_write_timestamp) {
+ status = mOutput->stream->get_next_write_timestamp(
+ mOutput->stream, &pts);
+ }
+
+ if (status != NO_ERROR) {
+ pts = AudioBufferProvider::kInvalidPTS;
+ }
+
// mix buffers...
- mAudioMixer->process();
+ mAudioMixer->process(pts);
// increase sleep time progressively when application underrun condition clears.
// Only increase sleep time if the mixer is ready for two consecutive times to avoid
// that a steady state of alternating ready/not ready conditions keeps the sleep time
@@ -1982,7 +2110,7 @@ bool AudioFlinger::MixerThread::threadLoop()
sleepTimeShift--;
}
sleepTime = 0;
- standbyTime = systemTime() + kStandbyTimeInNsecs;
+ standbyTime = systemTime() + mStandbyTimeInNsecs;
//TODO: delay standby when effects have a tail
} else {
// If no tracks are ready, sleep once for the duration of an output
@@ -2127,7 +2255,7 @@ uint32_t AudioFlinger::MixerThread::prepareTracks_l(const SortedVector< wp<Track
LOG_ASSERT(minFrames <= cblk->frameCount);
}
}
- if ((cblk->framesReady() >= minFrames) && track->isReady() &&
+ if ((track->framesReady() >= minFrames) && track->isReady() &&
!track->isPaused() && !track->isTerminated())
{
//LOGV("track %d u=%08x, s=%08x [OK] on thread %p", track->name(), cblk->user, cblk->server, this);
@@ -2789,7 +2917,8 @@ bool AudioFlinger::DirectOutputThread::threadLoop()
// output audio to hardware
while (frameCount) {
buffer.frameCount = frameCount;
- activeTrack->getNextBuffer(&buffer);
+ activeTrack->getNextBuffer(&buffer,
+ AudioBufferProvider::kInvalidPTS);
if (UNLIKELY(buffer.raw == 0)) {
memset(curBuf, 0, frameCount * mFrameSize);
break;
@@ -3042,7 +3171,7 @@ bool AudioFlinger::DuplicatingThread::threadLoop()
}
}
- standbyTime = systemTime() + kStandbyTimeInNsecs;
+ standbyTime = systemTime() + mStandbyTimeInNsecs;
sleepTime = idleSleepTime;
continue;
}
@@ -3059,7 +3188,7 @@ bool AudioFlinger::DuplicatingThread::threadLoop()
if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
// mix buffers...
if (outputsReady(outputTracks)) {
- mAudioMixer->process();
+ mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
} else {
memset(mMixBuffer, 0, mixBufferSize);
}
@@ -3096,7 +3225,7 @@ bool AudioFlinger::DuplicatingThread::threadLoop()
// enable changes in effect chain
unlockEffectChains(effectChains);
- standbyTime = systemTime() + kStandbyTimeInNsecs;
+ standbyTime = systemTime() + mStandbyTimeInNsecs;
for (size_t i = 0; i < outputTracks.size(); i++) {
outputTracks[i]->write(mMixBuffer, writeFrames);
}
@@ -3454,7 +3583,8 @@ void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
(int)mAuxBuffer);
}
-status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
+ AudioBufferProvider::Buffer* buffer, int64_t pts)
{
audio_track_cblk_t* cblk = this->cblk();
uint32_t framesReady;
@@ -3495,10 +3625,14 @@ getNextBuffer_exit:
return NOT_ENOUGH_DATA;
}
+uint32_t AudioFlinger::PlaybackThread::Track::framesReady() const{
+ return mCblk->framesReady();
+}
+
bool AudioFlinger::PlaybackThread::Track::isReady() const {
if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) return true;
- if (mCblk->framesReady() >= mCblk->frameCount ||
+ if (framesReady() >= mCblk->frameCount ||
(mCblk->flags & CBLK_FORCEREADY_MSK)) {
mFillingUpStatus = FS_FILLED;
android_atomic_and(~CBLK_FORCEREADY_MSK, &mCblk->flags);
@@ -3667,6 +3801,465 @@ void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *bu
mAuxBuffer = buffer;
}
+// timed audio tracks
+
+sp<AudioFlinger::PlaybackThread::TimedTrack>
+AudioFlinger::PlaybackThread::TimedTrack::create(
+ const wp<ThreadBase>& thread,
+ const sp<Client>& client,
+ int streamType,
+ uint32_t sampleRate,
+ uint32_t format,
+ uint32_t channelMask,
+ int frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId) {
+ if (!client->reserveTimedTrack())
+ return NULL;
+
+ sp<TimedTrack> track = new TimedTrack(
+ thread, client, streamType, sampleRate, format, channelMask, frameCount,
+ sharedBuffer, sessionId);
+
+ if (track == NULL) {
+ client->releaseTimedTrack();
+ return NULL;
+ }
+
+ return track;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
+ const wp<ThreadBase>& thread,
+ const sp<Client>& client,
+ int streamType,
+ uint32_t sampleRate,
+ uint32_t format,
+ uint32_t channelMask,
+ int frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId)
+ : Track(thread, client, streamType, sampleRate, format, channelMask,
+ frameCount, sharedBuffer, sessionId),
+ mQueueHeadInFlight(false),
+ mTrimQueueHeadOnRelease(false),
+ mTimedSilenceBuffer(NULL),
+ mTimedSilenceBufferSize(0),
+ mTimedAudioOutputOnTime(false),
+ mMediaTimeTransformValid(false)
+{
+ LocalClock lc;
+ mLocalTimeFreq = lc.getLocalFreq();
+
+ mLocalTimeToSampleTransform.a_zero = 0;
+ mLocalTimeToSampleTransform.b_zero = 0;
+ mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
+ mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
+ LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
+ &mLocalTimeToSampleTransform.a_to_b_denom);
+
+ mMediaTimeToSampleTransform.a_zero = 0;
+ mMediaTimeToSampleTransform.b_zero = 0;
+ mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
+ mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
+ LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
+ &mMediaTimeToSampleTransform.a_to_b_denom);
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
+ mClient->releaseTimedTrack();
+ delete [] mTimedSilenceBuffer;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
+ size_t size, sp<IMemory>* buffer) {
+
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ trimTimedBufferQueue_l();
+
+ // lazily initialize the shared memory heap for timed buffers
+ if (mTimedMemoryDealer == NULL) {
+ const int kTimedBufferHeapSize = 512 << 10;
+
+ mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
+ "AudioFlingerTimed");
+ if (mTimedMemoryDealer == NULL)
+ return NO_MEMORY;
+ }
+
+ sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
+ if (newBuffer == NULL) {
+ newBuffer = mTimedMemoryDealer->allocate(size);
+ if (newBuffer == NULL)
+ return NO_MEMORY;
+ }
+
+ *buffer = newBuffer;
+ return NO_ERROR;
+}
+
+// caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
+ int64_t mediaTimeNow;
+ {
+ Mutex::Autolock mttLock(mMediaTimeTransformLock);
+ if (!mMediaTimeTransformValid)
+ return;
+
+ int64_t targetTimeNow;
+ status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
+ ? mCCHelper.getCommonTime(&targetTimeNow)
+ : mCCHelper.getLocalTime(&targetTimeNow);
+
+ if (OK != res)
+ return;
+
+ if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
+ &mediaTimeNow)) {
+ return;
+ }
+ }
+
+ size_t trimIndex;
+ for (trimIndex = 0; trimIndex < mTimedBufferQueue.size(); trimIndex++) {
+ int64_t frameCount = mTimedBufferQueue[trimIndex].buffer()->size()
+ / mCblk->frameSize;
+ int64_t bufEnd;
+
+ if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
+ &bufEnd)) {
+ LOGE("Failed to convert frame count of %lld to media time duration"
+ " (scale factor %d/%u) in %s", frameCount,
+ mMediaTimeToSampleTransform.a_to_b_numer,
+ mMediaTimeToSampleTransform.a_to_b_denom,
+ __PRETTY_FUNCTION__);
+ break;
+ }
+ bufEnd += mTimedBufferQueue[trimIndex].pts();
+
+ if (bufEnd > mediaTimeNow)
+ break;
+
+ // Is the buffer we want to use in the middle of a mix operation right
+ // now? If so, don't actually trim it. Just wait for the releaseBuffer
+ // from the mixer which should be coming back shortly.
+ if (!trimIndex && mQueueHeadInFlight) {
+ mTrimQueueHeadOnRelease = true;
+ }
+ }
+
+ size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
+ if (trimStart < trimIndex) {
+ mTimedBufferQueue.removeItemsAt(trimStart, trimIndex);
+ }
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
+ const sp<IMemory>& buffer, int64_t pts) {
+
+ {
+ Mutex::Autolock mttLock(mMediaTimeTransformLock);
+ if (!mMediaTimeTransformValid)
+ return INVALID_OPERATION;
+ }
+
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ mTimedBufferQueue.add(TimedBuffer(buffer, pts));
+
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
+ const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
+
+ LOGV("%s az=%lld bz=%lld n=%d d=%u tgt=%d", __PRETTY_FUNCTION__,
+ xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
+ target);
+
+ if (!(target == TimedAudioTrack::LOCAL_TIME ||
+ target == TimedAudioTrack::COMMON_TIME)) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mMediaTimeTransformLock);
+ mMediaTimeTransform = xform;
+ mMediaTimeTransformTarget = target;
+ mMediaTimeTransformValid = true;
+
+ return NO_ERROR;
+}
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+// implementation of getNextBuffer for tracks whose buffers have timestamps
+status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
+ AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+ if (pts == AudioBufferProvider::kInvalidPTS) {
+ buffer->raw = 0;
+ buffer->frameCount = 0;
+ return INVALID_OPERATION;
+ }
+
+ // get ahold of the output stream that these samples will be written to
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread == NULL) {
+ buffer->raw = 0;
+ buffer->frameCount = 0;
+ return INVALID_OPERATION;
+ }
+ PlaybackThread* playbackThread = static_cast<PlaybackThread*>(thread.get());
+
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ LOG_ASSERT(!mQueueHeadInFlight,
+ "getNextBuffer called without releaseBuffer!");
+
+ while (true) {
+
+ // if we have no timed buffers, then fail
+ if (mTimedBufferQueue.isEmpty()) {
+ buffer->raw = 0;
+ buffer->frameCount = 0;
+ return NOT_ENOUGH_DATA;
+ }
+
+ TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+ // calculate the PTS of the head of the timed buffer queue expressed in
+ // local time
+ int64_t headLocalPTS;
+ {
+ Mutex::Autolock mttLock(mMediaTimeTransformLock);
+
+ assert(mMediaTimeTransformValid);
+
+ if (mMediaTimeTransform.a_to_b_denom == 0) {
+ // the transform represents a pause, so yield silence
+ timedYieldSilence_l(buffer->frameCount, buffer);
+ return NO_ERROR;
+ }
+
+ int64_t transformedPTS;
+ if (!mMediaTimeTransform.doForwardTransform(head.pts(),
+ &transformedPTS)) {
+ // the transform failed. this shouldn't happen, but if it does
+ // then just drop this buffer
+ LOGW("timedGetNextBuffer transform failed");
+ buffer->raw = 0;
+ buffer->frameCount = 0;
+ mTimedBufferQueue.removeAt(0);
+ return NO_ERROR;
+ }
+
+ if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
+ if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
+ &headLocalPTS)) {
+ buffer->raw = 0;
+ buffer->frameCount = 0;
+ return INVALID_OPERATION;
+ }
+ } else {
+ headLocalPTS = transformedPTS;
+ }
+ }
+
+ // adjust the head buffer's PTS to reflect the portion of the head buffer
+ // that has already been consumed
+ int64_t effectivePTS = headLocalPTS +
+ ((head.position() / mCblk->frameSize) * mLocalTimeFreq / sampleRate());
+
+ // Calculate the delta in samples between the head of the input buffer
+ // queue and the start of the next output buffer that will be written.
+ // If the transformation fails because of over or underflow, it means
+ // that the sample's position in the output stream is so far out of
+ // whack that it should just be dropped.
+ int64_t sampleDelta;
+ if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
+ LOGV("*** head buffer is too far from PTS: dropped buffer");
+ mTimedBufferQueue.removeAt(0);
+ continue;
+ }
+ if (!mLocalTimeToSampleTransform.doForwardTransform(
+ (effectivePTS - pts) << 32, &sampleDelta)) {
+ LOGV("*** too late during sample rate transform: dropped buffer");
+ mTimedBufferQueue.removeAt(0);
+ continue;
+ }
+
+ LOGV("*** %s head.pts=%lld head.pos=%d pts=%lld sampleDelta=[%d.%08x]",
+ __PRETTY_FUNCTION__, head.pts(), head.position(), pts,
+ static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1) + (sampleDelta >> 32)),
+ static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
+
+ // if the delta between the ideal placement for the next input sample and
+ // the current output position is within this threshold, then we will
+ // concatenate the next input samples to the previous output
+ const int64_t kSampleContinuityThreshold =
+ (static_cast<int64_t>(sampleRate()) << 32) / 10;
+
+ // if this is the first buffer of audio that we're emitting from this track
+ // then it should be almost exactly on time.
+ const int64_t kSampleStartupThreshold = 1LL << 32;
+
+ if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
+ (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
+ // the next input is close enough to being on time, so concatenate it
+ // with the last output
+ timedYieldSamples_l(buffer);
+
+ LOGV("*** on time: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+ return NO_ERROR;
+ } else if (sampleDelta > 0) {
+ // the gap between the current output position and the proper start of
+ // the next input sample is too big, so fill it with silence
+ uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
+
+ timedYieldSilence_l(framesUntilNextInput, buffer);
+ LOGV("*** silence: frameCount=%u", buffer->frameCount);
+ return NO_ERROR;
+ } else {
+ // the next input sample is late
+ uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
+ size_t onTimeSamplePosition =
+ head.position() + lateFrames * mCblk->frameSize;
+
+ if (onTimeSamplePosition > head.buffer()->size()) {
+ // all the remaining samples in the head are too late, so
+ // drop it and move on
+ LOGV("*** too late: dropped buffer");
+ mTimedBufferQueue.removeAt(0);
+ continue;
+ } else {
+ // skip over the late samples
+ head.setPosition(onTimeSamplePosition);
+
+ // yield the available samples
+ timedYieldSamples_l(buffer);
+
+ LOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+ return NO_ERROR;
+ }
+ }
+ }
+}
+
+// Yield samples from the timed buffer queue head up to the given output
+// buffer's capacity.
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
+ AudioBufferProvider::Buffer* buffer) {
+
+ const TimedBuffer& head = mTimedBufferQueue[0];
+
+ buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
+ head.position());
+
+ uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
+ mCblk->frameSize);
+ size_t framesRequested = buffer->frameCount;
+ buffer->frameCount = min(framesLeftInHead, framesRequested);
+
+ mQueueHeadInFlight = true;
+ mTimedAudioOutputOnTime = true;
+}
+
+// Yield samples of silence up to the given output buffer's capacity
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
+ uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
+
+ // lazily allocate a buffer filled with silence
+ if (mTimedSilenceBufferSize < numFrames * mCblk->frameSize) {
+ delete [] mTimedSilenceBuffer;
+ mTimedSilenceBufferSize = numFrames * mCblk->frameSize;
+ mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
+ memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
+ }
+
+ buffer->raw = mTimedSilenceBuffer;
+ size_t framesRequested = buffer->frameCount;
+ buffer->frameCount = min(numFrames, framesRequested);
+
+ mTimedAudioOutputOnTime = false;
+}
+
+void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
+ AudioBufferProvider::Buffer* buffer) {
+
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ // If the buffer which was just released is part of the buffer at the head
+ // of the queue, be sure to update the amount of the buffer which has been
+ // consumed. If the buffer being returned is not part of the head of the
+ // queue, it's either because the buffer is part of the silence buffer, or
+ // because the head of the timed queue was trimmed after the mixer called
+ // getNextBuffer but before the mixer called releaseBuffer.
+ if (buffer->raw == mTimedSilenceBuffer) {
+ LOG_ASSERT(!mQueueHeadInFlight,
+ "Queue head in flight during release of silence buffer!");
+ goto done;
+ }
+
+ LOG_ASSERT(mQueueHeadInFlight,
+ "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
+ " head in flight.");
+
+ if (mTimedBufferQueue.size()) {
+ TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+ void* start = head.buffer()->pointer();
+ void* end = reinterpret_cast<void*>(
+ reinterpret_cast<uint8_t*>(head.buffer()->pointer())
+ + head.buffer()->size());
+
+ LOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
+ "released buffer not within the head of the timed buffer"
+ " queue; qHead = [%p, %p], released buffer = %p",
+ start, end, buffer->raw);
+
+ head.setPosition(head.position() +
+ (buffer->frameCount * mCblk->frameSize));
+ mQueueHeadInFlight = false;
+
+ if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
+ || mTrimQueueHeadOnRelease) {
+ mTimedBufferQueue.removeAt(0);
+ mTrimQueueHeadOnRelease = false;
+ }
+ } else {
+ LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
+ " buffers in the timed buffer queue");
+ }
+
+done:
+ buffer->raw = 0;
+ buffer->frameCount = 0;
+}
+
+uint32_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
+ Mutex::Autolock _l(mTimedBufferQueueLock);
+
+ uint32_t frames = 0;
+ for (size_t i = 0; i < mTimedBufferQueue.size(); i++) {
+ const TimedBuffer& tb = mTimedBufferQueue[i];
+ frames += (tb.buffer()->size() - tb.position()) / mCblk->frameSize;
+ }
+
+ return frames;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
+ : mPTS(0), mPosition(0) {}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
+ const sp<IMemory>& buffer, int64_t pts)
+ : mBuffer(buffer), mPTS(pts), mPosition(0) {}
+
// ----------------------------------------------------------------------------
// RecordTrack constructor must be called with AudioFlinger::mLock held
@@ -3703,7 +4296,7 @@ AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
}
}
-status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
{
audio_track_cblk_t* cblk = this->cblk();
uint32_t framesAvail;
@@ -4025,7 +4618,8 @@ AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
: RefBase(),
mAudioFlinger(audioFlinger),
mMemoryDealer(new MemoryDealer(1024*1024, "AudioFlinger::Client")),
- mPid(pid)
+ mPid(pid),
+ mTimedTrackCount(0)
{
// 1 MB of address space is good for 32 tracks, 8 buffers each, 4 KB/buffer
}
@@ -4041,6 +4635,31 @@ const sp<MemoryDealer>& AudioFlinger::Client::heap() const
return mMemoryDealer;
}
+// Reserve one of the limited slots for a timed audio track associated
+// with this client
+bool AudioFlinger::Client::reserveTimedTrack()
+{
+ const int kMaxTimedTracksPerClient = 4;
+
+ Mutex::Autolock _l(mTimedTrackLock);
+
+ if (mTimedTrackCount >= kMaxTimedTracksPerClient) {
+ LOGW("can not create timed track - pid %d has exceeded the limit",
+ mPid);
+ return false;
+ }
+
+ mTimedTrackCount++;
+ return true;
+}
+
+// Release a slot for a timed audio track
+void AudioFlinger::Client::releaseTimedTrack()
+{
+ Mutex::Autolock _l(mTimedTrackLock);
+ mTimedTrackCount--;
+}
+
// ----------------------------------------------------------------------------
AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
@@ -4112,6 +4731,38 @@ status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
return mTrack->attachAuxEffect(EffectId);
}
+status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
+ sp<IMemory>* buffer) {
+ if (!mTrack->isTimedTrack())
+ return INVALID_OPERATION;
+
+ PlaybackThread::TimedTrack* tt =
+ reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+ return tt->allocateTimedBuffer(size, buffer);
+}
+
+status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
+ int64_t pts) {
+ if (!mTrack->isTimedTrack())
+ return INVALID_OPERATION;
+
+ PlaybackThread::TimedTrack* tt =
+ reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+ return tt->queueTimedBuffer(buffer, pts);
+}
+
+status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
+ const LinearTransform& xform, int target) {
+
+ if (!mTrack->isTimedTrack())
+ return INVALID_OPERATION;
+
+ PlaybackThread::TimedTrack* tt =
+ reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+ return tt->setMediaTimeTransform(
+ xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
+}
+
status_t AudioFlinger::TrackHandle::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
@@ -4349,7 +5000,8 @@ bool AudioFlinger::RecordThread::threadLoop()
}
buffer.frameCount = mFrameCount;
- if (LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+ if (LIKELY(mActiveTrack->getNextBuffer(
+ &buffer, AudioBufferProvider::kInvalidPTS) == NO_ERROR)) {
size_t framesOut = buffer.frameCount;
if (mResampler == 0) {
// no resampling
@@ -4628,7 +5280,7 @@ status_t AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args)
return NO_ERROR;
}
-status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
{
size_t framesReq = buffer->frameCount;
size_t framesReady = mFrameCount - mRsmpInIndex;
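
The heart of the TimedTrack changes above is TimedTrack::getNextBuffer(): it converts the head buffer's media-time PTS into local time, subtracts the PTS of the next output frame, and expresses the difference as a Q32.32 output-frame count to decide whether the head buffer is on time, early (yield silence), or late (skip or drop). A stand-alone restatement of that arithmetic with made-up numbers (a 24 MHz local clock and 48 kHz output are assumed; neither value comes from the patch):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
    // Assumed clock rates; the real code reduces this ratio up front with
    // LinearTransform::reduce (24 MHz / 48 kHz -> 500 ticks per frame).
    const int64_t localTimeFreq = 24000000;
    const int64_t sampleRate    = 48000;

    // The head buffer wants to start 24000 ticks (1 ms) after the next
    // output frame will be rendered.
    const int64_t headLocalPTS = 1000024000;
    const int64_t outputPTS    = 1000000000;

    // Local-time delta converted to output frames in Q32.32 fixed point,
    // mirroring mLocalTimeToSampleTransform.doForwardTransform(delta << 32, ...).
    // (This ordering is fine for these small values; the patch avoids overflow
    // by reducing the numerator/denominator first.)
    int64_t deltaTicks  = headLocalPTS - outputPTS;
    int64_t sampleDelta = deltaTicks * (1LL << 32) * sampleRate / localTimeFreq;

    // Round to whole frames, as in the patch's (sampleDelta + 0x80000000) >> 32.
    int64_t wholeFrames = (sampleDelta + 0x80000000LL) >> 32;

    const int64_t kSampleStartupThreshold = 1LL << 32;   // one frame, Q32.32
    if (llabs(sampleDelta) <= kSampleStartupThreshold)
        printf("on time: mix the head buffer now\n");
    else if (sampleDelta > 0)
        printf("early: yield %lld frames of silence first\n", (long long)wholeFrames);
    else
        printf("late: skip %lld frames of the head buffer\n", (long long)-wholeFrames);
    return 0;
}

With these inputs the head buffer is 1 ms early, so the sketch reports a 48-frame silence gap, matching the timedYieldSilence_l() path above.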
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 9bd2c7f..0eed5f9 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -22,6 +22,8 @@
#include <sys/types.h>
#include <limits.h>
+#include <common_time/cc_helper.h>
+
#include <media/IAudioFlinger.h>
#include <media/IAudioFlingerClient.h>
#include <media/IAudioTrack.h>
@@ -61,7 +63,7 @@ class AudioResampler;
// ----------------------------------------------------------------------------
-static const nsecs_t kStandbyTimeInNsecs = seconds(3);
+static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
class AudioFlinger :
public BinderService<AudioFlinger>,
@@ -84,6 +86,7 @@ public:
uint32_t flags,
const sp<IMemory>& sharedBuffer,
int output,
+ bool isTimed,
int *sessionId,
status_t *status);
@@ -97,6 +100,7 @@ public:
virtual status_t setMasterMute(bool muted);
virtual float masterVolume() const;
+ virtual float masterVolumeSW() const;
virtual bool masterMute() const;
virtual status_t setStreamVolume(int stream, float value, int output);
@@ -188,6 +192,7 @@ public:
AUDIO_HW_SET_MIC_MUTE,
AUDIO_SET_VOICE_VOLUME,
AUDIO_SET_PARAMETER,
+ AUDIO_HW_GET_MASTER_VOLUME,
};
// record interface
@@ -221,6 +226,11 @@ private:
audio_hw_device_t* findSuitableHwDev_l(uint32_t devices);
void purgeStaleEffects_l();
+ static Mutex mStaticInitLock;
+ static bool mStaticInitDone;
+ static bool doStaticInit();
+ static nsecs_t mStandbyTimeInNsecs;
+
// Internal dump utilites.
status_t dumpPermissionDenial(int fd, const Vector<String16>& args);
status_t dumpClients(int fd, const Vector<String16>& args);
@@ -235,12 +245,18 @@ private:
pid_t pid() const { return mPid; }
sp<AudioFlinger> audioFlinger() { return mAudioFlinger; }
+ bool reserveTimedTrack();
+ void releaseTimedTrack();
+
private:
Client(const Client&);
Client& operator = (const Client&);
sp<AudioFlinger> mAudioFlinger;
sp<MemoryDealer> mMemoryDealer;
pid_t mPid;
+
+ Mutex mTimedTrackLock;
+ int mTimedTrackCount;
};
// --- Notification Client ---
@@ -346,7 +362,9 @@ private:
TrackBase(const TrackBase&);
TrackBase& operator = (const TrackBase&);
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
+ virtual status_t getNextBuffer(
+ AudioBufferProvider::Buffer* buffer,
+ int64_t pts) = 0;
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
uint32_t format() const {
@@ -611,7 +629,6 @@ private:
int16_t *mainBuffer() { return mMainBuffer; }
int auxEffectId() { return mAuxEffectId; }
-
protected:
friend class ThreadBase;
friend class TrackHandle;
@@ -622,7 +639,11 @@ private:
Track(const Track&);
Track& operator = (const Track&);
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+ virtual status_t getNextBuffer(
+ AudioBufferProvider::Buffer* buffer,
+ int64_t pts);
+ virtual uint32_t framesReady() const;
+
bool isMuted() { return mMute; }
bool isPausing() const {
return mState == PAUSING;
@@ -638,6 +659,8 @@ private:
return (mStreamType == AUDIO_STREAM_CNT);
}
+ virtual bool isTimedTrack() const { return false; }
+
// we don't really need a lock for these
float mVolume[2];
volatile bool mMute;
@@ -655,6 +678,84 @@ private:
bool mHasVolumeController;
}; // end of Track
+ class TimedTrack : public Track {
+ public:
+ static sp<TimedTrack> create(const wp<ThreadBase>& thread,
+ const sp<Client>& client,
+ int streamType,
+ uint32_t sampleRate,
+ uint32_t format,
+ uint32_t channelMask,
+ int frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId);
+ ~TimedTrack();
+
+ class TimedBuffer {
+ public:
+ TimedBuffer();
+ TimedBuffer(const sp<IMemory>& buffer, int64_t pts);
+ const sp<IMemory>& buffer() const { return mBuffer; }
+ int64_t pts() const { return mPTS; }
+ int position() const { return mPosition; }
+ void setPosition(int pos) { mPosition = pos; }
+ private:
+ sp<IMemory> mBuffer;
+ int64_t mPTS;
+ int mPosition;
+ };
+
+ virtual bool isTimedTrack() const { return true; }
+
+ virtual uint32_t framesReady() const;
+
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts);
+ virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+ void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer);
+ void timedYieldSilence_l(uint32_t numFrames,
+ AudioBufferProvider::Buffer* buffer);
+
+ status_t allocateTimedBuffer(size_t size,
+ sp<IMemory>* buffer);
+ status_t queueTimedBuffer(const sp<IMemory>& buffer,
+ int64_t pts);
+ status_t setMediaTimeTransform(const LinearTransform& xform,
+ TimedAudioTrack::TargetTimeline target);
+ void trimTimedBufferQueue_l();
+
+ private:
+ TimedTrack(const wp<ThreadBase>& thread,
+ const sp<Client>& client,
+ int streamType,
+ uint32_t sampleRate,
+ uint32_t format,
+ uint32_t channelMask,
+ int frameCount,
+ const sp<IMemory>& sharedBuffer,
+ int sessionId);
+
+ uint64_t mLocalTimeFreq;
+ LinearTransform mLocalTimeToSampleTransform;
+ LinearTransform mMediaTimeToSampleTransform;
+ sp<MemoryDealer> mTimedMemoryDealer;
+
+ Vector<TimedBuffer> mTimedBufferQueue;
+ bool mQueueHeadInFlight;
+ bool mTrimQueueHeadOnRelease;
+
+ uint8_t* mTimedSilenceBuffer;
+ uint32_t mTimedSilenceBufferSize;
+ mutable Mutex mTimedBufferQueueLock;
+ bool mTimedAudioOutputOnTime;
+ CCHelper mCCHelper;
+
+ Mutex mMediaTimeTransformLock;
+ LinearTransform mMediaTimeTransform;
+ bool mMediaTimeTransformValid;
+ TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget;
+ };
+
// playback track
class OutputTrack : public Track {
@@ -728,6 +829,7 @@ private:
int frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
+ bool isTimed,
status_t *status);
AudioStreamOut* getOutput();
@@ -917,6 +1019,12 @@ private:
virtual void setVolume(float left, float right);
virtual sp<IMemory> getCblk() const;
virtual status_t attachAuxEffect(int effectId);
+ virtual status_t allocateTimedBuffer(size_t size,
+ sp<IMemory>* buffer);
+ virtual status_t queueTimedBuffer(const sp<IMemory>& buffer,
+ int64_t pts);
+ virtual status_t setMediaTimeTransform(const LinearTransform& xform,
+ int target);
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
private:
@@ -964,7 +1072,9 @@ private:
RecordTrack(const RecordTrack&);
RecordTrack& operator = (const RecordTrack&);
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+ virtual status_t getNextBuffer(
+ AudioBufferProvider::Buffer* buffer,
+ int64_t pts);
bool mOverflow;
};
@@ -1000,7 +1110,8 @@ private:
AudioStreamIn* clearInput();
virtual audio_stream_t* stream();
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+ int64_t pts);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
virtual bool checkForNewParameters_l();
virtual String8 getParameters(const String8& keys);
@@ -1384,6 +1495,12 @@ private:
friend class RecordThread;
friend class PlaybackThread;
+ enum master_volume_support {
+ MVS_NONE,
+ MVS_SETONLY,
+ MVS_FULL,
+ };
+
mutable Mutex mLock;
DefaultKeyedVector< pid_t, wp<Client> > mClients;
@@ -1397,6 +1514,8 @@ private:
DefaultKeyedVector< int, sp<PlaybackThread> > mPlaybackThreads;
PlaybackThread::stream_type_t mStreamTypes[AUDIO_STREAM_CNT];
float mMasterVolume;
+ float mMasterVolumeSW;
+ master_volume_support mMasterVolumeSupportLvl;
bool mMasterMute;
DefaultKeyedVector< int, sp<RecordThread> > mRecordThreads;
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 1200f75..98415b1 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "AudioMixer"
//#define LOG_NDEBUG 0
+#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
@@ -30,6 +31,9 @@
#include <system/audio.h>
+#include <common_time/local_clock.h>
+#include <common_time/cc_helper.h>
+
#include "AudioMixer.h"
namespace android {
@@ -47,6 +51,9 @@ static inline int16_t clamp16(int32_t sample)
AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate)
: mActiveTrack(0), mTrackNames(0), mSampleRate(sampleRate)
{
+ LocalClock lc;
+ mLocalTimeFreq = lc.getLocalFreq();
+
mState.enabledTracks= 0;
mState.needsChanged = 0;
mState.frameCount = frameCount;
@@ -74,6 +81,7 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate)
t->in = 0;
t->mainBuffer = NULL;
t->auxBuffer = NULL;
+ t->localTimeFreq = mLocalTimeFreq;
t++;
}
}
@@ -293,6 +301,7 @@ bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate)
if (resampler == 0) {
resampler = AudioResampler::create(
format, channelCount, devSampleRate);
+ resampler->setLocalTimeFreq(localTimeFreq);
}
return true;
}
@@ -357,13 +366,13 @@ status_t AudioMixer::setBufferProvider(AudioBufferProvider* buffer)
-void AudioMixer::process()
+void AudioMixer::process(int64_t pts)
{
- mState.hook(&mState);
+ mState.hook(&mState, pts);
}
-void AudioMixer::process__validate(state_t* state)
+void AudioMixer::process__validate(state_t* state, int64_t pts)
{
LOGW_IF(!state->needsChanged,
"in process__validate() but nothing's invalid");
@@ -467,7 +476,7 @@ void AudioMixer::process__validate(state_t* state)
countActiveTracks, state->enabledTracks,
all16BitsStereoNoResample, resampling, volumeRamp);
- state->hook(state);
+ state->hook(state, pts);
// Now that the volume ramp has been done, set optimal state and
// track hooks for subsequent mixer process
@@ -876,7 +885,7 @@ void AudioMixer::ditherAndClamp(int32_t* out, int32_t const *sums, size_t c)
}
// no-op case
-void AudioMixer::process__nop(state_t* state)
+void AudioMixer::process__nop(state_t* state, int64_t pts)
{
uint32_t e0 = state->enabledTracks;
size_t bufSize = state->frameCount * sizeof(int16_t) * MAX_NUM_CHANNELS;
@@ -906,7 +915,9 @@ void AudioMixer::process__nop(state_t* state)
size_t outFrames = state->frameCount;
while (outFrames) {
t1.buffer.frameCount = outFrames;
- t1.bufferProvider->getNextBuffer(&t1.buffer);
+ int64_t outputPTS = calculateOutputPTS(
+ t1, pts, state->frameCount - outFrames);
+ t1.bufferProvider->getNextBuffer(&t1.buffer, outputPTS);
if (!t1.buffer.raw) break;
outFrames -= t1.buffer.frameCount;
t1.bufferProvider->releaseBuffer(&t1.buffer);
@@ -916,7 +927,7 @@ void AudioMixer::process__nop(state_t* state)
}
// generic code without resampling
-void AudioMixer::process__genericNoResampling(state_t* state)
+void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
{
int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
@@ -928,7 +939,7 @@ void AudioMixer::process__genericNoResampling(state_t* state)
e0 &= ~(1<<i);
track_t& t = state->tracks[i];
t.buffer.frameCount = state->frameCount;
- t.bufferProvider->getNextBuffer(&t.buffer);
+ t.bufferProvider->getNextBuffer(&t.buffer, pts);
t.frameCount = t.buffer.frameCount;
t.in = t.buffer.raw;
// t.in == NULL can happen if the track was flushed just after having
@@ -982,7 +993,9 @@ void AudioMixer::process__genericNoResampling(state_t* state)
if (t.frameCount == 0 && outFrames) {
t.bufferProvider->releaseBuffer(&t.buffer);
t.buffer.frameCount = (state->frameCount - numFrames) - (BLOCKSIZE - outFrames);
- t.bufferProvider->getNextBuffer(&t.buffer);
+ int64_t outputPTS = calculateOutputPTS(
+ t, pts, numFrames + (BLOCKSIZE - outFrames));
+ t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
t.in = t.buffer.raw;
if (t.in == NULL) {
enabledTracks &= ~(1<<i);
@@ -1011,7 +1024,7 @@ void AudioMixer::process__genericNoResampling(state_t* state)
// generic code with resampling
-void AudioMixer::process__genericResampling(state_t* state)
+void AudioMixer::process__genericResampling(state_t* state, int64_t pts)
{
int32_t* const outTemp = state->outputTemp;
const size_t size = sizeof(int32_t) * MAX_NUM_CHANNELS * state->frameCount;
@@ -1050,6 +1063,7 @@ void AudioMixer::process__genericResampling(state_t* state)
// acquire/release the buffers because it's done by
// the resampler.
if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
+ t.resampler->setPTS(pts);
(t.hook)(&t, outTemp, numFrames, state->resampleTemp, aux);
} else {
@@ -1057,7 +1071,8 @@ void AudioMixer::process__genericResampling(state_t* state)
while (outFrames < numFrames) {
t.buffer.frameCount = numFrames - outFrames;
- t.bufferProvider->getNextBuffer(&t.buffer);
+ int64_t outputPTS = calculateOutputPTS(t, pts, outFrames);
+ t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
t.in = t.buffer.raw;
// t.in == NULL can happen if the track was flushed just after having
// been enabled for mixing.
@@ -1077,7 +1092,8 @@ void AudioMixer::process__genericResampling(state_t* state)
}
// one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state,
+ int64_t pts)
{
const int i = 31 - __builtin_clz(state->enabledTracks);
const track_t& t = state->tracks[i];
@@ -1092,7 +1108,8 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
const uint32_t vrl = t.volumeRL;
while (numFrames) {
b.frameCount = numFrames;
- t.bufferProvider->getNextBuffer(&b);
+ int64_t outputPTS = calculateOutputPTS(t, pts, out - t.mainBuffer);
+ t.bufferProvider->getNextBuffer(&b, outputPTS);
int16_t const *in = b.i16;
// in == NULL can happen if the track was flushed just after having
@@ -1135,7 +1152,8 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
// 2 tracks is also a common case
// NEVER used in current implementation of process__validate()
// only use if the 2 tracks have the same output buffer
-void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state,
+ int64_t pts)
{
int i;
uint32_t en = state->enabledTracks;
@@ -1169,7 +1187,9 @@ void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state)
if (frameCount0 == 0) {
b0.frameCount = numFrames;
- t0.bufferProvider->getNextBuffer(&b0);
+ int64_t outputPTS = calculateOutputPTS(t0, pts,
+ out - t0.mainBuffer);
+ t0.bufferProvider->getNextBuffer(&b0, outputPTS);
if (b0.i16 == NULL) {
if (buff == NULL) {
buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -1183,7 +1203,9 @@ void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state)
}
if (frameCount1 == 0) {
b1.frameCount = numFrames;
- t1.bufferProvider->getNextBuffer(&b1);
+ int64_t outputPTS = calculateOutputPTS(t1, pts,
+ out - t0.mainBuffer);
+ t1.bufferProvider->getNextBuffer(&b1, outputPTS);
if (b1.i16 == NULL) {
if (buff == NULL) {
buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -1230,6 +1252,11 @@ void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state)
}
}
+int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
+ int outputFrameIndex)
+{
+ return basePTS + ((outputFrameIndex * t.localTimeFreq) / t.sampleRate);
+}
+
// ----------------------------------------------------------------------------
}; // namespace android
-
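
calculateOutputPTS() above is the piece that lets one hardware timestamp cover a whole mix buffer: the PTS of output frame N is the base PTS plus N frames converted into local-clock ticks. A stand-alone restatement with assumed numbers (24 MHz local clock, 48 kHz mix rate; neither value comes from the patch):

#include <cstdint>
#include <cstdio>

// Same formula as AudioMixer::calculateOutputPTS(), written out with explicit
// parameters instead of track_t fields.
static int64_t calculateOutputPTS(int64_t basePTS, int64_t localTimeFreq,
                                  int64_t sampleRate, int outputFrameIndex) {
    return basePTS + (outputFrameIndex * localTimeFreq) / sampleRate;
}

int main() {
    // 24 MHz local clock, 48 kHz output: 500 local-time ticks per frame.
    int64_t pts = calculateOutputPTS(/*basePTS*/ 1000000, /*localTimeFreq*/ 24000000,
                                     /*sampleRate*/ 48000, /*outputFrameIndex*/ 96);
    printf("frame 96 renders at local time %lld (base + 96 * 500 ticks)\n",
           (long long)pts);
    return 0;
}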
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 0137185..200ad03 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -85,7 +85,7 @@ public:
status_t setParameter(int target, int name, void *value);
status_t setBufferProvider(AudioBufferProvider* bufferProvider);
- void process();
+ void process(int64_t pts);
uint32_t trackNames() const { return mTrackNames; }
@@ -127,7 +127,7 @@ private:
struct state_t;
struct track_t;
- typedef void (*mix_t)(state_t* state);
+ typedef void (*mix_t)(state_t* state, int64_t pts);
typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
static const int BLOCKSIZE = 16; // 4 cache lines
@@ -165,6 +165,8 @@ private:
int32_t* mainBuffer;
int32_t* auxBuffer;
+ uint64_t localTimeFreq;
+
bool setResampler(uint32_t sampleRate, uint32_t devSampleRate);
bool doesResample() const;
void resetResampler();
@@ -188,6 +190,8 @@ private:
uint32_t mTrackNames;
const uint32_t mSampleRate;
+ int64_t mLocalTimeFreq;
+
state_t mState __attribute__((aligned(32)));
void invalidateState(uint32_t mask);
@@ -199,12 +203,17 @@ private:
static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
- static void process__validate(state_t* state);
- static void process__nop(state_t* state);
- static void process__genericNoResampling(state_t* state);
- static void process__genericResampling(state_t* state);
- static void process__OneTrack16BitsStereoNoResampling(state_t* state);
- static void process__TwoTracks16BitsStereoNoResampling(state_t* state);
+ static void process__validate(state_t* state, int64_t pts);
+ static void process__nop(state_t* state, int64_t pts);
+ static void process__genericNoResampling(state_t* state, int64_t pts);
+ static void process__genericResampling(state_t* state, int64_t pts);
+ static void process__OneTrack16BitsStereoNoResampling(state_t* state,
+ int64_t pts);
+ static void process__TwoTracks16BitsStereoNoResampling(state_t* state,
+ int64_t pts);
+
+ static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
+ int outputFrameIndex);
};
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index 9ee5a30..cd2857f 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -59,10 +59,12 @@ private:
#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
void AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
- uint32_t &phaseFraction, uint32_t phaseIncrement);
+ uint32_t &phaseFraction, uint32_t phaseIncrement)
+ __attribute__((noinline));
void AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
- uint32_t &phaseFraction, uint32_t phaseIncrement);
+ uint32_t &phaseFraction, uint32_t phaseIncrement)
+ __attribute__((noinline));
#endif // ASM_ARM_RESAMP1
static inline int32_t Interp(int32_t x0, int32_t x1, uint32_t f) {
@@ -118,7 +120,8 @@ AudioResampler::AudioResampler(int bitDepth, int inChannelCount,
int32_t sampleRate) :
mBitDepth(bitDepth), mChannelCount(inChannelCount),
mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0),
- mPhaseFraction(0) {
+ mPhaseFraction(0), mLocalTimeFreq(0),
+ mPTS(AudioBufferProvider::kInvalidPTS) {
// sanity check on format
if ((bitDepth != 16) ||(inChannelCount < 1) || (inChannelCount > 2)) {
LOGE("Unsupported sample format, %d bits, %d channels", bitDepth,
@@ -152,6 +155,23 @@ void AudioResampler::setVolume(int16_t left, int16_t right) {
mVolume[1] = right;
}
+void AudioResampler::setLocalTimeFreq(uint64_t freq) {
+ mLocalTimeFreq = freq;
+}
+
+void AudioResampler::setPTS(int64_t pts) {
+ mPTS = pts;
+}
+
+int64_t AudioResampler::calculateOutputPTS(int outputFrameIndex) {
+
+ if (mPTS == AudioBufferProvider::kInvalidPTS) {
+ return AudioBufferProvider::kInvalidPTS;
+ } else {
+ return mPTS + ((outputFrameIndex * mLocalTimeFreq) / mSampleRate);
+ }
+}
+
void AudioResampler::reset() {
mInputIndex = 0;
mPhaseFraction = 0;
@@ -198,7 +218,8 @@ void AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount,
// buffer is empty, fetch a new one
while (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
+ provider->getNextBuffer(&mBuffer,
+ calculateOutputPTS(outputIndex / 2));
if (mBuffer.raw == NULL) {
goto resampleStereo16_exit;
}
@@ -292,7 +313,8 @@ void AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount,
// buffer is empty, fetch a new one
while (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
+ provider->getNextBuffer(&mBuffer,
+ calculateOutputPTS(outputIndex / 2));
if (mBuffer.raw == NULL) {
mInputIndex = inputIndex;
mPhaseFraction = phaseFraction;
@@ -602,4 +624,3 @@ void AudioResamplerOrder1::AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32
// ----------------------------------------------------------------------------
}
; // namespace android
-
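
One detail worth noting in the resampler call sites above: outputIndex in these loops counts interleaved int32 output slots, two per output frame (the mix buffer is stereo even for mono input), so the frame index handed to calculateOutputPTS() is outputIndex / 2. A small illustration using the same assumed clock and sample rate as the earlier sketches:

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t basePTS       = 0;
    const int64_t localTimeFreq = 24000000;   // assumed 24 MHz local clock
    const int64_t sampleRate    = 48000;      // assumed 48 kHz stream

    // outputIndex advances by 2 per output frame, as in the resample loops;
    // the PTS is interpolated from the frame index, not the slot index.
    for (int64_t outputIndex = 0; outputIndex < 8; outputIndex += 2) {
        int64_t frame = outputIndex / 2;
        int64_t pts   = basePTS + (frame * localTimeFreq) / sampleRate;
        printf("slot %lld -> frame %lld -> pts %lld ticks\n",
               (long long)outputIndex, (long long)frame, (long long)pts);
    }
    return 0;
}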
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index ffa690a..2b1488b 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -49,6 +49,10 @@ public:
virtual void init() = 0;
virtual void setSampleRate(int32_t inSampleRate);
virtual void setVolume(int16_t left, int16_t right);
+ virtual void setLocalTimeFreq(uint64_t freq);
+
+ // set the PTS of the next buffer output by the resampler
+ virtual void setPTS(int64_t pts);
virtual void resample(int32_t* out, size_t outFrameCount,
AudioBufferProvider* provider) = 0;
@@ -73,6 +77,8 @@ protected:
AudioResampler(const AudioResampler&);
AudioResampler& operator=(const AudioResampler&);
+ int64_t calculateOutputPTS(int outputFrameIndex);
+
int32_t mBitDepth;
int32_t mChannelCount;
int32_t mSampleRate;
@@ -87,6 +93,8 @@ protected:
size_t mInputIndex;
int32_t mPhaseIncrement;
uint32_t mPhaseFraction;
+ uint64_t mLocalTimeFreq;
+ int64_t mPTS;
};
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 4d721f6..e864b30 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -65,7 +65,7 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount,
// fetch first buffer
if (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
+ provider->getNextBuffer(&mBuffer, mPTS);
if (mBuffer.raw == NULL)
return;
// LOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
@@ -95,7 +95,8 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount,
inputIndex = 0;
provider->releaseBuffer(&mBuffer);
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
+ provider->getNextBuffer(&mBuffer,
+ calculateOutputPTS(outputIndex / 2));
if (mBuffer.raw == NULL)
goto save_state; // ugly, but efficient
in = mBuffer.i16;
@@ -130,7 +131,7 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount,
// fetch first buffer
if (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
+ provider->getNextBuffer(&mBuffer, mPTS);
if (mBuffer.raw == NULL)
return;
// LOGW("New buffer: offset=%p, frames=%d\n", mBuffer.raw, mBuffer.frameCount);
@@ -160,7 +161,8 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount,
inputIndex = 0;
provider->releaseBuffer(&mBuffer);
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
+ provider->getNextBuffer(&mBuffer,
+ calculateOutputPTS(outputIndex / 2));
if (mBuffer.raw == NULL)
goto save_state; // ugly, but efficient
// LOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
@@ -181,4 +183,3 @@ save_state:
// ----------------------------------------------------------------------------
}
; // namespace android
-
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index 9e5e254..6184795 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -204,7 +204,8 @@ void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount,
// buffer is empty, fetch a new one
while (buffer.frameCount == 0) {
buffer.frameCount = inFrameCount;
- provider->getNextBuffer(&buffer);
+ provider->getNextBuffer(&buffer,
+ calculateOutputPTS(outputIndex / 2));
if (buffer.raw == NULL) {
goto resample_exit;
}
@@ -355,4 +356,3 @@ void AudioResamplerSinc::interpolate(
// ----------------------------------------------------------------------------
}; // namespace android
-