Diffstat (limited to 'services/audioflinger')
56 files changed, 13284 insertions, 5161 deletions
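Among other things, the AudioFlinger.h/.cpp hunks below rework openOutput(): the I/O handle is now returned through an out-parameter and the method returns a status_t, with the config and device also passed by pointer so the HAL can update them. The following caller-side sketch only illustrates that contract; the wrapper name, the `af` parameter, and the concrete config values (48 kHz stereo PCM to the speaker) are illustrative assumptions, and only the openOutput() signature itself is taken from the diff.

#include <string.h>
#include <media/IAudioFlinger.h>
#include <system/audio.h>
#include <utils/String8.h>

using namespace android;

// Hedged sketch: open a primary output with the new in/out-parameter contract.
status_t openPrimaryOutputSketch(const sp<IAudioFlinger>& af,
                                 audio_module_handle_t module,
                                 audio_io_handle_t *output)
{
    audio_config_t config;
    memset(&config, 0, sizeof(config));
    config.sample_rate = 48000;
    config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    config.format = AUDIO_FORMAT_PCM_16_BIT;

    audio_devices_t device = AUDIO_DEVICE_OUT_SPEAKER;
    uint32_t latencyMs = 0;
    *output = AUDIO_IO_HANDLE_NONE;   // ask AudioFlinger to allocate a handle

    status_t status = af->openOutput(module, output, &config, &device,
                                     String8(""), &latencyMs,
                                     AUDIO_OUTPUT_FLAG_PRIMARY);
    if (status != NO_ERROR || *output == AUDIO_IO_HANDLE_NONE) {
        return status != NO_ERROR ? status : NO_INIT;
    }
    // On success, config may have been adjusted by the HAL and latencyMs holds
    // the latency reported by the newly created playback thread.
    return NO_ERROR;
}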
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk index b895027..697fb37 100644 --- a/services/audioflinger/Android.mk +++ b/services/audioflinger/Android.mk @@ -13,18 +13,28 @@ include $(BUILD_STATIC_LIBRARY) include $(CLEAR_VARS) +LOCAL_SRC_FILES := \ + ServiceUtilities.cpp + +# FIXME Move this library to frameworks/native +LOCAL_MODULE := libserviceutility + +include $(BUILD_STATIC_LIBRARY) + +include $(CLEAR_VARS) + LOCAL_SRC_FILES:= \ AudioFlinger.cpp \ Threads.cpp \ Tracks.cpp \ Effects.cpp \ AudioMixer.cpp.arm \ - AudioPolicyService.cpp \ - ServiceUtilities.cpp \ + PatchPanel.cpp LOCAL_SRC_FILES += StateQueue.cpp LOCAL_C_INCLUDES := \ + $(TOPDIR)frameworks/av/services/audiopolicy \ $(call include-path-for, audio-effects) \ $(call include-path-for, audio-utils) @@ -46,12 +56,15 @@ LOCAL_SHARED_LIBRARIES := \ LOCAL_STATIC_LIBRARIES := \ libscheduling_policy \ libcpustats \ - libmedia_helper + libmedia_helper \ + libserviceutility LOCAL_MODULE:= libaudioflinger LOCAL_32_BIT_ONLY := true LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp AudioWatchdog.cpp +LOCAL_SRC_FILES += FastThread.cpp FastThreadState.cpp +LOCAL_SRC_FILES += FastCapture.cpp FastCaptureState.cpp LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"' @@ -72,10 +85,21 @@ include $(BUILD_SHARED_LIBRARY) include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ - test-resample.cpp \ + test-resample.cpp \ + +LOCAL_C_INCLUDES := \ + $(call include-path-for, audio-utils) + +LOCAL_STATIC_LIBRARIES := \ + libsndfile LOCAL_SHARED_LIBRARIES := \ libaudioresampler \ + libaudioutils \ + libdl \ + libcutils \ + libutils \ + liblog LOCAL_MODULE:= test-resample @@ -88,7 +112,11 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ AudioResampler.cpp.arm \ AudioResamplerCubic.cpp.arm \ - AudioResamplerSinc.cpp.arm + AudioResamplerSinc.cpp.arm \ + AudioResamplerDyn.cpp.arm + +LOCAL_C_INCLUDES := \ + $(call include-path-for, audio-utils) LOCAL_SHARED_LIBRARIES := \ libcutils \ diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp index 33b19a4..e48af20 100644 --- a/services/audioflinger/AudioFlinger.cpp +++ b/services/audioflinger/AudioFlinger.cpp @@ -82,6 +82,7 @@ namespace android { static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n"; static const char kHardwareLockedString[] = "Hardware lock is taken\n"; +static const char kClientLockedString[] = "Client lock is taken\n"; nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs; @@ -104,6 +105,36 @@ static const nsecs_t kMinGlobalEffectEnabletimeNs = seconds(7200); // ---------------------------------------------------------------------------- +const char *formatToString(audio_format_t format) { + switch (format & AUDIO_FORMAT_MAIN_MASK) { + case AUDIO_FORMAT_PCM: + switch (format) { + case AUDIO_FORMAT_PCM_16_BIT: return "pcm16"; + case AUDIO_FORMAT_PCM_8_BIT: return "pcm8"; + case AUDIO_FORMAT_PCM_32_BIT: return "pcm32"; + case AUDIO_FORMAT_PCM_8_24_BIT: return "pcm8.24"; + case AUDIO_FORMAT_PCM_FLOAT: return "pcmfloat"; + case AUDIO_FORMAT_PCM_24_BIT_PACKED: return "pcm24"; + default: + break; + } + break; + case AUDIO_FORMAT_MP3: return "mp3"; + case AUDIO_FORMAT_AMR_NB: return "amr-nb"; + case AUDIO_FORMAT_AMR_WB: return "amr-wb"; + case AUDIO_FORMAT_AAC: return "aac"; + case AUDIO_FORMAT_HE_AAC_V1: return "he-aac-v1"; + case AUDIO_FORMAT_HE_AAC_V2: return "he-aac-v2"; + case AUDIO_FORMAT_VORBIS: return "vorbis"; + case AUDIO_FORMAT_OPUS: return "opus"; + case 
AUDIO_FORMAT_AC3: return "ac-3"; + case AUDIO_FORMAT_E_AC3: return "e-ac-3"; + default: + break; + } + return "unknown"; +} + static int load_audio_interface(const char *if_name, audio_hw_device_t **dev) { const hw_module_t *mod; @@ -121,7 +152,7 @@ static int load_audio_interface(const char *if_name, audio_hw_device_t **dev) if (rc) { goto out; } - if ((*dev)->common.version != AUDIO_DEVICE_API_VERSION_CURRENT) { + if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) { ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version); rc = BAD_VALUE; goto out; @@ -138,6 +169,7 @@ out: AudioFlinger::AudioFlinger() : BnAudioFlinger(), mPrimaryHardwareDev(NULL), + mAudioHwDevs(NULL), mHardwareStatus(AUDIO_HW_IDLE), mMasterVolume(1.0f), mMasterMute(false), @@ -146,14 +178,16 @@ AudioFlinger::AudioFlinger() mBtNrecIsOff(false), mIsLowRamDevice(true), mIsDeviceTypeKnown(false), - mGlobalEffectEnableTime(0) + mGlobalEffectEnableTime(0), + mPrimaryOutputSampleRate(0) { getpid_cached = getpid(); char value[PROPERTY_VALUE_MAX]; bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1); if (doLog) { - mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters"); + mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters", MemoryHeapBase::READ_ONLY); } + #ifdef TEE_SINK (void) property_get("ro.debuggable", value, "0"); int debuggable = atoi(value); @@ -162,12 +196,16 @@ AudioFlinger::AudioFlinger() (void) property_get("af.tee", value, "0"); teeEnabled = atoi(value); } - if (teeEnabled & 1) + // FIXME symbolic constants here + if (teeEnabled & 1) { mTeeSinkInputEnabled = true; - if (teeEnabled & 2) + } + if (teeEnabled & 2) { mTeeSinkOutputEnabled = true; - if (teeEnabled & 4) + } + if (teeEnabled & 4) { mTeeSinkTrackEnabled = true; + } #endif } @@ -191,6 +229,8 @@ void AudioFlinger::onFirstRef() } } + mPatchPanel = new PatchPanel(this); + mMode = AUDIO_MODE_NORMAL; } @@ -210,6 +250,18 @@ AudioFlinger::~AudioFlinger() audio_hw_device_close(mAudioHwDevs.valueAt(i)->hwDevice()); delete mAudioHwDevs.valueAt(i); } + + // Tell media.log service about any old writers that still need to be unregistered + sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); + if (binder != 0) { + sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder)); + for (size_t count = mUnregisteredWriters.size(); count > 0; count--) { + sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory()); + mUnregisteredWriters.pop(); + mediaLogService->unregisterWriter(iMemory); + } + } + } static const char * const audio_interfaces[] = { @@ -249,7 +301,7 @@ AudioFlinger::AudioHwDevice* AudioFlinger::findSuitableHwDev_l( return NULL; } -void AudioFlinger::dumpClients(int fd, const Vector<String16>& args) +void AudioFlinger::dumpClients(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -271,17 +323,17 @@ void AudioFlinger::dumpClients(int fd, const Vector<String16>& args) } result.append("Global session refs:\n"); - result.append(" session pid count\n"); + result.append(" session pid count\n"); for (size_t i = 0; i < mAudioSessionRefs.size(); i++) { AudioSessionRef *r = mAudioSessionRefs[i]; - snprintf(buffer, SIZE, " %7d %3d %3d\n", r->mSessionid, r->mPid, r->mCnt); + snprintf(buffer, SIZE, " %7d %5d %5d\n", r->mSessionid, r->mPid, r->mCnt); result.append(buffer); } write(fd, result.string(), result.size()); } -void AudioFlinger::dumpInternals(int fd, const Vector<String16>& args) +void 
AudioFlinger::dumpInternals(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -296,7 +348,7 @@ void AudioFlinger::dumpInternals(int fd, const Vector<String16>& args) write(fd, result.string(), result.size()); } -void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args) +void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -344,7 +396,16 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args) write(fd, result.string(), result.size()); } + bool clientLocked = dumpTryLock(mClientLock); + if (!clientLocked) { + String8 result(kClientLockedString); + write(fd, result.string(), result.size()); + } dumpClients(fd, args); + if (clientLocked) { + mClientLock.unlock(); + } + dumpInternals(fd, args); // dump playback threads @@ -357,6 +418,13 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args) mRecordThreads.valueAt(i)->dump(fd, args); } + // dump orphan effect chains + if (mOrphanEffectChains.size() != 0) { + write(fd, " Orphan Effect Chains\n", strlen(" Orphan Effect Chains\n")); + for (size_t i = 0; i < mOrphanEffectChains.size(); i++) { + mOrphanEffectChains.valueAt(i)->dump(fd, args); + } + } // dump all hardware devs for (size_t i = 0; i < mAudioHwDevs.size(); i++) { audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice(); @@ -388,8 +456,9 @@ status_t AudioFlinger::dump(int fd, const Vector<String16>& args) return NO_ERROR; } -sp<AudioFlinger::Client> AudioFlinger::registerPid_l(pid_t pid) +sp<AudioFlinger::Client> AudioFlinger::registerPid(pid_t pid) { + Mutex::Autolock _cl(mClientLock); // If pid is already in the mClients wp<> map, then use that entry // (for which promote() is always != 0), otherwise create a new entry and Client. sp<Client> client = mClients.valueFor(pid).promote(); @@ -403,16 +472,44 @@ sp<AudioFlinger::Client> AudioFlinger::registerPid_l(pid_t pid) sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name) { + // If there is no memory allocated for logs, return a dummy writer that does nothing if (mLogMemoryDealer == 0) { return new NBLog::Writer(); } - sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); - sp<NBLog::Writer> writer = new NBLog::Writer(size, shared); sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); - if (binder != 0) { - interface_cast<IMediaLogService>(binder)->registerWriter(shared, size, name); + // Similarly if we can't contact the media.log service, also return a dummy writer + if (binder == 0) { + return new NBLog::Writer(); + } + sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder)); + sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); + // If allocation fails, consult the vector of previously unregistered writers + // and garbage-collect one or more them until an allocation succeeds + if (shared == 0) { + Mutex::Autolock _l(mUnregisteredWritersLock); + for (size_t count = mUnregisteredWriters.size(); count > 0; count--) { + { + // Pick the oldest stale writer to garbage-collect + sp<IMemory> iMemory(mUnregisteredWriters[0]->getIMemory()); + mUnregisteredWriters.removeAt(0); + mediaLogService->unregisterWriter(iMemory); + // Now the media.log remote reference to IMemory is gone. 
When our last local + // reference to IMemory also drops to zero at end of this block, + // the IMemory destructor will deallocate the region from mLogMemoryDealer. + } + // Re-attempt the allocation + shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); + if (shared != 0) { + goto success; + } + } + // Even after garbage-collecting all old writers, there is still not enough memory, + // so return a dummy writer + return new NBLog::Writer(); } - return writer; +success: + mediaLogService->registerWriter(shared, size, name); + return new NBLog::Writer(size, shared); } void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer) @@ -424,13 +521,10 @@ void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer) if (iMemory == 0) { return; } - sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); - if (binder != 0) { - interface_cast<IMediaLogService>(binder)->unregisterWriter(iMemory); - // Now the media.log remote reference to IMemory is gone. - // When our last local reference to IMemory also drops to zero, - // the IMemory destructor will deallocate the region from mMemoryDealer. - } + // Rather than removing the writer immediately, append it to a queue of old writers to + // be garbage-collected later. This allows us to continue to view old logs for a while. + Mutex::Autolock _l(mUnregisteredWritersLock); + mUnregisteredWriters.push(writer); } // IAudioFlinger interface @@ -441,13 +535,12 @@ sp<IAudioTrack> AudioFlinger::createTrack( uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *frameCount, IAudioFlinger::track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, int *sessionId, - String8& name, int clientUid, status_t *status) { @@ -465,10 +558,29 @@ sp<IAudioTrack> AudioFlinger::createTrack( goto Exit; } - // client is responsible for conversion of 8-bit PCM to 16-bit PCM, - // and we don't yet support 8.24 or 32-bit PCM - if (audio_is_linear_pcm(format) && format != AUDIO_FORMAT_PCM_16_BIT) { - ALOGE("createTrack() invalid format %d", format); + // further sample rate checks are performed by createTrack_l() depending on the thread type + if (sampleRate == 0) { + ALOGE("createTrack() invalid sample rate %u", sampleRate); + lStatus = BAD_VALUE; + goto Exit; + } + + // further channel mask checks are performed by createTrack_l() depending on the thread type + if (!audio_is_output_channel(channelMask)) { + ALOGE("createTrack() invalid channel mask %#x", channelMask); + lStatus = BAD_VALUE; + goto Exit; + } + + // further format checks are performed by createTrack_l() depending on the thread type + if (!audio_is_valid_format(format)) { + ALOGE("createTrack() invalid format %#x", format); + lStatus = BAD_VALUE; + goto Exit; + } + + if (sharedBuffer != 0 && sharedBuffer->pointer() == NULL) { + ALOGE("createTrack() sharedBuffer is non-0 but has NULL pointer()"); lStatus = BAD_VALUE; goto Exit; } @@ -476,7 +588,6 @@ sp<IAudioTrack> AudioFlinger::createTrack( { Mutex::Autolock _l(mLock); PlaybackThread *thread = checkPlaybackThread_l(output); - PlaybackThread *effectThread = NULL; if (thread == NULL) { ALOGE("no playback thread found for output handle %d", output); lStatus = BAD_VALUE; @@ -484,24 +595,23 @@ sp<IAudioTrack> AudioFlinger::createTrack( } pid_t pid = IPCThreadState::self()->getCallingPid(); + client = registerPid(pid); - client = registerPid_l(pid); - - ALOGV("createTrack() sessionId: %d", (sessionId == NULL) ? 
-2 : *sessionId); - if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) { + PlaybackThread *effectThread = NULL; + if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) { + lSessionId = *sessionId; // check if an effect chain with the same session ID is present on another // output thread and move it here. for (size_t i = 0; i < mPlaybackThreads.size(); i++) { sp<PlaybackThread> t = mPlaybackThreads.valueAt(i); if (mPlaybackThreads.keyAt(i) != output) { - uint32_t sessions = t->hasAudioSession(*sessionId); + uint32_t sessions = t->hasAudioSession(lSessionId); if (sessions & PlaybackThread::EFFECT_SESSION) { effectThread = t.get(); break; } } } - lSessionId = *sessionId; } else { // if no audio session id is provided, create one here lSessionId = nextUniqueId(); @@ -519,6 +629,7 @@ sp<IAudioTrack> AudioFlinger::createTrack( // move effect chain to this output thread if an effect on same session was waiting // for a track to be created if (lStatus == NO_ERROR && effectThread != NULL) { + // no risk of deadlock because AudioFlinger::mLock is held Mutex::Autolock _dl(thread->mLock); Mutex::Autolock _sl(effectThread->mLock); moveEffectChain_l(lSessionId, effectThread, thread, true); @@ -538,23 +649,27 @@ sp<IAudioTrack> AudioFlinger::createTrack( } } } + } - if (lStatus == NO_ERROR) { - // s for server's pid, n for normal mixer name, f for fast index - name = String8::format("s:%d;n:%d;f:%d", getpid_cached, track->name() - AudioMixer::TRACK0, - track->fastIndex()); - trackHandle = new TrackHandle(track); - } else { - // remove local strong reference to Client before deleting the Track so that the Client - // destructor is called by the TrackBase destructor with mLock held - client.clear(); + + if (lStatus != NO_ERROR) { + // remove local strong reference to Client before deleting the Track so that the + // Client destructor is called by the TrackBase destructor with mClientLock held + // Don't hold mClientLock when releasing the reference on the track as the + // destructor will acquire it. 
+ { + Mutex::Autolock _cl(mClientLock); + client.clear(); + } track.clear(); + goto Exit; } + // return handle to client + trackHandle = new TrackHandle(track); + Exit: - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return trackHandle; } @@ -569,17 +684,6 @@ uint32_t AudioFlinger::sampleRate(audio_io_handle_t output) const return thread->sampleRate(); } -int AudioFlinger::channelCount(audio_io_handle_t output) const -{ - Mutex::Autolock _l(mLock); - PlaybackThread *thread = checkPlaybackThread_l(output); - if (thread == NULL) { - ALOGW("channelCount() unknown thread %d", output); - return 0; - } - return thread->channelCount(); -} - audio_format_t AudioFlinger::format(audio_io_handle_t output) const { Mutex::Autolock _l(mLock); @@ -699,9 +803,14 @@ status_t AudioFlinger::setMicMute(bool state) } AutoMutex lock(mHardwareLock); - audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice(); mHardwareStatus = AUDIO_HW_SET_MIC_MUTE; - ret = dev->set_mic_mute(dev, state); + for (size_t i = 0; i < mAudioHwDevs.size(); i++) { + audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice(); + status_t result = dev->set_mic_mute(dev, state); + if (result != NO_ERROR) { + ret = result; + } + } mHardwareStatus = AUDIO_HW_IDLE; return ret; } @@ -796,7 +905,7 @@ status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value, AutoMutex lock(mLock); PlaybackThread *thread = NULL; - if (output) { + if (output != AUDIO_IO_HANDLE_NONE) { thread = checkPlaybackThread_l(output); if (thread == NULL) { return BAD_VALUE; @@ -845,7 +954,7 @@ float AudioFlinger::streamVolume(audio_stream_type_t stream, audio_io_handle_t o AutoMutex lock(mLock); float volume; - if (output) { + if (output != AUDIO_IO_HANDLE_NONE) { PlaybackThread *thread = checkPlaybackThread_l(output); if (thread == NULL) { return 0.0f; @@ -878,8 +987,8 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& return PERMISSION_DENIED; } - // ioHandle == 0 means the parameters are global to the audio hardware interface - if (ioHandle == 0) { + // AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface + if (ioHandle == AUDIO_IO_HANDLE_NONE) { Mutex::Autolock _l(mLock); status_t final_result = NO_ERROR; { @@ -961,7 +1070,7 @@ String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& k Mutex::Autolock _l(mLock); - if (ioHandle == 0) { + if (ioHandle == AUDIO_IO_HANDLE_NONE) { String8 out_s8; for (size_t i = 0; i < mAudioHwDevs.size(); i++) { @@ -1000,7 +1109,7 @@ size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t form AutoMutex lock(mHardwareLock); mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE; - struct audio_config config; + audio_config_t config; memset(&config, 0, sizeof(config)); config.sample_rate = sampleRate; config.channel_mask = channelMask; @@ -1061,21 +1170,32 @@ status_t AudioFlinger::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrame void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client) { - Mutex::Autolock _l(mLock); + if (client == 0) { + return; + } + bool clientAdded = false; + { + Mutex::Autolock _cl(mClientLock); - pid_t pid = IPCThreadState::self()->getCallingPid(); - if (mNotificationClients.indexOfKey(pid) < 0) { - sp<NotificationClient> notificationClient = new NotificationClient(this, - client, - pid); - ALOGV("registerClient() client %p, pid %d", notificationClient.get(), pid); + pid_t pid = IPCThreadState::self()->getCallingPid(); + if 
(mNotificationClients.indexOfKey(pid) < 0) { + sp<NotificationClient> notificationClient = new NotificationClient(this, + client, + pid); + ALOGV("registerClient() client %p, pid %d", notificationClient.get(), pid); - mNotificationClients.add(pid, notificationClient); + mNotificationClients.add(pid, notificationClient); - sp<IBinder> binder = client->asBinder(); - binder->linkToDeath(notificationClient); + sp<IBinder> binder = client->asBinder(); + binder->linkToDeath(notificationClient); + clientAdded = true; + } + } + // mClientLock should not be held here because ThreadBase::sendIoConfigEvent() will lock the + // ThreadBase mutex and the locking order is ThreadBase::mLock then AudioFlinger::mClientLock. + if (clientAdded) { // the config change is always sent from playback or record threads to avoid deadlock // with AudioSystem::gLock for (size_t i = 0; i < mPlaybackThreads.size(); i++) { @@ -1091,8 +1211,10 @@ void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client) void AudioFlinger::removeNotificationClient(pid_t pid) { Mutex::Autolock _l(mLock); - - mNotificationClients.removeItem(pid); + { + Mutex::Autolock _cl(mClientLock); + mNotificationClients.removeItem(pid); + } ALOGV("%d died, releasing its sessions", pid); size_t num = mAudioSessionRefs.size(); @@ -1115,17 +1237,18 @@ void AudioFlinger::removeNotificationClient(pid_t pid) } } -// audioConfigChanged_l() must be called with AudioFlinger::mLock held -void AudioFlinger::audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2) +void AudioFlinger::audioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) { + Mutex::Autolock _l(mClientLock); size_t size = mNotificationClients.size(); for (size_t i = 0; i < size; i++) { - mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, ioHandle, - param2); + mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, + ioHandle, + param2); } } -// removeClient_l() must be called with AudioFlinger::mLock held +// removeClient_l() must be called with AudioFlinger::mClientLock held void AudioFlinger::removeClient_l(pid_t pid) { ALOGV("removeClient_l() pid %d, calling pid %d", pid, @@ -1163,7 +1286,7 @@ AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid) // 1 MB of address space is good for 32 tracks, 8 buffers each, 4 KB/buffer } -// Client destructor must be called with AudioFlinger::mLock held +// Client destructor must be called with AudioFlinger::mClientLock held AudioFlinger::Client::~Client() { mAudioFlinger->removeClient_l(mPid); @@ -1212,7 +1335,7 @@ AudioFlinger::NotificationClient::~NotificationClient() { } -void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who) +void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who __unused) { sp<NotificationClient> keep(this); mAudioFlinger->removeNotificationClient(mPid); @@ -1230,20 +1353,24 @@ sp<IAudioRecord> AudioFlinger::openRecord( uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *frameCount, IAudioFlinger::track_flags_t *flags, pid_t tid, int *sessionId, + size_t *notificationFrames, + sp<IMemory>& cblk, + sp<IMemory>& buffers, status_t *status) { sp<RecordThread::RecordTrack> recordTrack; sp<RecordHandle> recordHandle; sp<Client> client; status_t lStatus; - RecordThread *thread; - size_t inFrameCount; int lSessionId; + cblk.clear(); + buffers.clear(); + // check calling permissions if (!recordingAllowed()) { 
ALOGE("openRecord() permission denied: recording not allowed"); @@ -1251,16 +1378,31 @@ sp<IAudioRecord> AudioFlinger::openRecord( goto Exit; } - if (format != AUDIO_FORMAT_PCM_16_BIT) { - ALOGE("openRecord() invalid format %d", format); + // further sample rate checks are performed by createRecordTrack_l() + if (sampleRate == 0) { + ALOGE("openRecord() invalid sample rate %u", sampleRate); lStatus = BAD_VALUE; goto Exit; } - // add client to list - { // scope for mLock + // we don't yet support anything other than 16-bit PCM + if (!(audio_is_valid_format(format) && + audio_is_linear_pcm(format) && format == AUDIO_FORMAT_PCM_16_BIT)) { + ALOGE("openRecord() invalid format %#x", format); + lStatus = BAD_VALUE; + goto Exit; + } + + // further channel mask checks are performed by createRecordTrack_l() + if (!audio_is_input_channel(channelMask)) { + ALOGE("openRecord() invalid channel mask %#x", channelMask); + lStatus = BAD_VALUE; + goto Exit; + } + + { Mutex::Autolock _l(mLock); - thread = checkRecordThread_l(input); + RecordThread *thread = checkRecordThread_l(input); if (thread == NULL) { ALOGE("openRecord() checkRecordThread_l failed"); lStatus = BAD_VALUE; @@ -1275,42 +1417,58 @@ sp<IAudioRecord> AudioFlinger::openRecord( } pid_t pid = IPCThreadState::self()->getCallingPid(); - client = registerPid_l(pid); + client = registerPid(pid); - // If no audio session id is provided, create one here - if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) { + if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) { lSessionId = *sessionId; } else { + // if no audio session id is provided, create one here lSessionId = nextUniqueId(); if (sessionId != NULL) { *sessionId = lSessionId; } } - // create new record track. - // The record track uses one track in mHardwareMixerThread by convention. + ALOGV("openRecord() lSessionId: %d input %d", lSessionId, input); + // TODO: the uid should be passed in as a parameter to openRecord recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask, - frameCount, lSessionId, + frameCount, lSessionId, notificationFrames, IPCThreadState::self()->getCallingUid(), flags, tid, &lStatus); LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0)); + + if (lStatus == NO_ERROR) { + // Check if one effect chain was awaiting for an AudioRecord to be created on this + // session and move it to this thread. + sp<EffectChain> chain = getOrphanEffectChain_l((audio_session_t)lSessionId); + if (chain != 0) { + Mutex::Autolock _l(thread->mLock); + thread->addEffectChain_l(chain); + } + } } + if (lStatus != NO_ERROR) { // remove local strong reference to Client before deleting the RecordTrack so that the - // Client destructor is called by the TrackBase destructor with mLock held - client.clear(); + // Client destructor is called by the TrackBase destructor with mClientLock held + // Don't hold mClientLock when releasing the reference on the track as the + // destructor will acquire it. 
+ { + Mutex::Autolock _cl(mClientLock); + client.clear(); + } recordTrack.clear(); goto Exit; } - // return to handle to client + cblk = recordTrack->getCblk(); + buffers = recordTrack->getBuffers(); + + // return handle to client recordHandle = new RecordHandle(recordTrack); - lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return recordHandle; } @@ -1320,6 +1478,9 @@ Exit: audio_module_handle_t AudioFlinger::loadHwModule(const char *name) { + if (name == NULL) { + return 0; + } if (!settingsAllowed()) { return 0; } @@ -1398,7 +1559,7 @@ audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name) } audio_module_handle_t handle = nextUniqueId(); - mAudioHwDevs.add(handle, new AudioHwDevice(name, dev, flags)); + mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags)); ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d", name, dev->common.module->name, dev->common.module->id, handle); @@ -1440,117 +1601,155 @@ status_t AudioFlinger::setLowRamDevice(bool isLowRamDevice) return NO_ERROR; } -// ---------------------------------------------------------------------------- - -audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, - audio_devices_t *pDevices, - uint32_t *pSamplingRate, - audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask, - uint32_t *pLatencyMs, - audio_output_flags_t flags, - const audio_offload_info_t *offloadInfo) +audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId) { - PlaybackThread *thread = NULL; - struct audio_config config; - config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0; - config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0; - config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT; - if (offloadInfo) { - config.offload_info = *offloadInfo; + Mutex::Autolock _l(mLock); + for (size_t i = 0; i < mPlaybackThreads.size(); i++) { + sp<PlaybackThread> thread = mPlaybackThreads.valueAt(i); + if ((thread->hasAudioSession(sessionId) & ThreadBase::TRACK_SESSION) != 0) { + // A session can only be on one thread, so exit after first match + String8 reply = thread->getParameters(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC)); + AudioParameter param = AudioParameter(reply); + int value; + if (param.getInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value) == NO_ERROR) { + return value; + } + break; + } } + return AUDIO_HW_SYNC_INVALID; +} - audio_stream_out_t *outStream = NULL; - AudioHwDevice *outHwDev; +// ---------------------------------------------------------------------------- - ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x", - module, - (pDevices != NULL) ? *pDevices : 0, - config.sample_rate, - config.format, - config.channel_mask, - flags); - ALOGV("openOutput(), offloadInfo %p version 0x%04x", - offloadInfo, offloadInfo == NULL ? 
-1 : offloadInfo->version ); - if (pDevices == NULL || *pDevices == 0) { +sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module, + audio_io_handle_t *output, + audio_config_t *config, + audio_devices_t devices, + const String8& address, + audio_output_flags_t flags) +{ + AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices); + if (outHwDev == NULL) { return 0; } - Mutex::Autolock _l(mLock); - - outHwDev = findSuitableHwDev_l(module, *pDevices); - if (outHwDev == NULL) - return 0; - audio_hw_device_t *hwDevHal = outHwDev->hwDevice(); - audio_io_handle_t id = nextUniqueId(); + if (*output == AUDIO_IO_HANDLE_NONE) { + *output = nextUniqueId(); + } mHardwareStatus = AUDIO_HW_OUTPUT_OPEN; + audio_stream_out_t *outStream = NULL; + + // FOR TESTING ONLY: + // This if statement allows overriding the audio policy settings + // and forcing a specific format or channel mask to the HAL/Sink device for testing. + if (!(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) { + // Check only for Normal Mixing mode + if (kEnableExtendedPrecision) { + // Specify format (uncomment one below to choose) + //config->format = AUDIO_FORMAT_PCM_FLOAT; + //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED; + //config->format = AUDIO_FORMAT_PCM_32_BIT; + //config->format = AUDIO_FORMAT_PCM_8_24_BIT; + // ALOGV("openOutput_l() upgrading format to %#08x", config->format); + } + if (kEnableExtendedChannels) { + // Specify channel mask (uncomment one below to choose) + //config->channel_mask = audio_channel_out_mask_from_count(4); // for USB 4ch + //config->channel_mask = audio_channel_mask_from_representation_and_bits( + // AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1); // another 4ch example + } + } + status_t status = hwDevHal->open_output_stream(hwDevHal, - id, - *pDevices, - (audio_output_flags_t)flags, - &config, - &outStream); + *output, + devices, + flags, + config, + &outStream, + address.string()); mHardwareStatus = AUDIO_HW_IDLE; - ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %#08x, " - "Channels %x, status %d", + ALOGV("openOutput_l() openOutputStream returned output %p, sampleRate %d, Format %#x, " + "channelMask %#x, status %d", outStream, - config.sample_rate, - config.format, - config.channel_mask, + config->sample_rate, + config->format, + config->channel_mask, status); if (status == NO_ERROR && outStream != NULL) { - AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags); + AudioStreamOut *outputStream = new AudioStreamOut(outHwDev, outStream, flags); + PlaybackThread *thread; if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { - thread = new OffloadThread(this, output, id, *pDevices); - ALOGV("openOutput() created offload output: ID %d thread %p", id, thread); - } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) || - (config.format != AUDIO_FORMAT_PCM_16_BIT) || - (config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) { - thread = new DirectOutputThread(this, output, id, *pDevices); - ALOGV("openOutput() created direct output: ID %d thread %p", id, thread); + thread = new OffloadThread(this, outputStream, *output, devices); + ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread); + } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) + || !isValidPcmSinkFormat(config->format) + || !isValidPcmSinkChannelMask(config->channel_mask)) { + thread = new DirectOutputThread(this, outputStream, *output, devices); + ALOGV("openOutput_l() created direct output: ID %d thread %p", 
*output, thread); } else { - thread = new MixerThread(this, output, id, *pDevices); - ALOGV("openOutput() created mixer output: ID %d thread %p", id, thread); + thread = new MixerThread(this, outputStream, *output, devices); + ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread); } - mPlaybackThreads.add(id, thread); + mPlaybackThreads.add(*output, thread); + return thread; + } - if (pSamplingRate != NULL) { - *pSamplingRate = config.sample_rate; - } - if (pFormat != NULL) { - *pFormat = config.format; - } - if (pChannelMask != NULL) { - *pChannelMask = config.channel_mask; - } - if (pLatencyMs != NULL) { - *pLatencyMs = thread->latency(); - } + return 0; +} + +status_t AudioFlinger::openOutput(audio_module_handle_t module, + audio_io_handle_t *output, + audio_config_t *config, + audio_devices_t *devices, + const String8& address, + uint32_t *latencyMs, + audio_output_flags_t flags) +{ + ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x", + module, + (devices != NULL) ? *devices : 0, + config->sample_rate, + config->format, + config->channel_mask, + flags); + + if (*devices == AUDIO_DEVICE_NONE) { + return BAD_VALUE; + } + + Mutex::Autolock _l(mLock); + + sp<PlaybackThread> thread = openOutput_l(module, output, config, *devices, address, flags); + if (thread != 0) { + *latencyMs = thread->latency(); // notify client processes of the new output creation - thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED); + thread->audioConfigChanged(AudioSystem::OUTPUT_OPENED); // the first primary output opened designates the primary hw device if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) { ALOGI("Using module %d has the primary audio interface", module); - mPrimaryHardwareDev = outHwDev; + mPrimaryHardwareDev = thread->getOutput()->audioHwDev; AutoMutex lock(mHardwareLock); mHardwareStatus = AUDIO_HW_SET_MODE; - hwDevHal->set_mode(hwDevHal, mMode); + mPrimaryHardwareDev->hwDevice()->set_mode(mPrimaryHardwareDev->hwDevice(), mMode); mHardwareStatus = AUDIO_HW_IDLE; + + mPrimaryOutputSampleRate = config->sample_rate; } - return id; + return NO_ERROR; } - return 0; + return NO_INIT; } audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1, @@ -1563,7 +1762,7 @@ audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1, if (thread1 == NULL || thread2 == NULL) { ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1, output2); - return 0; + return AUDIO_IO_HANDLE_NONE; } audio_io_handle_t id = nextUniqueId(); @@ -1571,7 +1770,7 @@ audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1, thread->addOutputTrack(thread2); mPlaybackThreads.add(id, thread); // notify client processes of the new output creation - thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED); + thread->audioConfigChanged(AudioSystem::OUTPUT_OPENED); return id; } @@ -1621,22 +1820,35 @@ status_t AudioFlinger::closeOutput_nonvirtual(audio_io_handle_t output) } } } - audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL); + audioConfigChanged(AudioSystem::OUTPUT_CLOSED, output, NULL); } thread->exit(); // The thread entity (active unit of execution) is no longer running here, // but the ThreadBase container still exists. 
if (thread->type() != ThreadBase::DUPLICATING) { - AudioStreamOut *out = thread->clearOutput(); - ALOG_ASSERT(out != NULL, "out shouldn't be NULL"); - // from now on thread->mOutput is NULL - out->hwDev()->close_output_stream(out->hwDev(), out->stream); - delete out; + closeOutputFinish(thread); } + return NO_ERROR; } +void AudioFlinger::closeOutputFinish(sp<PlaybackThread> thread) +{ + AudioStreamOut *out = thread->clearOutput(); + ALOG_ASSERT(out != NULL, "out shouldn't be NULL"); + // from now on thread->mOutput is NULL + out->hwDev()->close_output_stream(out->hwDev(), out->stream); + delete out; +} + +void AudioFlinger::closeOutputInternal_l(sp<PlaybackThread> thread) +{ + mPlaybackThreads.removeItem(thread->mId); + thread->exit(); + closeOutputFinish(thread); +} + status_t AudioFlinger::suspendOutput(audio_io_handle_t output) { Mutex::Autolock _l(mLock); @@ -1668,58 +1880,76 @@ status_t AudioFlinger::restoreOutput(audio_io_handle_t output) return NO_ERROR; } -audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, - audio_devices_t *pDevices, - uint32_t *pSamplingRate, - audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask) +status_t AudioFlinger::openInput(audio_module_handle_t module, + audio_io_handle_t *input, + audio_config_t *config, + audio_devices_t *device, + const String8& address, + audio_source_t source, + audio_input_flags_t flags) { - status_t status; - RecordThread *thread = NULL; - struct audio_config config; - config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0; - config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0; - config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT; - - uint32_t reqSamplingRate = config.sample_rate; - audio_format_t reqFormat = config.format; - audio_channel_mask_t reqChannels = config.channel_mask; - audio_stream_in_t *inStream = NULL; - AudioHwDevice *inHwDev; + Mutex::Autolock _l(mLock); - if (pDevices == NULL || *pDevices == 0) { - return 0; + if (*device == AUDIO_DEVICE_NONE) { + return BAD_VALUE; } - Mutex::Autolock _l(mLock); + sp<RecordThread> thread = openInput_l(module, input, config, *device, address, source, flags); - inHwDev = findSuitableHwDev_l(module, *pDevices); - if (inHwDev == NULL) + if (thread != 0) { + // notify client processes of the new input creation + thread->audioConfigChanged(AudioSystem::INPUT_OPENED); + return NO_ERROR; + } + return NO_INIT; +} + +sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t module, + audio_io_handle_t *input, + audio_config_t *config, + audio_devices_t device, + const String8& address, + audio_source_t source, + audio_input_flags_t flags) +{ + AudioHwDevice *inHwDev = findSuitableHwDev_l(module, device); + if (inHwDev == NULL) { + *input = AUDIO_IO_HANDLE_NONE; return 0; + } - audio_hw_device_t *inHwHal = inHwDev->hwDevice(); - audio_io_handle_t id = nextUniqueId(); + if (*input == AUDIO_IO_HANDLE_NONE) { + *input = nextUniqueId(); + } - status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, - &inStream); - ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, " - "status %d", + audio_config_t halconfig = *config; + audio_hw_device_t *inHwHal = inHwDev->hwDevice(); + audio_stream_in_t *inStream = NULL; + status_t status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig, + &inStream, flags, address.string(), source); + ALOGV("openInput_l() openInputStream returned input %p, SamplingRate %d" + ", Format %#x, Channels %x, flags %#x, 
status %d", inStream, - config.sample_rate, - config.format, - config.channel_mask, + halconfig.sample_rate, + halconfig.format, + halconfig.channel_mask, + flags, status); // If the input could not be opened with the requested parameters and we can handle the // conversion internally, try to open again with the proposed parameters. The AudioFlinger can // resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs. if (status == BAD_VALUE && - reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT && - (config.sample_rate <= 2 * reqSamplingRate) && - (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) { - ALOGV("openInput() reopening with proposed sampling rate and channel mask"); + config->format == halconfig.format && halconfig.format == AUDIO_FORMAT_PCM_16_BIT && + (halconfig.sample_rate <= 2 * config->sample_rate) && + (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_2) && + (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_2)) { + // FIXME describe the change proposed by HAL (save old values so we can log them here) + ALOGV("openInput_l() reopening with proposed sampling rate and channel mask"); inStream = NULL; - status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream); + status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig, + &inStream, flags, address.string(), source); + // FIXME log this new status; HAL should not propose any further changes } if (status == NO_ERROR && inStream != NULL) { @@ -1733,17 +1963,17 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, TEE_SINK_NEW, // copy input using a new pipe TEE_SINK_OLD, // copy input using an existing pipe } kind; - NBAIO_Format format = Format_from_SR_C(inStream->common.get_sample_rate(&inStream->common), - popcount(inStream->common.get_channels(&inStream->common))); + NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate, + audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format); if (!mTeeSinkInputEnabled) { kind = TEE_SINK_NO; - } else if (format == Format_Invalid) { + } else if (!Format_isValid(format)) { kind = TEE_SINK_NO; } else if (mRecordTeeSink == 0) { kind = TEE_SINK_NEW; } else if (mRecordTeeSink->getStrongCount() != 1) { kind = TEE_SINK_NO; - } else if (format == mRecordTeeSink->format()) { + } else if (Format_isEqual(format, mRecordTeeSink->format())) { kind = TEE_SINK_OLD; } else { kind = TEE_SINK_NEW; @@ -1773,39 +2003,26 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, } #endif - AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream); + AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream); // Start record thread // RecordThread requires both input and output device indication to forward to audio // pre processing modules - thread = new RecordThread(this, - input, - reqSamplingRate, - reqChannels, - id, + sp<RecordThread> thread = new RecordThread(this, + inputStream, + *input, primaryOutputDevice_l(), - *pDevices + device #ifdef TEE_SINK , teeSink #endif ); - mRecordThreads.add(id, thread); - ALOGV("openInput() created record thread: ID %d thread %p", id, thread); - if (pSamplingRate != NULL) { - *pSamplingRate = reqSamplingRate; - } - if (pFormat != NULL) { - *pFormat = config.format; - } - if (pChannelMask != NULL) { - *pChannelMask = reqChannels; - } - - // notify client processes of the new input creation - thread->audioConfigChanged_l(AudioSystem::INPUT_OPENED); - return id; 
+ mRecordThreads.add(*input, thread); + ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get()); + return thread; } + *input = AUDIO_IO_HANDLE_NONE; return 0; } @@ -1827,26 +2044,72 @@ status_t AudioFlinger::closeInput_nonvirtual(audio_io_handle_t input) } ALOGV("closeInput() %d", input); - audioConfigChanged_l(AudioSystem::INPUT_CLOSED, input, NULL); + + // If we still have effect chains, it means that a client still holds a handle + // on at least one effect. We must either move the chain to an existing thread with the + // same session ID or put it aside in case a new record thread is opened for a + // new capture on the same session + sp<EffectChain> chain; + { + Mutex::Autolock _sl(thread->mLock); + Vector< sp<EffectChain> > effectChains = thread->getEffectChains_l(); + // Note: maximum one chain per record thread + if (effectChains.size() != 0) { + chain = effectChains[0]; + } + } + if (chain != 0) { + // first check if a record thread is already opened with a client on the same session. + // This should only happen in case of overlap between one thread tear down and the + // creation of its replacement + size_t i; + for (i = 0; i < mRecordThreads.size(); i++) { + sp<RecordThread> t = mRecordThreads.valueAt(i); + if (t == thread) { + continue; + } + if (t->hasAudioSession(chain->sessionId()) != 0) { + Mutex::Autolock _l(t->mLock); + ALOGV("closeInput() found thread %d for effect session %d", + t->id(), chain->sessionId()); + t->addEffectChain_l(chain); + break; + } + } + // put the chain aside if we could not find a record thread with the same session id. + if (i == mRecordThreads.size()) { + putOrphanEffectChain_l(chain); + } + } + audioConfigChanged(AudioSystem::INPUT_CLOSED, input, NULL); mRecordThreads.removeItem(input); } - thread->exit(); - // The thread entity (active unit of execution) is no longer running here, - // but the ThreadBase container still exists. 
+ // FIXME: calling thread->exit() without mLock held should not be needed anymore now that + // we have a different lock for notification client + closeInputFinish(thread); + return NO_ERROR; +} +void AudioFlinger::closeInputFinish(sp<RecordThread> thread) +{ + thread->exit(); AudioStreamIn *in = thread->clearInput(); ALOG_ASSERT(in != NULL, "in shouldn't be NULL"); // from now on thread->mInput is NULL in->hwDev()->close_input_stream(in->hwDev(), in->stream); delete in; +} - return NO_ERROR; +void AudioFlinger::closeInputInternal_l(sp<RecordThread> thread) +{ + mRecordThreads.removeItem(thread->mId); + closeInputFinish(thread); } -status_t AudioFlinger::setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output) +status_t AudioFlinger::invalidateStream(audio_stream_type_t stream) { Mutex::Autolock _l(mLock); - ALOGV("setStreamOutput() stream %d to output %d", stream, output); + ALOGV("invalidateStream() stream %d", stream); for (size_t i = 0; i < mPlaybackThreads.size(); i++) { PlaybackThread *thread = mPlaybackThreads.valueAt(i).get(); @@ -1857,24 +2120,30 @@ status_t AudioFlinger::setStreamOutput(audio_stream_type_t stream, audio_io_hand } -int AudioFlinger::newAudioSessionId() +audio_unique_id_t AudioFlinger::newAudioUniqueId() { return nextUniqueId(); } -void AudioFlinger::acquireAudioSessionId(int audioSession) +void AudioFlinger::acquireAudioSessionId(int audioSession, pid_t pid) { Mutex::Autolock _l(mLock); pid_t caller = IPCThreadState::self()->getCallingPid(); - ALOGV("acquiring %d from %d", audioSession, caller); - - // Ignore requests received from processes not known as notification client. The request - // is likely proxied by mediaserver (e.g CameraService) and releaseAudioSessionId() can be - // called from a different pid leaving a stale session reference. Also we don't know how - // to clear this reference if the client process dies. - if (mNotificationClients.indexOfKey(caller) < 0) { - ALOGV("acquireAudioSessionId() unknown client %d for session %d", caller, audioSession); - return; + ALOGV("acquiring %d from %d, for %d", audioSession, caller, pid); + if (pid != -1 && (caller == getpid_cached)) { + caller = pid; + } + + { + Mutex::Autolock _cl(mClientLock); + // Ignore requests received from processes not known as notification client. The request + // is likely proxied by mediaserver (e.g CameraService) and releaseAudioSessionId() can be + // called from a different pid leaving a stale session reference. Also we don't know how + // to clear this reference if the client process dies. 
+ if (mNotificationClients.indexOfKey(caller) < 0) { + ALOGW("acquireAudioSessionId() unknown client %d for session %d", caller, audioSession); + return; + } } size_t num = mAudioSessionRefs.size(); @@ -1890,11 +2159,14 @@ void AudioFlinger::acquireAudioSessionId(int audioSession) ALOGV(" added new entry for %d", audioSession); } -void AudioFlinger::releaseAudioSessionId(int audioSession) +void AudioFlinger::releaseAudioSessionId(int audioSession, pid_t pid) { Mutex::Autolock _l(mLock); pid_t caller = IPCThreadState::self()->getCallingPid(); - ALOGV("releasing %d from %d", audioSession, caller); + ALOGV("releasing %d from %d for %d", audioSession, caller, pid); + if (pid != -1 && (caller == getpid_cached)) { + caller = pid; + } size_t num = mAudioSessionRefs.size(); for (size_t i = 0; i< num; i++) { AudioSessionRef *ref = mAudioSessionRefs.itemAt(i); @@ -1956,7 +2228,7 @@ void AudioFlinger::purgeStaleEffects_l() { } } if (!found) { - Mutex::Autolock _l (t->mLock); + Mutex::Autolock _l(t->mLock); // remove all effects from the chain while (ec->mEffects.size()) { sp<EffectModule> effect = ec->mEffects[0]; @@ -1993,7 +2265,7 @@ AudioFlinger::RecordThread *AudioFlinger::checkRecordThread_l(audio_io_handle_t uint32_t AudioFlinger::nextUniqueId() { - return android_atomic_inc(&mNextUniqueId); + return (uint32_t) android_atomic_inc(&mNextUniqueId); } AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l() const @@ -2023,7 +2295,7 @@ sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_even int triggerSession, int listenerSession, sync_event_callback_t callBack, - void *cookie) + wp<RefBase> cookie) { Mutex::Autolock _l(mLock); @@ -2185,7 +2457,7 @@ sp<IEffect> AudioFlinger::createEffect( // return effect descriptor *pDesc = desc; - if (io == 0 && sessionId == AUDIO_SESSION_OUTPUT_MIX) { + if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) { // if the output returned by getOutputForEffect() is removed before we lock the // mutex below, the call to checkPlaybackThread_l(io) below will detect it // and we will exit safely @@ -2200,7 +2472,7 @@ sp<IEffect> AudioFlinger::createEffect( // If output is 0 here, sessionId is neither SESSION_OUTPUT_STAGE nor SESSION_OUTPUT_MIX // because of code checking output when entering the function. // Note: io is never 0 when creating an effect on an input - if (io == 0) { + if (io == AUDIO_IO_HANDLE_NONE) { if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) { // output must be specified by AudioPolicyManager when using session // AUDIO_SESSION_OUTPUT_STAGE @@ -2225,7 +2497,7 @@ sp<IEffect> AudioFlinger::createEffect( // If no output thread contains the requested session ID, default to // first output. The effect chain will be moved to the correct output // thread when a track with the same session ID is created - if (io == 0 && mPlaybackThreads.size()) { + if (io == AUDIO_IO_HANDLE_NONE && mPlaybackThreads.size() > 0) { io = mPlaybackThreads.keyAt(0); } ALOGV("createEffect() got io %d for effect %s", io, desc.name); @@ -2238,9 +2510,17 @@ sp<IEffect> AudioFlinger::createEffect( lStatus = BAD_VALUE; goto Exit; } + } else { + // Check if one effect chain was awaiting for an effect to be created on this + // session and used it instead of creating a new one. 
+ sp<EffectChain> chain = getOrphanEffectChain_l((audio_session_t)sessionId); + if (chain != 0) { + Mutex::Autolock _l(thread->mLock); + thread->addEffectChain_l(chain); + } } - sp<Client> client = registerPid_l(pid); + sp<Client> client = registerPid(pid); // create effect on selected output thread handle = thread->createEffect_l(client, effectClient, priority, sessionId, @@ -2248,12 +2528,15 @@ sp<IEffect> AudioFlinger::createEffect( if (handle != 0 && id != NULL) { *id = handle->id(); } + if (handle == 0) { + // remove local strong reference to Client with mClientLock held + Mutex::Autolock _cl(mClientLock); + client.clear(); + } } Exit: - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return handle; } @@ -2299,6 +2582,16 @@ status_t AudioFlinger::moveEffectChain_l(int sessionId, return INVALID_OPERATION; } + // Check whether the destination thread has a channel count of FCC_2, which is + // currently required for (most) effects. Prevent moving the effect chain here rather + // than disabling the addEffect_l() call in dstThread below. + if (dstThread->mChannelCount != FCC_2) { + ALOGW("moveEffectChain_l() effect chain failed because" + " destination thread %p channel count(%u) != %u", + dstThread, dstThread->mChannelCount, FCC_2); + return INVALID_OPERATION; + } + // remove chain first. This is useful only if reconfiguring effect chain on same output thread, // so that a new chain is created with correct parameters when first effect is added. This is // otherwise unnecessary as removeEffect_l() will remove the chain when last effect is @@ -2397,6 +2690,49 @@ void AudioFlinger::onNonOffloadableGlobalEffectEnable() } +status_t AudioFlinger::putOrphanEffectChain_l(const sp<AudioFlinger::EffectChain>& chain) +{ + audio_session_t session = (audio_session_t)chain->sessionId(); + ssize_t index = mOrphanEffectChains.indexOfKey(session); + ALOGV("putOrphanEffectChain_l session %d index %d", session, index); + if (index >= 0) { + ALOGW("putOrphanEffectChain_l chain for session %d already present", session); + return ALREADY_EXISTS; + } + mOrphanEffectChains.add(session, chain); + return NO_ERROR; +} + +sp<AudioFlinger::EffectChain> AudioFlinger::getOrphanEffectChain_l(audio_session_t session) +{ + sp<EffectChain> chain; + ssize_t index = mOrphanEffectChains.indexOfKey(session); + ALOGV("getOrphanEffectChain_l session %d index %d", session, index); + if (index >= 0) { + chain = mOrphanEffectChains.valueAt(index); + mOrphanEffectChains.removeItemsAt(index); + } + return chain; +} + +bool AudioFlinger::updateOrphanEffectChains(const sp<AudioFlinger::EffectModule>& effect) +{ + Mutex::Autolock _l(mLock); + audio_session_t session = (audio_session_t)effect->sessionId(); + ssize_t index = mOrphanEffectChains.indexOfKey(session); + ALOGV("updateOrphanEffectChains session %d index %d", session, index); + if (index >= 0) { + sp<EffectChain> chain = mOrphanEffectChains.valueAt(index); + if (chain->removeEffect_l(effect) == 0) { + ALOGV("updateOrphanEffectChains removing effect chain at index %d", index); + mOrphanEffectChains.removeItemsAt(index); + } + return true; + } + return false; +} + + struct Entry { #define MAX_NAME 32 // %Y%m%d%H%M%S_%d.wav char mName[MAX_NAME]; @@ -2473,24 +2809,26 @@ void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_hand // if 2 dumpsys are done within 1 second, and rotation didn't work, then discard 2nd int teeFd = open(teePath, O_WRONLY | O_CREAT | O_EXCL | O_NOFOLLOW, S_IRUSR | S_IWUSR); if (teeFd >= 0) { + // FIXME use 
libsndfile char wavHeader[44]; memcpy(wavHeader, "RIFF\0\0\0\0WAVEfmt \20\0\0\0\1\0\2\0\104\254\0\0\0\0\0\0\4\0\20\0data\0\0\0\0", sizeof(wavHeader)); NBAIO_Format format = teeSource->format(); unsigned channelCount = Format_channelCount(format); - ALOG_ASSERT(channelCount <= FCC_2); uint32_t sampleRate = Format_sampleRate(format); + size_t frameSize = Format_frameSize(format); wavHeader[22] = channelCount; // number of channels wavHeader[24] = sampleRate; // sample rate wavHeader[25] = sampleRate >> 8; - wavHeader[32] = channelCount * 2; // block alignment + wavHeader[32] = frameSize; // block alignment + wavHeader[33] = frameSize >> 8; write(teeFd, wavHeader, sizeof(wavHeader)); size_t total = 0; bool firstRead = true; +#define TEE_SINK_READ 1024 // frames per I/O operation + void *buffer = malloc(TEE_SINK_READ * frameSize); for (;;) { -#define TEE_SINK_READ 1024 - short buffer[TEE_SINK_READ * FCC_2]; size_t count = TEE_SINK_READ; ssize_t actual = teeSource->read(buffer, count, AudioBufferProvider::kInvalidPTS); @@ -2503,14 +2841,17 @@ void AudioFlinger::dumpTee(int fd, const sp<NBAIO_Source>& source, audio_io_hand break; } ALOG_ASSERT(actual <= (ssize_t)count); - write(teeFd, buffer, actual * channelCount * sizeof(short)); + write(teeFd, buffer, actual * frameSize); total += actual; } + free(buffer); lseek(teeFd, (off_t) 4, SEEK_SET); - uint32_t temp = 44 + total * channelCount * sizeof(short) - 8; + uint32_t temp = 44 + total * frameSize - 8; + // FIXME not big-endian safe write(teeFd, &temp, sizeof(temp)); lseek(teeFd, (off_t) 40, SEEK_SET); - temp = total * channelCount * sizeof(short); + temp = total * frameSize; + // FIXME not big-endian safe write(teeFd, &temp, sizeof(temp)); close(teeFd); if (fd >= 0) { diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h index 7320144..1003017 100644 --- a/services/audioflinger/AudioFlinger.h +++ b/services/audioflinger/AudioFlinger.h @@ -18,6 +18,7 @@ #ifndef ANDROID_AUDIO_FLINGER_H #define ANDROID_AUDIO_FLINGER_H +#include "Configuration.h" #include <stdint.h> #include <sys/types.h> #include <limits.h> @@ -49,9 +50,12 @@ #include <media/AudioBufferProvider.h> #include <media/ExtendedAudioBufferProvider.h> + +#include "FastCapture.h" #include "FastMixer.h" #include <media/nbaio/NBAIO.h> #include "AudioWatchdog.h" +#include "AudioMixer.h" #include <powermanager/IPowerManager.h> @@ -60,8 +64,8 @@ namespace android { -class audio_track_cblk_t; -class effect_param_cblk_t; +struct audio_track_cblk_t; +struct effect_param_cblk_t; class AudioMixer; class AudioBuffer; class AudioResampler; @@ -81,9 +85,6 @@ class ServerProxy; static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3); -#define MAX_GAIN 4096.0f -#define MAX_GAIN_INT 0x1000 - #define INCLUDING_FROM_AUDIOFLINGER_H class AudioFlinger : @@ -102,29 +103,30 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, IAudioFlinger::track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, int *sessionId, - String8& name, int clientUid, - status_t *status); + status_t *status /*non-NULL*/); virtual sp<IAudioRecord> openRecord( audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, IAudioFlinger::track_flags_t *flags, pid_t tid, int *sessionId, - status_t *status); + size_t *notificationFrames, + sp<IMemory>& cblk, + sp<IMemory>& buffers, + status_t *status 
/*non-NULL*/); virtual uint32_t sampleRate(audio_io_handle_t output) const; - virtual int channelCount(audio_io_handle_t output) const; virtual audio_format_t format(audio_io_handle_t output) const; virtual size_t frameCount(audio_io_handle_t output) const; virtual uint32_t latency(audio_io_handle_t output) const; @@ -156,14 +158,13 @@ public: virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask) const; - virtual audio_io_handle_t openOutput(audio_module_handle_t module, - audio_devices_t *pDevices, - uint32_t *pSamplingRate, - audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask, - uint32_t *pLatencyMs, - audio_output_flags_t flags, - const audio_offload_info_t *offloadInfo); + virtual status_t openOutput(audio_module_handle_t module, + audio_io_handle_t *output, + audio_config_t *config, + audio_devices_t *devices, + const String8& address, + uint32_t *latencyMs, + audio_output_flags_t flags); virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2); @@ -174,15 +175,17 @@ public: virtual status_t restoreOutput(audio_io_handle_t output); - virtual audio_io_handle_t openInput(audio_module_handle_t module, - audio_devices_t *pDevices, - uint32_t *pSamplingRate, - audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask); + virtual status_t openInput(audio_module_handle_t module, + audio_io_handle_t *input, + audio_config_t *config, + audio_devices_t *device, + const String8& address, + audio_source_t source, + audio_input_flags_t flags); virtual status_t closeInput(audio_io_handle_t input); - virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output); + virtual status_t invalidateStream(audio_stream_type_t stream); virtual status_t setVoiceVolume(float volume); @@ -191,11 +194,11 @@ public: virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const; - virtual int newAudioSessionId(); + virtual audio_unique_id_t newAudioUniqueId(); - virtual void acquireAudioSessionId(int audioSession); + virtual void acquireAudioSessionId(int audioSession, pid_t pid); - virtual void releaseAudioSessionId(int audioSession); + virtual void releaseAudioSessionId(int audioSession, pid_t pid); virtual status_t queryNumberEffects(uint32_t *numEffects) const; @@ -210,7 +213,7 @@ public: int32_t priority, audio_io_handle_t io, int sessionId, - status_t *status, + status_t *status /*non-NULL*/, int *id, int *enabled); @@ -224,6 +227,30 @@ public: virtual status_t setLowRamDevice(bool isLowRamDevice); + /* List available audio ports and their attributes */ + virtual status_t listAudioPorts(unsigned int *num_ports, + struct audio_port *ports); + + /* Get attributes for a given audio port */ + virtual status_t getAudioPort(struct audio_port *port); + + /* Create an audio patch between several source and sink ports */ + virtual status_t createAudioPatch(const struct audio_patch *patch, + audio_patch_handle_t *handle); + + /* Release an audio patch */ + virtual status_t releaseAudioPatch(audio_patch_handle_t handle); + + /* List existing audio patches */ + virtual status_t listAudioPatches(unsigned int *num_patches, + struct audio_patch *patches); + + /* Set audio port configuration */ + virtual status_t setAudioPortConfig(const struct audio_port_config *config); + + /* Get the HW synchronization source used for an audio session */ + virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId); + virtual status_t onTransact( uint32_t code, const Parcel& 
data, @@ -235,8 +262,12 @@ public: sp<NBLog::Writer> newWriter_l(size_t size, const char *name); void unregisterWriter(const sp<NBLog::Writer>& writer); private: - static const size_t kLogMemorySize = 10 * 1024; + static const size_t kLogMemorySize = 40 * 1024; sp<MemoryDealer> mLogMemoryDealer; // == 0 when NBLog is disabled + // When a log writer is unregistered, it is done lazily so that media.log can continue to see it + // for as long as possible. The memory is only freed when it is needed for another log writer. + Vector< sp<NBLog::Writer> > mUnregisteredWriters; + Mutex mUnregisteredWritersLock; public: class SyncEvent; @@ -249,7 +280,7 @@ public: int triggerSession, int listenerSession, sync_event_callback_t callBack, - void *cookie) + wp<RefBase> cookie) : mType(type), mTriggerSession(triggerSession), mListenerSession(listenerSession), mCallback(callBack), mCookie(cookie) {} @@ -262,14 +293,14 @@ public: AudioSystem::sync_event_t type() const { return mType; } int triggerSession() const { return mTriggerSession; } int listenerSession() const { return mListenerSession; } - void *cookie() const { return mCookie; } + wp<RefBase> cookie() const { return mCookie; } private: const AudioSystem::sync_event_t mType; const int mTriggerSession; const int mListenerSession; sync_event_callback_t mCallback; - void * const mCookie; + const wp<RefBase> mCookie; mutable Mutex mLock; }; @@ -277,7 +308,7 @@ public: int triggerSession, int listenerSession, sync_event_callback_t callBack, - void *cookie); + wp<RefBase> cookie); private: class AudioHwDevice; // fwd declaration for findSuitableHwDev_l @@ -300,6 +331,49 @@ private: audio_devices_t devices); void purgeStaleEffects_l(); + // Set kEnableExtendedChannels to true to enable greater than stereo output + // for the MixerThread and device sink. Number of channels allowed is + // FCC_2 <= channels <= AudioMixer::MAX_NUM_CHANNELS. + static const bool kEnableExtendedChannels = true; + + // Returns true if channel mask is permitted for the PCM sink in the MixerThread + static inline bool isValidPcmSinkChannelMask(audio_channel_mask_t channelMask) { + switch (audio_channel_mask_get_representation(channelMask)) { + case AUDIO_CHANNEL_REPRESENTATION_POSITION: { + uint32_t channelCount = FCC_2; // stereo is default + if (kEnableExtendedChannels) { + channelCount = audio_channel_count_from_out_mask(channelMask); + if (channelCount < FCC_2 // mono is not supported at this time + || channelCount > AudioMixer::MAX_NUM_CHANNELS) { + return false; + } + } + // check that channelMask is the "canonical" one we expect for the channelCount. 
+ return channelMask == audio_channel_out_mask_from_count(channelCount); + } + default: + return false; + } + } + + // Set kEnableExtendedPrecision to true to use extended precision in MixerThread + static const bool kEnableExtendedPrecision = true; + + // Returns true if format is permitted for the PCM sink in the MixerThread + static inline bool isValidPcmSinkFormat(audio_format_t format) { + switch (format) { + case AUDIO_FORMAT_PCM_16_BIT: + return true; + case AUDIO_FORMAT_PCM_FLOAT: + case AUDIO_FORMAT_PCM_24_BIT_PACKED: + case AUDIO_FORMAT_PCM_32_BIT: + case AUDIO_FORMAT_PCM_8_24_BIT: + return kEnableExtendedPrecision; + default: + return false; + } + } + // standby delay for MIXER and DUPLICATING playback threads is read from property // ro.audio.flinger_standbytime_ms or defaults to kDefaultStandbyTimeInNsecs static nsecs_t mStandbyTimeInNsecs; @@ -394,6 +468,8 @@ private: #include "Effects.h" +#include "PatchPanel.h" + // server side of the client's IAudioTrack class TrackHandle : public android::BnAudioTrack { public: @@ -427,7 +503,6 @@ private: public: RecordHandle(const sp<RecordThread::RecordTrack>& recordTrack); virtual ~RecordHandle(); - virtual sp<IMemory> getCblk() const; virtual status_t start(int /*AudioSystem::sync_event_t*/ event, int triggerSession); virtual void stop(); virtual status_t onTransact( @@ -443,15 +518,39 @@ private: PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const; MixerThread *checkMixerThread_l(audio_io_handle_t output) const; RecordThread *checkRecordThread_l(audio_io_handle_t input) const; + sp<RecordThread> openInput_l(audio_module_handle_t module, + audio_io_handle_t *input, + audio_config_t *config, + audio_devices_t device, + const String8& address, + audio_source_t source, + audio_input_flags_t flags); + sp<PlaybackThread> openOutput_l(audio_module_handle_t module, + audio_io_handle_t *output, + audio_config_t *config, + audio_devices_t devices, + const String8& address, + audio_output_flags_t flags); + + void closeOutputFinish(sp<PlaybackThread> thread); + void closeInputFinish(sp<RecordThread> thread); + // no range check, AudioFlinger::mLock held bool streamMute_l(audio_stream_type_t stream) const { return mStreamTypes[stream].mute; } // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held float streamVolume_l(audio_stream_type_t stream) const { return mStreamTypes[stream].volume; } - void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2); - - // allocate an audio_io_handle_t, session ID, or effect ID + void audioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2); + + // Allocate an audio_io_handle_t, session ID, effect ID, or audio_module_handle_t. + // They all share the same ID space, but the namespaces are actually independent + // because there are separate KeyedVectors for each kind of ID. + // The return value is uint32_t, but is cast to signed for some IDs. + // FIXME This API does not handle rollover to zero (for unsigned IDs), + // or from positive to negative (for signed IDs). + // Thus it may fail by returning an ID of the wrong sign, + // or by returning a non-unique ID. uint32_t nextUniqueId(); status_t moveEffectChain_l(int sessionId, @@ -467,10 +566,26 @@ private: void removeClient_l(pid_t pid); void removeNotificationClient(pid_t pid); - bool isNonOffloadableGlobalEffectEnabled_l(); void onNonOffloadableGlobalEffectEnable(); + // Store an effect chain to mOrphanEffectChains keyed vector. 
+ // Called when a thread exits and effects are still attached to it. + // If effects are later created on the same session, they will reuse the same + // effect chain and same instances in the effect library. + // return ALREADY_EXISTS if a chain with the same session already exists in + // mOrphanEffectChains. Note that this should never happen as there is only one + // chain for a given session and it is attached to only one thread at a time. + status_t putOrphanEffectChain_l(const sp<EffectChain>& chain); + // Get an effect chain for the specified session in mOrphanEffectChains and remove + // it if found. Returns 0 if not found (this is the most common case). + sp<EffectChain> getOrphanEffectChain_l(audio_session_t session); + // Called when the last effect handle on an effect instance is removed. If this + // effect belongs to an effect chain in mOrphanEffectChains, the chain is updated + // and removed from mOrphanEffectChains if it does not contain any effect. + // Return true if the effect was found in mOrphanEffectChains, false otherwise. + bool updateOrphanEffectChains(const sp<EffectModule>& effect); + class AudioHwDevice { public: enum Flags { @@ -478,10 +593,11 @@ private: AHWD_CAN_SET_MASTER_MUTE = 0x2, }; - AudioHwDevice(const char *moduleName, + AudioHwDevice(audio_module_handle_t handle, + const char *moduleName, audio_hw_device_t *hwDevice, Flags flags) - : mModuleName(strdup(moduleName)) + : mHandle(handle), mModuleName(strdup(moduleName)) , mHwDevice(hwDevice) , mFlags(flags) { } /*virtual*/ ~AudioHwDevice() { free((void *)mModuleName); } @@ -494,12 +610,16 @@ private: return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE)); } + audio_module_handle_t handle() const { return mHandle; } const char *moduleName() const { return mModuleName; } audio_hw_device_t *hwDevice() const { return mHwDevice; } + uint32_t version() const { return mHwDevice->common.version; } + private: + const audio_module_handle_t mHandle; const char * const mModuleName; audio_hw_device_t * const mHwDevice; - Flags mFlags; + const Flags mFlags; }; // AudioStreamOut and AudioStreamIn are immutable, so their fields are const. @@ -509,7 +629,7 @@ private: struct AudioStreamOut { AudioHwDevice* const audioHwDev; audio_stream_out_t* const stream; - audio_output_flags_t flags; + const audio_output_flags_t flags; audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); } @@ -537,7 +657,11 @@ private: }; mutable Mutex mLock; - + // protects mClients and mNotificationClients. + // must be locked after mLock and ThreadBase::mLock if both must be locked + // avoids acquiring AudioFlinger::mLock from inside thread loop. 
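
The orphan-chain bookkeeping declared above (putOrphanEffectChain_l / getOrphanEffectChain_l) amounts to a small session-keyed registry with "park on thread exit, adopt on next use" semantics. A minimal sketch of that lifecycle, using std::map and std::shared_ptr as stand-ins for DefaultKeyedVector and sp<>, with EffectChain reduced to a placeholder:

    // Not the AudioFlinger implementation; an illustration of the put/get contract only.
    #include <cstdio>
    #include <map>
    #include <memory>

    struct EffectChain { int sessionId; };            // placeholder for the real class

    class OrphanRegistry {
    public:
        // Returns false if a chain for this session is already stored (ALREADY_EXISTS).
        bool put(int session, std::shared_ptr<EffectChain> chain) {
            return mOrphans.emplace(session, std::move(chain)).second;
        }
        // Returns the stored chain and removes it, or nullptr if none (the common case).
        std::shared_ptr<EffectChain> take(int session) {
            auto it = mOrphans.find(session);
            if (it == mOrphans.end()) return nullptr;
            auto chain = it->second;
            mOrphans.erase(it);
            return chain;
        }
    private:
        std::map<int, std::shared_ptr<EffectChain>> mOrphans;  // keyed by audio session
    };

    int main() {
        OrphanRegistry reg;
        reg.put(7, std::make_shared<EffectChain>(EffectChain{7}));   // thread exits, chain parked
        auto chain = reg.take(7);                                    // new thread adopts the chain
        std::printf("adopted session %d, second take: %s\n",
                    chain->sessionId, reg.take(7) ? "hit" : "miss");
        return 0;
    }
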
+ mutable Mutex mClientLock; + // protected by mClientLock DefaultKeyedVector< pid_t, wp<Client> > mClients; // see ~Client() mutable Mutex mHardwareLock; @@ -586,8 +710,13 @@ private: DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> > mRecordThreads; + // protected by mClientLock DefaultKeyedVector< pid_t, sp<NotificationClient> > mNotificationClients; + volatile int32_t mNextUniqueId; // updated by android_atomic_inc + // nextUniqueId() returns uint32_t, but this is declared int32_t + // because the atomic operations require an int32_t + audio_mode_t mMode; bool mBtNrecIsOff; @@ -601,12 +730,17 @@ private: Vector < sp<SyncEvent> > mPendingSyncEvents; // sync events awaiting for a session // to be created + // Effect chains without a valid thread + DefaultKeyedVector< audio_session_t , sp<EffectChain> > mOrphanEffectChains; + private: - sp<Client> registerPid_l(pid_t pid); // always returns non-0 + sp<Client> registerPid(pid_t pid); // always returns non-0 // for use from destructor status_t closeOutput_nonvirtual(audio_io_handle_t output); + void closeOutputInternal_l(sp<PlaybackThread> thread); status_t closeInput_nonvirtual(audio_io_handle_t input); + void closeInputInternal_l(sp<RecordThread> thread); #ifdef TEE_SINK // all record threads serially share a common tee sink, which is re-created on format change @@ -634,7 +768,7 @@ public: // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes static const size_t kTeeSinkInputFramesDefault = 0x200000; static const size_t kTeeSinkOutputFramesDefault = 0x200000; - static const size_t kTeeSinkTrackFramesDefault = 0x1000; + static const size_t kTeeSinkTrackFramesDefault = 0x200000; #endif // This method reads from a variable without mLock, but the variable is updated under mLock. So @@ -647,10 +781,17 @@ private: bool mIsLowRamDevice; bool mIsDeviceTypeKnown; nsecs_t mGlobalEffectEnableTime; // when a global effect was last enabled + + sp<PatchPanel> mPatchPanel; + + uint32_t mPrimaryOutputSampleRate; // sample rate of the primary output, or zero if none + // protected by mHardwareLock }; #undef INCLUDING_FROM_AUDIOFLINGER_H +const char *formatToString(audio_format_t format); + // ---------------------------------------------------------------------------- }; // namespace android diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp index f92421e..fd28ea1 100644 --- a/services/audioflinger/AudioMixer.cpp +++ b/services/audioflinger/AudioMixer.cpp @@ -22,6 +22,7 @@ #include <stdint.h> #include <string.h> #include <stdlib.h> +#include <math.h> #include <sys/types.h> #include <utils/Errors.h> @@ -34,65 +35,345 @@ #include <system/audio.h> #include <audio_utils/primitives.h> +#include <audio_utils/format.h> #include <common_time/local_clock.h> #include <common_time/cc_helper.h> #include <media/EffectsFactoryApi.h> +#include <audio_effects/effect_downmix.h> +#include "AudioMixerOps.h" #include "AudioMixer.h" +// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer. +#ifndef FCC_2 +#define FCC_2 2 +#endif + +// Look for MONO_HACK for any Mono hack involving legacy mono channel to +// stereo channel conversion. + +/* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is + * being used. This is a considerable amount of log spam, so don't enable unless you + * are verifying the hook based code. 
+ */ +//#define VERY_VERY_VERBOSE_LOGGING +#ifdef VERY_VERY_VERBOSE_LOGGING +#define ALOGVV ALOGV +//define ALOGVV printf // for test-mixer.cpp +#else +#define ALOGVV(a...) do { } while (0) +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0])) +#endif + +// Set kUseNewMixer to true to use the new mixer engine. Otherwise the +// original code will be used. This is false for now. +static const bool kUseNewMixer = false; + +// Set kUseFloat to true to allow floating input into the mixer engine. +// If kUseNewMixer is false, this is ignored or may be overridden internally +// because of downmix/upmix support. +static const bool kUseFloat = true; + +// Set to default copy buffer size in frames for input processing. +static const size_t kCopyBufferFrameCount = 256; + namespace android { // ---------------------------------------------------------------------------- -AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider() : AudioBufferProvider(), - mTrackBufferProvider(NULL), mDownmixHandle(NULL) + +template <typename T> +T min(const T& a, const T& b) { + return a < b ? a : b; } -AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider() +AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize, + size_t outputFrameSize, size_t bufferFrameCount) : + mInputFrameSize(inputFrameSize), + mOutputFrameSize(outputFrameSize), + mLocalBufferFrameCount(bufferFrameCount), + mLocalBufferData(NULL), + mConsumed(0) { - ALOGV("AudioMixer deleting DownmixerBufferProvider (%p)", this); - EffectRelease(mDownmixHandle); + ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this, + inputFrameSize, outputFrameSize, bufferFrameCount); + LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0, + "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)", + inputFrameSize, outputFrameSize); + if (mLocalBufferFrameCount) { + (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize); + } + mBuffer.frameCount = 0; +} + +AudioMixer::CopyBufferProvider::~CopyBufferProvider() +{ + ALOGV("~CopyBufferProvider(%p)", this); + if (mBuffer.frameCount != 0) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + } + free(mLocalBufferData); } -status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, - int64_t pts) { - //ALOGV("DownmixerBufferProvider::getNextBuffer()"); - if (this->mTrackBufferProvider != NULL) { +status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, + int64_t pts) +{ + //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)", + // this, pBuffer, pBuffer->frameCount, pts); + if (mLocalBufferFrameCount == 0) { status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts); if (res == OK) { - mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount; - mDownmixConfig.inputCfg.buffer.raw = pBuffer->raw; - mDownmixConfig.outputCfg.buffer.frameCount = pBuffer->frameCount; - mDownmixConfig.outputCfg.buffer.raw = mDownmixConfig.inputCfg.buffer.raw; - // in-place so overwrite the buffer contents, has been set in prepareTrackForDownmix() - //mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE; - - res = (*mDownmixHandle)->process(mDownmixHandle, - &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer); - //ALOGV("getNextBuffer is downmixing"); + copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount); } return res; - } else { - ALOGE("DownmixerBufferProvider::getNextBuffer() error: NULL track 
buffer provider"); - return NO_INIT; } + if (mBuffer.frameCount == 0) { + mBuffer.frameCount = pBuffer->frameCount; + status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts); + // At one time an upstream buffer provider had + // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014. + // + // By API spec, if res != OK, then mBuffer.frameCount == 0. + // but there may be improper implementations. + ALOG_ASSERT(res == OK || mBuffer.frameCount == 0); + if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe. + pBuffer->raw = NULL; + pBuffer->frameCount = 0; + return res; + } + mConsumed = 0; + } + ALOG_ASSERT(mConsumed < mBuffer.frameCount); + size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed); + count = min(count, pBuffer->frameCount); + pBuffer->raw = mLocalBufferData; + pBuffer->frameCount = count; + copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize, + pBuffer->frameCount); + return OK; } -void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) { - //ALOGV("DownmixerBufferProvider::releaseBuffer()"); - if (this->mTrackBufferProvider != NULL) { +void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) +{ + //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))", + // this, pBuffer, pBuffer->frameCount); + if (mLocalBufferFrameCount == 0) { mTrackBufferProvider->releaseBuffer(pBuffer); - } else { - ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider"); + return; } + // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount"); + mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content + if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + ALOG_ASSERT(mBuffer.frameCount == 0); + } + pBuffer->raw = NULL; + pBuffer->frameCount = 0; } +void AudioMixer::CopyBufferProvider::reset() +{ + if (mBuffer.frameCount != 0) { + mTrackBufferProvider->releaseBuffer(&mBuffer); + } + mConsumed = 0; +} -// ---------------------------------------------------------------------------- -bool AudioMixer::isMultichannelCapable = false; +AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider( + audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) : + CopyBufferProvider( + audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask), + audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask), + bufferFrameCount) // set bufferFrameCount to 0 to do in-place +{ + ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)", + this, inputChannelMask, outputChannelMask, format, + sampleRate, sessionId); + if (!sIsMultichannelCapable + || EffectCreate(&sDwnmFxDesc.uuid, + sessionId, + SESSION_ID_INVALID_AND_IGNORED, + &mDownmixHandle) != 0) { + ALOGE("DownmixerBufferProvider() error creating downmixer effect"); + mDownmixHandle = NULL; + return; + } + // channel input configuration will be overridden per-track + mDownmixConfig.inputCfg.channels = inputChannelMask; // FIXME: Should be bits + mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits + mDownmixConfig.inputCfg.format = format; + mDownmixConfig.outputCfg.format = format; + mDownmixConfig.inputCfg.samplingRate = sampleRate; + mDownmixConfig.outputCfg.samplingRate = 
sampleRate; + mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; + mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE; + // input and output buffer provider, and frame count will not be used as the downmix effect + // process() function is called directly (see DownmixerBufferProvider::getNextBuffer()) + mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | + EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE; + mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask; + + int cmdStatus; + uint32_t replySize = sizeof(int); + + // Configure downmixer + status_t status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/, + &mDownmixConfig /*pCmdData*/, + &replySize, &cmdStatus /*pReplyData*/); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer", + status, cmdStatus); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + return; + } + + // Enable downmixer + replySize = sizeof(int); + status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/, + &replySize, &cmdStatus /*pReplyData*/); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer", + status, cmdStatus); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + return; + } + + // Set downmix type + // parameter size rounded for padding on 32bit boundary + const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int); + const int downmixParamSize = + sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t); + effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize); + param->psize = sizeof(downmix_params_t); + const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE; + memcpy(param->data, &downmixParam, param->psize); + const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD; + param->vsize = sizeof(downmix_type_t); + memcpy(param->data + psizePadded, &downmixType, param->vsize); + replySize = sizeof(int); + status = (*mDownmixHandle)->command(mDownmixHandle, + EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */, + param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/); + free(param); + if (status != 0 || cmdStatus != 0) { + ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type", + status, cmdStatus); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; + return; + } + ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType); +} + +AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider() +{ + ALOGV("~DownmixerBufferProvider (%p)", this); + EffectRelease(mDownmixHandle); + mDownmixHandle = NULL; +} -effect_descriptor_t AudioMixer::dwnmFxDesc; +void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + mDownmixConfig.inputCfg.buffer.frameCount = frames; + mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src); + mDownmixConfig.outputCfg.buffer.frameCount = frames; + mDownmixConfig.outputCfg.buffer.raw = dst; + // may be in-place if src == dst. + status_t res = (*mDownmixHandle)->process(mDownmixHandle, + &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer); + ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res); +} + +/* call once in a pthread_once handler. 
*/ +/*static*/ status_t AudioMixer::DownmixerBufferProvider::init() +{ + // find multichannel downmix effect if we have to play multichannel content + uint32_t numEffects = 0; + int ret = EffectQueryNumberEffects(&numEffects); + if (ret != 0) { + ALOGE("AudioMixer() error %d querying number of effects", ret); + return NO_INIT; + } + ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); + + for (uint32_t i = 0 ; i < numEffects ; i++) { + if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) { + ALOGV("effect %d is called %s", i, sDwnmFxDesc.name); + if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { + ALOGI("found effect \"%s\" from %s", + sDwnmFxDesc.name, sDwnmFxDesc.implementor); + sIsMultichannelCapable = true; + break; + } + } + } + ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect"); + return NO_INIT; +} + +/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false; +/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc; + +AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + size_t bufferFrameCount) : + CopyBufferProvider( + audio_bytes_per_sample(format) + * audio_channel_count_from_out_mask(inputChannelMask), + audio_bytes_per_sample(format) + * audio_channel_count_from_out_mask(outputChannelMask), + bufferFrameCount), + mFormat(format), + mSampleSize(audio_bytes_per_sample(format)), + mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)), + mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask)) +{ + ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu", + this, format, inputChannelMask, outputChannelMask, + mInputChannels, mOutputChannels); + // TODO: consider channel representation in index array formulation + // We ignore channel representation, and just use the bits. + memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry), + audio_channel_mask_get_bits(outputChannelMask), + audio_channel_mask_get_bits(inputChannelMask)); +} + +void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + memcpy_by_index_array(dst, mOutputChannels, + src, mInputChannels, mIdxAry, mSampleSize, frames); +} + +AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels, + audio_format_t inputFormat, audio_format_t outputFormat, + size_t bufferFrameCount) : + CopyBufferProvider( + channels * audio_bytes_per_sample(inputFormat), + channels * audio_bytes_per_sample(outputFormat), + bufferFrameCount), + mChannels(channels), + mInputFormat(inputFormat), + mOutputFormat(outputFormat) +{ + ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat); +} + +void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames) +{ + memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels); +} + +// ---------------------------------------------------------------------------- // Ensure mConfiguredNames bitmask is initialized properly on all architectures. // The value of 1 << x is undefined in C when x >= 32. @@ -101,20 +382,12 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr : mTrackNames(0), mConfiguredNames((maxNumTracks >= 32 ? 
0 : 1 << maxNumTracks) - 1), mSampleRate(sampleRate) { - // AudioMixer is not yet capable of multi-channel beyond stereo - COMPILE_TIME_ASSERT_FUNCTION_SCOPE(2 == MAX_NUM_CHANNELS); - ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u", maxNumTracks, MAX_NUM_TRACKS); // AudioMixer is not yet capable of more than 32 active track inputs ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS); - // AudioMixer is not yet capable of multi-channel output beyond stereo - ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS); - - LocalClock lc; - pthread_once(&sOnceControl, &sInitRoutine); mState.enabledTracks= 0; @@ -133,30 +406,10 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) { t->resampler = NULL; t->downmixerBufferProvider = NULL; + t->mReformatBufferProvider = NULL; t++; } - // find multichannel downmix effect if we have to play multichannel content - uint32_t numEffects = 0; - int ret = EffectQueryNumberEffects(&numEffects); - if (ret != 0) { - ALOGE("AudioMixer() error %d querying number of effects", ret); - return; - } - ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); - - for (uint32_t i = 0 ; i < numEffects ; i++) { - if (EffectQueryEffect(i, &dwnmFxDesc) == 0) { - ALOGV("effect %d is called %s", i, dwnmFxDesc.name); - if (memcmp(&dwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { - ALOGI("found effect \"%s\" from %s", - dwnmFxDesc.name, dwnmFxDesc.implementor); - isMultichannelCapable = true; - break; - } - } - } - ALOGE_IF(!isMultichannelCapable, "unable to find downmix effect"); } AudioMixer::~AudioMixer() @@ -165,6 +418,7 @@ AudioMixer::~AudioMixer() for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) { delete t->resampler; delete t->downmixerBufferProvider; + delete t->mReformatBufferProvider; t++; } delete [] mState.outputTemp; @@ -176,32 +430,52 @@ void AudioMixer::setLog(NBLog::Writer *log) mState.mLog = log; } -int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId) +int AudioMixer::getTrackName(audio_channel_mask_t channelMask, + audio_format_t format, int sessionId) { + if (!isValidPcmTrackFormat(format)) { + ALOGE("AudioMixer::getTrackName invalid format (%#x)", format); + return -1; + } uint32_t names = (~mTrackNames) & mConfiguredNames; if (names != 0) { int n = __builtin_ctz(names); ALOGV("add track (%d)", n); - mTrackNames |= 1 << n; // assume default parameters for the track, except where noted below track_t* t = &mState.tracks[n]; t->needs = 0; - t->volume[0] = UNITY_GAIN; - t->volume[1] = UNITY_GAIN; - // no initialization needed - // t->prevVolume[0] - // t->prevVolume[1] + + // Integer volume. + // Currently integer volume is kept for the legacy integer mixer. + // Will be removed when the legacy mixer path is removed. + t->volume[0] = UNITY_GAIN_INT; + t->volume[1] = UNITY_GAIN_INT; + t->prevVolume[0] = UNITY_GAIN_INT << 16; + t->prevVolume[1] = UNITY_GAIN_INT << 16; t->volumeInc[0] = 0; t->volumeInc[1] = 0; t->auxLevel = 0; t->auxInc = 0; + t->prevAuxLevel = 0; + + // Floating point volume. 
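
Tracks now carry two volume representations during the transition to the float pipeline: the legacy U4.12 integer volume (with a U4.28 "previous" value used for ramping) and the float volume initialized just below. A rough sketch of how the two relate; the helper names are illustrative, only the U4.12/U4.28 conventions come from the setVolumeRampVariables() documentation later in this file:

    #include <cstdio>
    #include <cstdint>

    static const int16_t kUnityGainInt   = 1 << 12;   // U4.12 unity gain (0x1000)
    static const float   kUnityGainFloat = 1.0f;

    // Clamp a float volume into the legacy U4.12 integer range, as the mixer does.
    static int16_t u4_12_from_float(float v) {
        int32_t i = static_cast<int32_t>(v * (1 << 12));
        if (i < 0) i = 0;
        if (i > kUnityGainInt) i = kUnityGainInt;
        return static_cast<int16_t>(i);
    }

    // Widen U4.12 to U4.28, the format used for the ramping "previous volume".
    static int32_t u4_28_from_u4_12(int16_t v) {
        return static_cast<int32_t>(v) << 16;
    }

    int main() {
        const float vol = 0.5f * kUnityGainFloat;
        const int16_t volInt  = u4_12_from_float(vol);     // 0x0800
        const int32_t volPrev = u4_28_from_u4_12(volInt);  // 0x08000000
        std::printf("float %.3f -> U4.12 0x%04x -> U4.28 0x%08x\n",
                    vol, volInt, static_cast<uint32_t>(volPrev));
        return 0;
    }
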
+ t->mVolume[0] = UNITY_GAIN_FLOAT; + t->mVolume[1] = UNITY_GAIN_FLOAT; + t->mPrevVolume[0] = UNITY_GAIN_FLOAT; + t->mPrevVolume[1] = UNITY_GAIN_FLOAT; + t->mVolumeInc[0] = 0.; + t->mVolumeInc[1] = 0.; + t->mAuxLevel = 0.; + t->mAuxInc = 0.; + t->mPrevAuxLevel = 0.; + // no initialization needed - // t->prevAuxLevel // t->frameCount - t->channelCount = 2; + t->channelCount = audio_channel_count_from_out_mask(channelMask); t->enabled = false; - t->format = 16; - t->channelMask = AUDIO_CHANNEL_OUT_STEREO; + ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO, + "Non-stereo channel mask: %d\n", channelMask); + t->channelMask = channelMask; t->sessionId = sessionId; // setBufferProvider(name, AudioBufferProvider *) is required before enable(name) t->bufferProvider = NULL; @@ -215,52 +489,116 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId) // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name) t->mainBuffer = NULL; t->auxBuffer = NULL; + t->mInputBufferProvider = NULL; + t->mReformatBufferProvider = NULL; t->downmixerBufferProvider = NULL; - - status_t status = initTrackDownmix(&mState.tracks[n], n, channelMask); - if (status == OK) { - return TRACK0 + n; + t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT; + t->mFormat = format; + t->mMixerInFormat = kUseFloat && kUseNewMixer + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits( + AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO); + t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask); + // Check the downmixing (or upmixing) requirements. + status_t status = initTrackDownmix(t, n); + if (status != OK) { + ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask); + return -1; } - ALOGE("AudioMixer::getTrackName(0x%x) failed, error preparing track for downmix", - channelMask); + // initTrackDownmix() may change the input format requirement. + // If you desire floating point input to the mixer, it may change + // to integer because the downmixer requires integer to process. + ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat); + prepareTrackForReformat(t, n); + mTrackNames |= 1 << n; + return TRACK0 + n; } + ALOGE("AudioMixer::getTrackName out of available tracks"); return -1; } void AudioMixer::invalidateState(uint32_t mask) { - if (mask) { + if (mask != 0) { mState.needsChanged |= mask; mState.hook = process__validate; } } -status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackNum, audio_channel_mask_t mask) +// Called when channel masks have changed for a track name +// TODO: Fix Downmixbufferprofider not to (possibly) change mixer input format, +// which will simplify this logic. +bool AudioMixer::setChannelMasks(int name, + audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) { + track_t &track = mState.tracks[name]; + + if (trackChannelMask == track.channelMask + && mixerChannelMask == track.mMixerChannelMask) { + return false; // no need to change + } + // always recompute for both channel masks even if only one has changed. 
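
setChannelMasks() here and the isValidPcmSinkChannelMask() helper earlier in this diff both lean on the same idea: derive the channel count from the positional mask bits and accept only the canonical mask for that count. A simplified sketch, with a hand-written table standing in for audio_channel_out_mask_from_count() and only stereo and 5.1 filled in:

    #include <cstdio>
    #include <cstdint>

    static constexpr uint32_t kOutStereo  = 0x3;   // FL | FR
    static constexpr uint32_t kOut5Point1 = 0x3F;  // FL | FR | FC | LFE | BL | BR

    static bool isCanonicalPcmSinkMask(uint32_t mask, uint32_t maxChannels) {
        const uint32_t count = __builtin_popcount(mask);   // channel count from position bits
        if (count < 2 || count > maxChannels) {             // mono not accepted, cap at mixer limit
            return false;
        }
        switch (count) {
        case 2: return mask == kOutStereo;
        case 6: return mask == kOut5Point1;
        default: return false;                               // other counts omitted in this sketch
        }
    }

    int main() {
        std::printf("stereo: %d, 5.1: %d, FL+FC: %d\n",
                    isCanonicalPcmSinkMask(0x3, 8),
                    isCanonicalPcmSinkMask(0x3F, 8),
                    isCanonicalPcmSinkMask(0x5, 8));  // two bits set, but not the stereo pair
        return 0;
    }
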
+ const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask); + const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask); + const bool mixerChannelCountChanged = track.mMixerChannelCount != mixerChannelCount; + + ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX) + && trackChannelCount + && mixerChannelCount); + track.channelMask = trackChannelMask; + track.channelCount = trackChannelCount; + track.mMixerChannelMask = mixerChannelMask; + track.mMixerChannelCount = mixerChannelCount; + + // channel masks have changed, does this track need a downmixer? + // update to try using our desired format (if we aren't already using it) + const audio_format_t prevMixerInFormat = track.mMixerInFormat; + track.mMixerInFormat = kUseFloat && kUseNewMixer + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + const status_t status = initTrackDownmix(&mState.tracks[name], name); + ALOGE_IF(status != OK, + "initTrackDownmix error %d, track channel mask %#x, mixer channel mask %#x", + status, track.channelMask, track.mMixerChannelMask); + + const bool mixerInFormatChanged = prevMixerInFormat != track.mMixerInFormat; + if (mixerInFormatChanged) { + prepareTrackForReformat(&track, name); // because of downmixer, track format may change! + } + + if (track.resampler && (mixerInFormatChanged || mixerChannelCountChanged)) { + // resampler input format or channels may have changed. + const uint32_t resetToSampleRate = track.sampleRate; + delete track.resampler; + track.resampler = NULL; + track.sampleRate = mSampleRate; // without resampler, track rate is device sample rate. + // recreate the resampler with updated format, channels, saved sampleRate. + track.setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/); + } + return true; +} + +status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackName) { - uint32_t channelCount = popcount(mask); - ALOG_ASSERT((channelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX) && channelCount); - status_t status = OK; - if (channelCount > MAX_NUM_CHANNELS) { - pTrack->channelMask = mask; - pTrack->channelCount = channelCount; - ALOGV("initTrackDownmix(track=%d, mask=0x%x) calls prepareTrackForDownmix()", - trackNum, mask); - status = prepareTrackForDownmix(pTrack, trackNum); - } else { - unprepareTrackForDownmix(pTrack, trackNum); + // Only remix (upmix or downmix) if the track and mixer/device channel masks + // are not the same and not handled internally, as mono -> stereo currently is. 
+ if (pTrack->channelMask != pTrack->mMixerChannelMask + && !(pTrack->channelMask == AUDIO_CHANNEL_OUT_MONO + && pTrack->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) { + return prepareTrackForDownmix(pTrack, trackName); } - return status; + // no remix necessary + unprepareTrackForDownmix(pTrack, trackName); + return NO_ERROR; } -void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName) { +void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) { ALOGV("AudioMixer::unprepareTrackForDownmix(%d)", trackName); if (pTrack->downmixerBufferProvider != NULL) { // this track had previously been configured with a downmixer, delete it ALOGV(" deleting old downmixer"); - pTrack->bufferProvider = pTrack->downmixerBufferProvider->mTrackBufferProvider; delete pTrack->downmixerBufferProvider; pTrack->downmixerBufferProvider = NULL; + reconfigureBufferProviders(pTrack); } else { ALOGV(" nothing to do, no downmixer to delete"); } @@ -272,101 +610,66 @@ status_t AudioMixer::prepareTrackForDownmix(track_t* pTrack, int trackName) // discard the previous downmixer if there was one unprepareTrackForDownmix(pTrack, trackName); - - DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(); - int32_t status; - - if (!isMultichannelCapable) { - ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content", - trackName); - goto noDownmixForActiveTrack; - } - - if (EffectCreate(&dwnmFxDesc.uuid, - pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using random value*/, - &pDbp->mDownmixHandle/*pHandle*/) != 0) { - ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName); - goto noDownmixForActiveTrack; - } - - // channel input configuration will be overridden per-track - pDbp->mDownmixConfig.inputCfg.channels = pTrack->channelMask; - pDbp->mDownmixConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO; - pDbp->mDownmixConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT; - pDbp->mDownmixConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT; - pDbp->mDownmixConfig.inputCfg.samplingRate = pTrack->sampleRate; - pDbp->mDownmixConfig.outputCfg.samplingRate = pTrack->sampleRate; - pDbp->mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; - pDbp->mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE; - // input and output buffer provider, and frame count will not be used as the downmix effect - // process() function is called directly (see DownmixerBufferProvider::getNextBuffer()) - pDbp->mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | - EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE; - pDbp->mDownmixConfig.outputCfg.mask = pDbp->mDownmixConfig.inputCfg.mask; - - {// scope for local variables that are not used in goto label "noDownmixForActiveTrack" - int cmdStatus; - uint32_t replySize = sizeof(int); - - // Configure and enable downmixer - status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle, - EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/, - &pDbp->mDownmixConfig /*pCmdData*/, - &replySize /*replySize*/, &cmdStatus /*pReplyData*/); - if ((status != 0) || (cmdStatus != 0)) { - ALOGE("error %d while configuring downmixer for track %d", status, trackName); - goto noDownmixForActiveTrack; - } - replySize = sizeof(int); - status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle, - EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/, - &replySize /*replySize*/, &cmdStatus /*pReplyData*/); - if ((status != 0) || 
(cmdStatus != 0)) { - ALOGE("error %d while enabling downmixer for track %d", status, trackName); - goto noDownmixForActiveTrack; + if (DownmixerBufferProvider::isMultichannelCapable()) { + DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask, + pTrack->mMixerChannelMask, + AUDIO_FORMAT_PCM_16_BIT /* TODO: use pTrack->mMixerInFormat, now only PCM 16 */, + pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount); + + if (pDbp->isValid()) { // if constructor completed properly + pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix + pTrack->downmixerBufferProvider = pDbp; + reconfigureBufferProviders(pTrack); + return NO_ERROR; } + delete pDbp; + } - // Set downmix type - // parameter size rounded for padding on 32bit boundary - const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int); - const int downmixParamSize = - sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t); - effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize); - param->psize = sizeof(downmix_params_t); - const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE; - memcpy(param->data, &downmixParam, param->psize); - const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD; - param->vsize = sizeof(downmix_type_t); - memcpy(param->data + psizePadded, &downmixType, param->vsize); - - status = (*pDbp->mDownmixHandle)->command(pDbp->mDownmixHandle, - EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize/* cmdSize */, - param /*pCmndData*/, &replySize /*replySize*/, &cmdStatus /*pReplyData*/); - - free(param); - - if ((status != 0) || (cmdStatus != 0)) { - ALOGE("error %d while setting downmix type for track %d", status, trackName); - goto noDownmixForActiveTrack; - } else { - ALOGV("downmix type set to %d for track %d", (int) downmixType, trackName); - } - }// end of scope for local variables that are not used in goto label "noDownmixForActiveTrack" + // Effect downmixer does not accept the channel conversion. Let's use our remixer. + RemixBufferProvider* pRbp = new RemixBufferProvider(pTrack->channelMask, + pTrack->mMixerChannelMask, pTrack->mMixerInFormat, kCopyBufferFrameCount); + // Remix always finds a conversion whereas Downmixer effect above may fail. 
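
prepareTrackForDownmix() now picks either the effect-based DownmixerBufferProvider or the index-array RemixBufferProvider, and reconfigureBufferProviders() (further down in this diff) rebuilds the pull chain so the mixer always reads from the outermost wrapper: input, then optional reformat, then optional downmix/remix. A minimal sketch of that chaining, with a stand-in Provider interface in place of AudioBufferProvider:

    #include <cstdio>

    struct Provider {
        virtual ~Provider() = default;
        virtual const char* describe() const = 0;
    };

    struct InputProvider : Provider {
        const char* describe() const override { return "input"; }
    };

    struct Stage : Provider {                       // a wrapper that pulls from its upstream
        explicit Stage(const char* name) : mName(name) {}
        void setUpstream(Provider* p) { mUpstream = p; }
        const char* describe() const override { return mName; }
        Provider* mUpstream = nullptr;
        const char* mName;
    };

    // Rebuild the chain: input -> (reformat?) -> (downmix?), return the outermost provider.
    static Provider* reconfigure(Provider* input, Stage* reformat, Stage* downmix) {
        Provider* tail = input;
        if (reformat != nullptr) { reformat->setUpstream(tail); tail = reformat; }
        if (downmix  != nullptr) { downmix->setUpstream(tail);  tail = downmix;  }
        return tail;
    }

    int main() {
        InputProvider input;
        Stage reformat("reformat"), downmix("downmix");
        Provider* head = reconfigure(&input, &reformat, &downmix);

        // The mixer pulls from the outermost provider; each stage pulls from its upstream.
        for (Provider* p = head; p != nullptr;
             p = dynamic_cast<Stage*>(p) ? static_cast<Stage*>(p)->mUpstream : nullptr) {
            std::printf("%s%s", p->describe(), dynamic_cast<Stage*>(p) ? " <- " : "\n");
        }
        return 0;
    }
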
+ pTrack->downmixerBufferProvider = pRbp; + reconfigureBufferProviders(pTrack); + return NO_ERROR; +} - // initialization successful: - // - keep track of the real buffer provider in case it was set before - pDbp->mTrackBufferProvider = pTrack->bufferProvider; - // - we'll use the downmix effect integrated inside this - // track's buffer provider, and we'll use it as the track's buffer provider - pTrack->downmixerBufferProvider = pDbp; - pTrack->bufferProvider = pDbp; +void AudioMixer::unprepareTrackForReformat(track_t* pTrack, int trackName __unused) { + ALOGV("AudioMixer::unprepareTrackForReformat(%d)", trackName); + if (pTrack->mReformatBufferProvider != NULL) { + delete pTrack->mReformatBufferProvider; + pTrack->mReformatBufferProvider = NULL; + reconfigureBufferProviders(pTrack); + } +} +status_t AudioMixer::prepareTrackForReformat(track_t* pTrack, int trackName) +{ + ALOGV("AudioMixer::prepareTrackForReformat(%d) with format %#x", trackName, pTrack->mFormat); + // discard the previous reformatter if there was one + unprepareTrackForReformat(pTrack, trackName); + // only configure reformatter if needed + if (pTrack->mFormat != pTrack->mMixerInFormat) { + pTrack->mReformatBufferProvider = new ReformatBufferProvider( + audio_channel_count_from_out_mask(pTrack->channelMask), + pTrack->mFormat, pTrack->mMixerInFormat, + kCopyBufferFrameCount); + reconfigureBufferProviders(pTrack); + } return NO_ERROR; +} -noDownmixForActiveTrack: - delete pDbp; - pTrack->downmixerBufferProvider = NULL; - return NO_INIT; +void AudioMixer::reconfigureBufferProviders(track_t* pTrack) +{ + pTrack->bufferProvider = pTrack->mInputBufferProvider; + if (pTrack->mReformatBufferProvider) { + pTrack->mReformatBufferProvider->setBufferProvider(pTrack->bufferProvider); + pTrack->bufferProvider = pTrack->mReformatBufferProvider; + } + if (pTrack->downmixerBufferProvider) { + pTrack->downmixerBufferProvider->setBufferProvider(pTrack->bufferProvider); + pTrack->bufferProvider = pTrack->downmixerBufferProvider; + } } void AudioMixer::deleteTrackName(int name) @@ -385,6 +688,8 @@ void AudioMixer::deleteTrackName(int name) track.resampler = NULL; // delete the downmixer unprepareTrackForDownmix(&mState.tracks[name], name); + // delete the reformatter + unprepareTrackForReformat(&mState.tracks[name], name); mTrackNames &= ~(1<<name); } @@ -415,6 +720,73 @@ void AudioMixer::disable(int name) } } +/* Sets the volume ramp variables for the AudioMixer. + * + * The volume ramp variables are used to transition from the previous + * volume to the set volume. ramp controls the duration of the transition. + * Its value is typically one state framecount period, but may also be 0, + * meaning "immediate." + * + * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment + * even if there is a nonzero floating point increment (in that case, the volume + * change is immediate). This restriction should be changed when the legacy mixer + * is removed (see #2). + * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed + * when no longer needed. + * + * @param newVolume set volume target in floating point [0.0, 1.0]. + * @param ramp number of frames to increment over. if ramp is 0, the volume + * should be set immediately. Currently ramp should not exceed 65535 (frames). + * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return. + * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return. 
+ * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return. + * @param pSetVolume pointer to the float target volume, set on return. + * @param pPrevVolume pointer to the float previous volume, set on return. + * @param pVolumeInc pointer to the float increment per output audio frame, set on return. + * @return true if the volume has changed, false if volume is same. + */ +static inline bool setVolumeRampVariables(float newVolume, int32_t ramp, + int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc, + float *pSetVolume, float *pPrevVolume, float *pVolumeInc) { + if (newVolume == *pSetVolume) { + return false; + } + /* set the floating point volume variables */ + if (ramp != 0) { + *pVolumeInc = (newVolume - *pSetVolume) / ramp; + *pPrevVolume = *pSetVolume; + } else { + *pVolumeInc = 0; + *pPrevVolume = newVolume; + } + *pSetVolume = newVolume; + + /* set the legacy integer volume variables */ + int32_t intVolume = newVolume * AudioMixer::UNITY_GAIN_INT; + if (intVolume > AudioMixer::UNITY_GAIN_INT) { + intVolume = AudioMixer::UNITY_GAIN_INT; + } else if (intVolume < 0) { + ALOGE("negative volume %.7g", newVolume); + intVolume = 0; // should never happen, but for safety check. + } + if (intVolume == *pIntSetVolume) { + *pIntVolumeInc = 0; + /* TODO: integer/float workaround: ignore floating volume ramp */ + *pVolumeInc = 0; + *pPrevVolume = newVolume; + return true; + } + if (ramp != 0) { + *pIntVolumeInc = ((intVolume - *pIntSetVolume) << 16) / ramp; + *pIntPrevVolume = (*pIntVolumeInc == 0 ? intVolume : *pIntSetVolume) << 16; + } else { + *pIntVolumeInc = 0; + *pIntPrevVolume = intVolume << 16; + } + *pIntSetVolume = intVolume; + return true; +} + void AudioMixer::setParameter(int name, int target, int param, void *value) { name -= TRACK0; @@ -429,16 +801,10 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) case TRACK: switch (param) { case CHANNEL_MASK: { - audio_channel_mask_t mask = - static_cast<audio_channel_mask_t>(reinterpret_cast<uintptr_t>(value)); - if (track.channelMask != mask) { - uint32_t channelCount = popcount(mask); - ALOG_ASSERT((channelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX) && channelCount); - track.channelMask = mask; - track.channelCount = channelCount; - // the mask has changed, does this track need a downmixer? - initTrackDownmix(&mState.tracks[name], name, mask); - ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", mask); + const audio_channel_mask_t trackChannelMask = + static_cast<audio_channel_mask_t>(valueInt); + if (setChannelMasks(name, trackChannelMask, track.mMixerChannelMask)) { + ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask); invalidateState(1 << name); } } break; @@ -456,15 +822,37 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) invalidateState(1 << name); } break; - case FORMAT: - ALOG_ASSERT(valueInt == AUDIO_FORMAT_PCM_16_BIT); - break; + case FORMAT: { + audio_format_t format = static_cast<audio_format_t>(valueInt); + if (track.mFormat != format) { + ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format); + track.mFormat = format; + ALOGV("setParameter(TRACK, FORMAT, %#x)", format); + prepareTrackForReformat(&track, name); + invalidateState(1 << name); + } + } break; // FIXME do we want to support setting the downmix type from AudioFlinger? // for a specific track? or per mixer? 
/* case DOWNMIX_TYPE: break */ + case MIXER_FORMAT: { + audio_format_t format = static_cast<audio_format_t>(valueInt); + if (track.mMixerFormat != format) { + track.mMixerFormat = format; + ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format); + } + } break; + case MIXER_CHANNEL_MASK: { + const audio_channel_mask_t mixerChannelMask = + static_cast<audio_channel_mask_t>(valueInt); + if (setChannelMasks(name, track.channelMask, mixerChannelMask)) { + ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask); + invalidateState(1 << name); + } + } break; default: - LOG_FATAL("bad param"); + LOG_ALWAYS_FATAL("setParameter track: bad param %d", param); } break; @@ -489,85 +877,77 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) invalidateState(1 << name); break; default: - LOG_FATAL("bad param"); + LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param); } break; case RAMP_VOLUME: case VOLUME: switch (param) { - case VOLUME0: - case VOLUME1: - if (track.volume[param-VOLUME0] != valueInt) { - ALOGV("setParameter(VOLUME, VOLUME0/1: %04x)", valueInt); - track.prevVolume[param-VOLUME0] = track.volume[param-VOLUME0] << 16; - track.volume[param-VOLUME0] = valueInt; - if (target == VOLUME) { - track.prevVolume[param-VOLUME0] = valueInt << 16; - track.volumeInc[param-VOLUME0] = 0; - } else { - int32_t d = (valueInt<<16) - track.prevVolume[param-VOLUME0]; - int32_t volInc = d / int32_t(mState.frameCount); - track.volumeInc[param-VOLUME0] = volInc; - if (volInc == 0) { - track.prevVolume[param-VOLUME0] = valueInt << 16; - } - } - invalidateState(1 << name); - } - break; case AUXLEVEL: - //ALOG_ASSERT(0 <= valueInt && valueInt <= MAX_GAIN_INT, "bad aux level %d", valueInt); - if (track.auxLevel != valueInt) { - ALOGV("setParameter(VOLUME, AUXLEVEL: %04x)", valueInt); - track.prevAuxLevel = track.auxLevel << 16; - track.auxLevel = valueInt; - if (target == VOLUME) { - track.prevAuxLevel = valueInt << 16; - track.auxInc = 0; - } else { - int32_t d = (valueInt<<16) - track.prevAuxLevel; - int32_t volInc = d / int32_t(mState.frameCount); - track.auxInc = volInc; - if (volInc == 0) { - track.prevAuxLevel = valueInt << 16; - } - } + if (setVolumeRampVariables(*reinterpret_cast<float*>(value), + target == RAMP_VOLUME ? mState.frameCount : 0, + &track.auxLevel, &track.prevAuxLevel, &track.auxInc, + &track.mAuxLevel, &track.mPrevAuxLevel, &track.mAuxInc)) { + ALOGV("setParameter(%s, AUXLEVEL: %04x)", + target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track.auxLevel); invalidateState(1 << name); } break; default: - LOG_FATAL("bad param"); + if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) { + if (setVolumeRampVariables(*reinterpret_cast<float*>(value), + target == RAMP_VOLUME ? mState.frameCount : 0, + &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0], + &track.volumeInc[param - VOLUME0], + &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0], + &track.mVolumeInc[param - VOLUME0])) { + ALOGV("setParameter(%s, VOLUME%d: %04x)", + target == VOLUME ? 
"VOLUME" : "RAMP_VOLUME", param - VOLUME0, + track.volume[param - VOLUME0]); + invalidateState(1 << name); + } + } else { + LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param); + } } break; default: - LOG_FATAL("bad target"); + LOG_ALWAYS_FATAL("setParameter: bad target %d", target); } } -bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate) +bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate) { - if (value != devSampleRate || resampler != NULL) { - if (sampleRate != value) { - sampleRate = value; + if (trackSampleRate != devSampleRate || resampler != NULL) { + if (sampleRate != trackSampleRate) { + sampleRate = trackSampleRate; if (resampler == NULL) { - ALOGV("creating resampler from track %d Hz to device %d Hz", value, devSampleRate); + ALOGV("Creating resampler from track %d Hz to device %d Hz", + trackSampleRate, devSampleRate); AudioResampler::src_quality quality; // force lowest quality level resampler if use case isn't music or video // FIXME this is flawed for dynamic sample rates, as we choose the resampler // quality level based on the initial ratio, but that could change later. // Should have a way to distinguish tracks with static ratios vs. dynamic ratios. - if (!((value == 44100 && devSampleRate == 48000) || - (value == 48000 && devSampleRate == 44100))) { - quality = AudioResampler::LOW_QUALITY; + if (!((trackSampleRate == 44100 && devSampleRate == 48000) || + (trackSampleRate == 48000 && devSampleRate == 44100))) { + quality = AudioResampler::DYN_LOW_QUALITY; } else { quality = AudioResampler::DEFAULT_QUALITY; } + + // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer + // but if none exists, it is the channel count (1 for mono). + const int resamplerChannelCount = downmixerBufferProvider != NULL + ? mMixerChannelCount : channelCount; + ALOGVV("Creating resampler:" + " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n", + mMixerInFormat, resamplerChannelCount, devSampleRate, quality); resampler = AudioResampler::create( - format, - // the resampler sees the number of channels after the downmixer, if any - downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount, + mMixerInFormat, + resamplerChannelCount, devSampleRate, quality); resampler->setLocalTimeFreq(sLocalTimeFreq); } @@ -577,21 +957,57 @@ bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate) return false; } -inline -void AudioMixer::track_t::adjustVolumeRamp(bool aux) +/* Checks to see if the volume ramp has completed and clears the increment + * variables appropriately. + * + * FIXME: There is code to handle int/float ramp variable switchover should it not + * complete within a mixer buffer processing call, but it is preferred to avoid switchover + * due to precision issues. The switchover code is included for legacy code purposes + * and can be removed once the integer volume is removed. + * + * It is not sufficient to clear only the volumeInc integer variable because + * if one channel requires ramping, all channels are ramped. + * + * There is a bit of duplicated code here, but it keeps backward compatibility. 
+ */ +inline void AudioMixer::track_t::adjustVolumeRamp(bool aux, bool useFloat) { - for (uint32_t i=0 ; i<MAX_NUM_CHANNELS ; i++) { - if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) || - ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) { - volumeInc[i] = 0; - prevVolume[i] = volume[i]<<16; + if (useFloat) { + for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) { + if (mVolumeInc[i] != 0 && fabs(mVolume[i] - mPrevVolume[i]) <= fabs(mVolumeInc[i])) { + volumeInc[i] = 0; + prevVolume[i] = volume[i] << 16; + mVolumeInc[i] = 0.; + mPrevVolume[i] = mVolume[i]; + } else { + //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]); + prevVolume[i] = u4_28_from_float(mPrevVolume[i]); + } + } + } else { + for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) { + if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) || + ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) { + volumeInc[i] = 0; + prevVolume[i] = volume[i] << 16; + mVolumeInc[i] = 0.; + mPrevVolume[i] = mVolume[i]; + } else { + //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]); + mPrevVolume[i] = float_from_u4_28(prevVolume[i]); + } } } + /* TODO: aux is always integer regardless of output buffer type */ if (aux) { if (((auxInc>0) && (((prevAuxLevel+auxInc)>>16) >= auxLevel)) || - ((auxInc<0) && (((prevAuxLevel+auxInc)>>16) <= auxLevel))) { + ((auxInc<0) && (((prevAuxLevel+auxInc)>>16) <= auxLevel))) { auxInc = 0; - prevAuxLevel = auxLevel<<16; + prevAuxLevel = auxLevel << 16; + mAuxInc = 0.; + mPrevAuxLevel = mAuxLevel; + } else { + //ALOGV("aux ramp: %d %d %d", auxLevel << 16, prevAuxLevel, auxInc); } } } @@ -610,21 +1026,16 @@ void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider name -= TRACK0; ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name); - if (mState.tracks[name].downmixerBufferProvider != NULL) { - // update required? - if (mState.tracks[name].downmixerBufferProvider->mTrackBufferProvider != bufferProvider) { - ALOGV("AudioMixer::setBufferProvider(%p) for downmix", bufferProvider); - // setting the buffer provider for a track that gets downmixed consists in: - // 1/ setting the buffer provider to the "downmix / buffer provider" wrapper - // so it's the one that gets called when the buffer provider is needed, - mState.tracks[name].bufferProvider = mState.tracks[name].downmixerBufferProvider; - // 2/ saving the buffer provider for the track so the wrapper can use it - // when it downmixes. - mState.tracks[name].downmixerBufferProvider->mTrackBufferProvider = bufferProvider; - } - } else { - mState.tracks[name].bufferProvider = bufferProvider; + if (mState.tracks[name].mInputBufferProvider == bufferProvider) { + return; // don't reset any buffer providers if identical. + } + if (mState.tracks[name].mReformatBufferProvider != NULL) { + mState.tracks[name].mReformatBufferProvider->reset(); + } else if (mState.tracks[name].downmixerBufferProvider != NULL) { } + + mState.tracks[name].mInputBufferProvider = bufferProvider; + reconfigureBufferProviders(&mState.tracks[name]); } @@ -657,6 +1068,9 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) // compute everything we need... int countActiveTracks = 0; + // TODO: fix all16BitsStereNoResample logic to + // either properly handle muted tracks (it should ignore them) + // or remove altogether as an obsolete optimization. 
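// Illustrative aside (not part of this patch): setBufferProvider() above hands the
// new upstream provider to reconfigureBufferProviders(), which rebuilds the
// per-track chain that the mix hooks actually pull from, roughly:
//
//   AudioBufferProvider* p = track.mInputBufferProvider;       // app/track data
//   if (track.mReformatBufferProvider != NULL) {
//       track.mReformatBufferProvider->setBufferProvider(p);   // format conversion
//       p = track.mReformatBufferProvider;
//   }
//   if (track.downmixerBufferProvider != NULL) {
//       track.downmixerBufferProvider->setBufferProvider(p);   // channel conversion
//       p = track.downmixerBufferProvider;
//   }
//   track.bufferProvider = p;
//
// This is only a sketch of the responsibility; see the real helper for the
// authoritative ordering.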
bool all16BitsStereoNoResample = true; bool resampling = false; bool volumeRamp = false; @@ -668,39 +1082,47 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) countActiveTracks++; track_t& t = state->tracks[i]; uint32_t n = 0; + // FIXME can overflow (mask is only 3 bits) n |= NEEDS_CHANNEL_1 + t.channelCount - 1; - n |= NEEDS_FORMAT_16; - n |= t.doesResample() ? NEEDS_RESAMPLE_ENABLED : NEEDS_RESAMPLE_DISABLED; + if (t.doesResample()) { + n |= NEEDS_RESAMPLE; + } if (t.auxLevel != 0 && t.auxBuffer != NULL) { - n |= NEEDS_AUX_ENABLED; + n |= NEEDS_AUX; } if (t.volumeInc[0]|t.volumeInc[1]) { volumeRamp = true; } else if (!t.doesResample() && t.volumeRL == 0) { - n |= NEEDS_MUTE_ENABLED; + n |= NEEDS_MUTE; } t.needs = n; - if ((n & NEEDS_MUTE__MASK) == NEEDS_MUTE_ENABLED) { + if (n & NEEDS_MUTE) { t.hook = track__nop; } else { - if ((n & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) { + if (n & NEEDS_AUX) { all16BitsStereoNoResample = false; } - if ((n & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) { + if (n & NEEDS_RESAMPLE) { all16BitsStereoNoResample = false; resampling = true; - t.hook = track__genericResample; + t.hook = getTrackHook(TRACKTYPE_RESAMPLE, t.mMixerChannelCount, + t.mMixerInFormat, t.mMixerFormat); ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2, "Track %d needs downmix + resample", i); } else { if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){ - t.hook = track__16BitsMono; + t.hook = getTrackHook( + t.mMixerChannelCount == 2 // TODO: MONO_HACK. + ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE, + t.mMixerChannelCount, + t.mMixerInFormat, t.mMixerFormat); all16BitsStereoNoResample = false; } if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){ - t.hook = track__16BitsStereo; + t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, t.mMixerChannelCount, + t.mMixerInFormat, t.mMixerFormat); ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2, "Track %d needs downmix", i); } @@ -710,7 +1132,7 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) // select the processing hooks state->hook = process__nop; - if (countActiveTracks) { + if (countActiveTracks > 0) { if (resampling) { if (!state->outputTemp) { state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount]; @@ -731,7 +1153,17 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) state->hook = process__genericNoResampling; if (all16BitsStereoNoResample && !volumeRamp) { if (countActiveTracks == 1) { - state->hook = process__OneTrack16BitsStereoNoResampling; + const int i = 31 - __builtin_clz(state->enabledTracks); + track_t& t = state->tracks[i]; + if ((t.needs & NEEDS_MUTE) == 0) { + // The check prevents a muted track from acquiring a process hook. + // + // This is dangerous if the track is MONO as that requires + // special case handling due to implicit channel duplication. + // Stereo or Multichannel should actually be fine here. 
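// Worked example (illustrative, not part of the patch): when exactly one track is
// enabled, "31 - __builtin_clz(x)" above recovers its index directly from the
// enabled-track bitmask:
//
//   uint32_t enabledTracks = 1u << 5;            // only track 5 enabled
//   int i = 31 - __builtin_clz(enabledTracks);   // __builtin_clz == 26, so i == 5
//
// The same idiom is used throughout the process loops to walk set bits.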
+ state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK, + t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat); + } } } } @@ -746,16 +1178,15 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) // Now that the volume ramp has been done, set optimal state and // track hooks for subsequent mixer process - if (countActiveTracks) { + if (countActiveTracks > 0) { bool allMuted = true; uint32_t en = state->enabledTracks; while (en) { const int i = 31 - __builtin_clz(en); en &= ~(1<<i); track_t& t = state->tracks[i]; - if (!t.doesResample() && t.volumeRL == 0) - { - t.needs |= NEEDS_MUTE_ENABLED; + if (!t.doesResample() && t.volumeRL == 0) { + t.needs |= NEEDS_MUTE; t.hook = track__nop; } else { allMuted = false; @@ -765,7 +1196,11 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) state->hook = process__nop; } else if (all16BitsStereoNoResample) { if (countActiveTracks == 1) { - state->hook = process__OneTrack16BitsStereoNoResampling; + const int i = 31 - __builtin_clz(state->enabledTracks); + track_t& t = state->tracks[i]; + // Muted single tracks handled by allMuted above. + state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK, + t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat); } } } @@ -775,15 +1210,15 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux) { + ALOGVV("track__genericResample\n"); t->resampler->setSampleRate(t->sampleRate); // ramp gain - resample to temp buffer and scale/mix in 2nd step if (aux != NULL) { // always resample with unity gain when sending to auxiliary buffer to be able // to apply send level after resampling - // TODO: modify each resampler to support aux channel? 
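// Clarifying sketch (not part of the patch): when an aux send is active, the track
// is resampled at unity gain into the temp buffer so the send level can be applied
// to the already-resampled data in a second pass; conceptually:
//
//   t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
//   t->resampler->resample(temp, outFrameCount, t->bufferProvider);
//   applyVolumeAndAuxSend(out, temp, aux, outFrameCount);   // hypothetical name;
//                                                           // the real code uses
//                                                           // volumeRampStereo() or
//                                                           // volumeStereo()
//
// Baking the track volume into the resampler would leave no unscaled signal for
// the aux level to be applied to.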
- t->resampler->setVolume(UNITY_GAIN, UNITY_GAIN); - memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t)); + t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT); + memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(int32_t)); t->resampler->resample(temp, outFrameCount, t->bufferProvider); if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) { volumeRampStereo(t, out, outFrameCount, temp, aux); @@ -792,7 +1227,7 @@ void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFram } } else { if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) { - t->resampler->setVolume(UNITY_GAIN, UNITY_GAIN); + t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT); memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t)); t->resampler->resample(temp, outFrameCount, t->bufferProvider); volumeRampStereo(t, out, outFrameCount, temp, aux); @@ -800,14 +1235,14 @@ void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFram // constant gain else { - t->resampler->setVolume(t->volume[0], t->volume[1]); + t->resampler->setVolume(t->mVolume[0], t->mVolume[1]); t->resampler->resample(out, outFrameCount, t->bufferProvider); } } } -void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, - int32_t* aux) +void AudioMixer::track__nop(track_t* t __unused, int32_t* out __unused, + size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused) { } @@ -883,9 +1318,10 @@ void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32 } } -void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, - int32_t* aux) +void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, + int32_t* temp __unused, int32_t* aux) { + ALOGVV("track__16BitsStereo\n"); const int16_t *in = static_cast<const int16_t *>(t->in); if (CC_UNLIKELY(aux != NULL)) { @@ -974,9 +1410,10 @@ void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount t->in = in; } -void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, - int32_t* aux) +void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, + int32_t* temp __unused, int32_t* aux) { + ALOGVV("track__16BitsMono\n"); const int16_t *in = static_cast<int16_t const *>(t->in); if (CC_UNLIKELY(aux != NULL)) { @@ -1064,8 +1501,8 @@ void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, // no-op case void AudioMixer::process__nop(state_t* state, int64_t pts) { + ALOGVV("process__nop\n"); uint32_t e0 = state->enabledTracks; - size_t bufSize = state->frameCount * sizeof(int16_t) * MAX_NUM_CHANNELS; while (e0) { // process by group of tracks with same output buffer to // avoid multiple memset() on same buffer @@ -1084,7 +1521,8 @@ void AudioMixer::process__nop(state_t* state, int64_t pts) } e0 &= ~(e1); - memset(t1.mainBuffer, 0, bufSize); + memset(t1.mainBuffer, 0, state->frameCount * t1.mMixerChannelCount + * audio_bytes_per_sample(t1.mMixerFormat)); } while (e1) { @@ -1110,6 +1548,7 @@ void AudioMixer::process__nop(state_t* state, int64_t pts) // generic code without resampling void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) { + ALOGVV("process__genericNoResampling\n"); int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32))); // acquire each track's buffer @@ -1154,7 +1593,7 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) 
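// Sizing note (illustrative, not part of the patch): the main-buffer memsets and
// pointer advances above are now derived from the mixer output format and channel
// count instead of being hard-coded to packed 16-bit stereo. For example, assuming
// a 16-frame block of 5.1 (6-channel) float output:
//
//   size_t bytes = 16 /*frames*/ * 6 /*channels*/
//           * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);   // 4 bytes/sample
//   // bytes == 384, versus 16 * 2 * sizeof(int16_t) == 64 in the old stereo path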
track_t& t = state->tracks[i]; size_t outFrames = BLOCKSIZE; int32_t *aux = NULL; - if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) { + if (CC_UNLIKELY(t.needs & NEEDS_AUX)) { aux = t.auxBuffer + numFrames; } while (outFrames) { @@ -1166,9 +1605,9 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) break; } size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount; - if (inFrames) { - t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, - state->resampleTemp, aux); + if (inFrames > 0) { + t.hook(&t, outTemp + (BLOCKSIZE - outFrames) * t.mMixerChannelCount, + inFrames, state->resampleTemp, aux); t.frameCount -= inFrames; outFrames -= inFrames; if (CC_UNLIKELY(aux != NULL)) { @@ -1192,8 +1631,13 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) } } } - ditherAndClamp(out, outTemp, BLOCKSIZE); - out += BLOCKSIZE; + + convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat, + BLOCKSIZE * t1.mMixerChannelCount); + // TODO: fix ugly casting due to choice of out pointer type + out = reinterpret_cast<int32_t*>((uint8_t*)out + + BLOCKSIZE * t1.mMixerChannelCount + * audio_bytes_per_sample(t1.mMixerFormat)); numFrames += BLOCKSIZE; } while (numFrames < state->frameCount); } @@ -1212,10 +1656,9 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) // generic code with resampling void AudioMixer::process__genericResampling(state_t* state, int64_t pts) { + ALOGVV("process__genericResampling\n"); // this const just means that local variable outTemp doesn't change int32_t* const outTemp = state->outputTemp; - const size_t size = sizeof(int32_t) * MAX_NUM_CHANNELS * state->frameCount; - size_t numFrames = state->frameCount; uint32_t e0 = state->enabledTracks; @@ -1236,20 +1679,20 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) } e0 &= ~(e1); int32_t *out = t1.mainBuffer; - memset(outTemp, 0, size); + memset(outTemp, 0, sizeof(*outTemp) * t1.mMixerChannelCount * state->frameCount); while (e1) { const int i = 31 - __builtin_clz(e1); e1 &= ~(1<<i); track_t& t = state->tracks[i]; int32_t *aux = NULL; - if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) { + if (CC_UNLIKELY(t.needs & NEEDS_AUX)) { aux = t.auxBuffer; } // this is a little goofy, on the resampling case we don't // acquire/release the buffers because it's done by // the resampler. - if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) { + if (t.needs & NEEDS_RESAMPLE) { t.resampler->setPTS(pts); t.hook(&t, outTemp, numFrames, state->resampleTemp, aux); } else { @@ -1268,14 +1711,15 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) if (CC_UNLIKELY(aux != NULL)) { aux += outFrames; } - t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, + t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount, state->resampleTemp, aux); outFrames += t.buffer.frameCount; t.bufferProvider->releaseBuffer(&t.buffer); } } } - ditherAndClamp(out, outTemp, numFrames); + convertMixerFormat(out, t1.mMixerFormat, + outTemp, t1.mMixerInFormat, numFrames * t1.mMixerChannelCount); } } @@ -1283,6 +1727,7 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, int64_t pts) { + ALOGVV("process__OneTrack16BitsStereoNoResampling\n"); // This method is only called when state->enabledTracks has exactly // one bit set. 
The asserts below would verify this, but are commented out // since the whole point of this method is to optimize performance. @@ -1294,6 +1739,7 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, AudioBufferProvider::Buffer& b(t.buffer); int32_t* out = t.mainBuffer; + float *fout = reinterpret_cast<float*>(out); size_t numFrames = state->frameCount; const int16_t vl = t.volume[0]; @@ -1307,161 +1753,486 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, // in == NULL can happen if the track was flushed just after having // been enabled for mixing. - if (in == NULL || ((unsigned long)in & 3)) { - memset(out, 0, numFrames*MAX_NUM_CHANNELS*sizeof(int16_t)); - ALOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment pb: " - "buffer %p track %d, channels %d, needs %08x", - in, i, t.channelCount, t.needs); + if (in == NULL || (((uintptr_t)in) & 3)) { + memset(out, 0, numFrames + * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat)); + ALOGE_IF((((uintptr_t)in) & 3), + "process__OneTrack16BitsStereoNoResampling: misaligned buffer" + " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f", + in, i, t.channelCount, t.needs, vrl, t.mVolume[0], t.mVolume[1]); return; } size_t outFrames = b.frameCount; - if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) { - // volume is boosted, so we might need to clamp even though - // we process only one track. - do { - uint32_t rl = *reinterpret_cast<const uint32_t *>(in); - in += 2; - int32_t l = mulRL(1, rl, vrl) >> 12; - int32_t r = mulRL(0, rl, vrl) >> 12; - // clamping... - l = clamp16(l); - r = clamp16(r); - *out++ = (r<<16) | (l & 0xFFFF); - } while (--outFrames); - } else { + switch (t.mMixerFormat) { + case AUDIO_FORMAT_PCM_FLOAT: do { uint32_t rl = *reinterpret_cast<const uint32_t *>(in); in += 2; - int32_t l = mulRL(1, rl, vrl) >> 12; - int32_t r = mulRL(0, rl, vrl) >> 12; - *out++ = (r<<16) | (l & 0xFFFF); + int32_t l = mulRL(1, rl, vrl); + int32_t r = mulRL(0, rl, vrl); + *fout++ = float_from_q4_27(l); + *fout++ = float_from_q4_27(r); + // Note: In case of later int16_t sink output, + // conversion and clamping is done by memcpy_to_i16_from_float(). } while (--outFrames); + break; + case AUDIO_FORMAT_PCM_16_BIT: + if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) { + // volume is boosted, so we might need to clamp even though + // we process only one track. + do { + uint32_t rl = *reinterpret_cast<const uint32_t *>(in); + in += 2; + int32_t l = mulRL(1, rl, vrl) >> 12; + int32_t r = mulRL(0, rl, vrl) >> 12; + // clamping... 
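// Format note (illustrative, not part of the patch): mulRL() multiplies a 16-bit
// Q0.15 sample by a 16-bit U4.12 volume, so the 32-bit product is Q4.27. That is
// why the float branch above converts with float_from_q4_27(), while the 16-bit
// branch below shifts right by 12 and clamps back to Q0.15. For a full-scale
// sample at unity gain:
//
//   0x7FFF * 0x1000 == 0x07FFF000            // Q4.27
//   float_from_q4_27(0x07FFF000) ~= 0.99997  // float path
//   clamp16(0x07FFF000 >> 12) == 0x7FFF      // legacy 16-bit path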
+ l = clamp16(l); + r = clamp16(r); + *out++ = (r<<16) | (l & 0xFFFF); + } while (--outFrames); + } else { + do { + uint32_t rl = *reinterpret_cast<const uint32_t *>(in); + in += 2; + int32_t l = mulRL(1, rl, vrl) >> 12; + int32_t r = mulRL(0, rl, vrl) >> 12; + *out++ = (r<<16) | (l & 0xFFFF); + } while (--outFrames); + } + break; + default: + LOG_ALWAYS_FATAL("bad mixer format: %d", t.mMixerFormat); } numFrames -= b.frameCount; t.bufferProvider->releaseBuffer(&b); } } -#if 0 -// 2 tracks is also a common case -// NEVER used in current implementation of process__validate() -// only use if the 2 tracks have the same output buffer -void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state, - int64_t pts) +int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, + int outputFrameIndex) { - int i; - uint32_t en = state->enabledTracks; - - i = 31 - __builtin_clz(en); - const track_t& t0 = state->tracks[i]; - AudioBufferProvider::Buffer& b0(t0.buffer); + if (AudioBufferProvider::kInvalidPTS == basePTS) { + return AudioBufferProvider::kInvalidPTS; + } - en &= ~(1<<i); - i = 31 - __builtin_clz(en); - const track_t& t1 = state->tracks[i]; - AudioBufferProvider::Buffer& b1(t1.buffer); + return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate); +} - const int16_t *in0; - const int16_t vl0 = t0.volume[0]; - const int16_t vr0 = t0.volume[1]; - size_t frameCount0 = 0; +/*static*/ uint64_t AudioMixer::sLocalTimeFreq; +/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT; - const int16_t *in1; - const int16_t vl1 = t1.volume[0]; - const int16_t vr1 = t1.volume[1]; - size_t frameCount1 = 0; +/*static*/ void AudioMixer::sInitRoutine() +{ + LocalClock lc; + sLocalTimeFreq = lc.getLocalFreq(); // for the resampler - //FIXME: only works if two tracks use same buffer - int32_t* out = t0.mainBuffer; - size_t numFrames = state->frameCount; - const int16_t *buff = NULL; + DownmixerBufferProvider::init(); // for the downmixer +} +/* TODO: consider whether this level of optimization is necessary. + * Perhaps just stick with a single for loop. + */ + +// Needs to derive a compile time constant (constexpr). Could be targeted to go +// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication. +#define MIXTYPE_MONOVOL(mixtype) (mixtype == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \ + mixtype == MIXTYPE_MULTI_SAVEONLY ? 
MIXTYPE_MULTI_SAVEONLY_MONOVOL : mixtype) + +/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template <int MIXTYPE, + typename TO, typename TI, typename TV, typename TA, typename TAV> +static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount, + const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc) +{ + switch (channels) { + case 1: + volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 2: + volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 3: + volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 4: + volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 5: + volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 6: + volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 7: + volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + case 8: + volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, + frameCount, in, aux, vol, volinc, vola, volainc); + break; + } +} - while (numFrames) { +/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template <int MIXTYPE, + typename TO, typename TI, typename TV, typename TA, typename TAV> +static void volumeMulti(uint32_t channels, TO* out, size_t frameCount, + const TI* in, TA* aux, const TV *vol, TAV vola) +{ + switch (channels) { + case 1: + volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola); + break; + case 2: + volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola); + break; + case 3: + volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola); + break; + case 4: + volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola); + break; + case 5: + volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola); + break; + case 6: + volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola); + break; + case 7: + volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola); + break; + case 8: + volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola); + break; + } +} - if (frameCount0 == 0) { - b0.frameCount = numFrames; - int64_t outputPTS = calculateOutputPTS(t0, pts, - out - t0.mainBuffer); - t0.bufferProvider->getNextBuffer(&b0, outputPTS); - if (b0.i16 == NULL) { - if (buff == NULL) { - buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount]; - } - in0 = buff; - b0.frameCount = numFrames; - } else { - in0 = b0.i16; +/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * USEFLOATVOL (set to true if float volume is used) + * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL, + typename TO, typename TI, typename TA> +void AudioMixer::volumeMix(TO *out, size_t outFrames, + const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t) +{ + if (USEFLOATVOL) { + if 
(ramp) { + volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux, + t->mPrevVolume, t->mVolumeInc, &t->prevAuxLevel, t->auxInc); + if (ADJUSTVOL) { + t->adjustVolumeRamp(aux != NULL, true); } - frameCount0 = b0.frameCount; + } else { + volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux, + t->mVolume, t->auxLevel); } - if (frameCount1 == 0) { - b1.frameCount = numFrames; - int64_t outputPTS = calculateOutputPTS(t1, pts, - out - t0.mainBuffer); - t1.bufferProvider->getNextBuffer(&b1, outputPTS); - if (b1.i16 == NULL) { - if (buff == NULL) { - buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount]; - } - in1 = buff; - b1.frameCount = numFrames; - } else { - in1 = b1.i16; + } else { + if (ramp) { + volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux, + t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc); + if (ADJUSTVOL) { + t->adjustVolumeRamp(aux != NULL); } - frameCount1 = b1.frameCount; + } else { + volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux, + t->volume, t->auxLevel); } + } +} - size_t outFrames = frameCount0 < frameCount1?frameCount0:frameCount1; - - numFrames -= outFrames; - frameCount0 -= outFrames; - frameCount1 -= outFrames; +/* This process hook is called when there is a single track without + * aux buffer, volume ramp, or resampling. + * TODO: Update the hook selection: this can properly handle aux and ramp. + * + * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template <int MIXTYPE, typename TO, typename TI, typename TA> +void AudioMixer::process_NoResampleOneTrack(state_t* state, int64_t pts) +{ + ALOGVV("process_NoResampleOneTrack\n"); + // CLZ is faster than CTZ on ARM, though really not sure if true after 31 - clz. + const int i = 31 - __builtin_clz(state->enabledTracks); + ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled"); + track_t *t = &state->tracks[i]; + const uint32_t channels = t->mMixerChannelCount; + TO* out = reinterpret_cast<TO*>(t->mainBuffer); + TA* aux = reinterpret_cast<TA*>(t->auxBuffer); + const bool ramp = t->needsRamp(); + + for (size_t numFrames = state->frameCount; numFrames; ) { + AudioBufferProvider::Buffer& b(t->buffer); + // get input buffer + b.frameCount = numFrames; + const int64_t outputPTS = calculateOutputPTS(*t, pts, state->frameCount - numFrames); + t->bufferProvider->getNextBuffer(&b, outputPTS); + const TI *in = reinterpret_cast<TI*>(b.raw); - do { - int32_t l0 = *in0++; - int32_t r0 = *in0++; - l0 = mul(l0, vl0); - r0 = mul(r0, vr0); - int32_t l = *in1++; - int32_t r = *in1++; - l = mulAdd(l, vl1, l0) >> 12; - r = mulAdd(r, vr1, r0) >> 12; - // clamping... - l = clamp16(l); - r = clamp16(r); - *out++ = (r<<16) | (l & 0xFFFF); - } while (--outFrames); - - if (frameCount0 == 0) { - t0.bufferProvider->releaseBuffer(&b0); + // in == NULL can happen if the track was flushed just after having + // been enabled for mixing. 
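// Illustrative note (not part of the patch): the USEFLOATVOL template flag of
// volumeMix() above is resolved at compile time from the input sample type
// (callers pass is_same<TI, float>::value), so a float pipeline reads the float
// volume state (mVolume/mPrevVolume/mVolumeInc) while a 16-bit pipeline keeps
// using the legacy U4.12 integers. A typical instantiation, matching the call
// just below for TI == float:
//
//   volumeMix<MIXTYPE_MULTI_SAVEONLY, true /*float volumes*/, false /*no adjust*/>(
//           out, outFrames, in, aux, ramp, t);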
+ if (in == NULL || (((uintptr_t)in) & 3)) { + memset(out, 0, numFrames + * channels * audio_bytes_per_sample(t->mMixerFormat)); + ALOGE_IF((((uintptr_t)in) & 3), "process_NoResampleOneTrack: bus error: " + "buffer %p track %p, channels %d, needs %#x", + in, t, t->channelCount, t->needs); + return; } - if (frameCount1 == 0) { - t1.bufferProvider->releaseBuffer(&b1); + + const size_t outFrames = b.frameCount; + volumeMix<MIXTYPE, is_same<TI, float>::value, false> ( + out, outFrames, in, aux, ramp, t); + + out += outFrames * channels; + if (aux != NULL) { + aux += channels; } + numFrames -= b.frameCount; + + // release buffer + t->bufferProvider->releaseBuffer(&b); } + if (ramp) { + t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value); + } +} - delete [] buff; +/* This track hook is called to do resampling then mixing, + * pulling from the track's upstream AudioBufferProvider. + * + * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template <int MIXTYPE, typename TO, typename TI, typename TA> +void AudioMixer::track__Resample(track_t* t, TO* out, size_t outFrameCount, TO* temp, TA* aux) +{ + ALOGVV("track__Resample\n"); + t->resampler->setSampleRate(t->sampleRate); + const bool ramp = t->needsRamp(); + if (ramp || aux != NULL) { + // if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step. + // if aux != NULL: resample with unity gain to temp buffer then apply send level. + + t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT); + memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(TO)); + t->resampler->resample((int32_t*)temp, outFrameCount, t->bufferProvider); + + volumeMix<MIXTYPE, is_same<TI, float>::value, true>( + out, outFrameCount, temp, aux, ramp, t); + + } else { // constant volume gain + t->resampler->setVolume(t->mVolume[0], t->mVolume[1]); + t->resampler->resample((int32_t*)out, outFrameCount, t->bufferProvider); + } } -#endif -int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, - int outputFrameIndex) +/* This track hook is called to mix a track, when no resampling is required. + * The input buffer should be present in t->in. + * + * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ +template <int MIXTYPE, typename TO, typename TI, typename TA> +void AudioMixer::track__NoResample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux) { - if (AudioBufferProvider::kInvalidPTS == basePTS) - return AudioBufferProvider::kInvalidPTS; + ALOGVV("track__NoResample\n"); + const TI *in = static_cast<const TI *>(t->in); - return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate); + volumeMix<MIXTYPE, is_same<TI, float>::value, true>( + out, frameCount, in, aux, t->needsRamp(), t); + + // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels. + // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels. + in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * t->mMixerChannelCount; + t->in = in; } -/*static*/ uint64_t AudioMixer::sLocalTimeFreq; -/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT; +/* The Mixer engine generates either int32_t (Q4_27) or float data. 
+ * We use this function to convert the engine buffers + * to the desired mixer output format, either int16_t (Q.15) or float. + */ +void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat, + void *in, audio_format_t mixerInFormat, size_t sampleCount) +{ + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out + break; + case AUDIO_FORMAT_PCM_16_BIT: + memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount); + break; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + case AUDIO_FORMAT_PCM_16_BIT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + memcpy_to_float_from_q4_27((float*)out, (int32_t*)in, sampleCount); + break; + case AUDIO_FORMAT_PCM_16_BIT: + // two int16_t are produced per iteration + ditherAndClamp((int32_t*)out, (int32_t*)in, sampleCount >> 1); + break; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } +} -/*static*/ void AudioMixer::sInitRoutine() +/* Returns the proper track hook to use for mixing the track into the output buffer. + */ +AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, uint32_t channelCount, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused) { - LocalClock lc; - sLocalTimeFreq = lc.getLocalFreq(); + if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) { + switch (trackType) { + case TRACKTYPE_NOP: + return track__nop; + case TRACKTYPE_RESAMPLE: + return track__genericResample; + case TRACKTYPE_NORESAMPLEMONO: + return track__16BitsMono; + case TRACKTYPE_NORESAMPLE: + return track__16BitsStereo; + default: + LOG_ALWAYS_FATAL("bad trackType: %d", trackType); + break; + } + } + LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS); + switch (trackType) { + case TRACKTYPE_NOP: + return track__nop; + case TRACKTYPE_RESAMPLE: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__Resample<MIXTYPE_MULTI, float /*TO*/, float /*TI*/, int32_t /*TA*/>; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t)\ + track__Resample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + case TRACKTYPE_NORESAMPLEMONO: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__NoResample<MIXTYPE_MONOEXPAND, float, float, int32_t>; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t) + track__NoResample<MIXTYPE_MONOEXPAND, int32_t, int16_t, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + case TRACKTYPE_NORESAMPLE: + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return (AudioMixer::hook_t) + track__NoResample<MIXTYPE_MULTI, float, float, int32_t>; + case AUDIO_FORMAT_PCM_16_BIT: + return (AudioMixer::hook_t) + track__NoResample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad trackType: %d", trackType); + break; + } + return NULL; +} + +/* Returns the proper process hook for mixing tracks. Currently works only for + * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling. 
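 *
 * For example (illustrative): a float mixer engine feeding a 16-bit sink returns
 * process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, int16_t, float, int32_t>,
 * as shown in the switch below; the hook both mixes and converts to the sink
 * format in a single pass.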
+ * + * TODO: Due to the special mixing considerations of duplicating to + * a stereo output track, the input track cannot be MONO. This should be + * prevented by the caller. + */ +AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat) +{ + if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK + LOG_ALWAYS_FATAL("bad processType: %d", processType); + return NULL; + } + if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) { + return process__OneTrack16BitsStereoNoResampling; + } + LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS); + switch (mixerInFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, + float /*TO*/, float /*TI*/, int32_t /*TA*/>; + case AUDIO_FORMAT_PCM_16_BIT: + return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, + int16_t, float, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + case AUDIO_FORMAT_PCM_16_BIT: + switch (mixerOutFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, + float, int16_t, int32_t>; + case AUDIO_FORMAT_PCM_16_BIT: + return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY, + int16_t, int16_t, int32_t>; + default: + LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat); + break; + } + break; + default: + LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat); + break; + } + return NULL; } // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h index 43aeb86..3b972bb 100644 --- a/services/audioflinger/AudioMixer.h +++ b/services/audioflinger/AudioMixer.h @@ -26,10 +26,13 @@ #include <media/AudioBufferProvider.h> #include "AudioResampler.h" -#include <audio_effects/effect_downmix.h> +#include <hardware/audio_effect.h> #include <system/audio.h> #include <media/nbaio/NBLog.h> +// FIXME This is actually unity gain, which might not be max in future, expressed in U.12 +#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT + namespace android { // ---------------------------------------------------------------------------- @@ -48,14 +51,14 @@ public: static const uint32_t MAX_NUM_TRACKS = 32; // maximum number of channels supported by the mixer - // This mixer has a hard-coded upper limit of 2 channels for output. - // There is support for > 2 channel tracks down-mixed to 2 channel output via a down-mix effect. - // Adding support for > 2 channel output would require more than simply changing this value. - static const uint32_t MAX_NUM_CHANNELS = 2; + // This mixer has a hard-coded upper limit of 8 channels for output. 
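// Gain-encoding example (illustrative, not part of the patch): UNITY_GAIN_INT
// below is 0x1000, i.e. 1.0 in U4.12, and UNITY_GAIN_FLOAT is 1.0f. A float gain
// converts to the legacy integer form roughly as:
//
//   int16_t vol_u4_12 = (int16_t)(gainFloat * (1 << 12));   // 0.5f -> 0x0800
//
// and applying it to a Q0.15 sample is a multiply followed by a 12-bit shift,
// e.g. (0x4000 * 0x0800) >> 12 == 0x2000 (half scale at half gain).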
+ static const uint32_t MAX_NUM_CHANNELS = 8; + static const uint32_t MAX_NUM_VOLUMES = 2; // stereo volume only // maximum number of channels supported for the content - static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = 8; + static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX; - static const uint16_t UNITY_GAIN = 0x1000; + static const uint16_t UNITY_GAIN_INT = 0x1000; + static const float UNITY_GAIN_FLOAT = 1.0f; enum { // names @@ -77,6 +80,8 @@ public: MAIN_BUFFER = 0x4002, AUX_BUFFER = 0x4003, DOWNMIX_TYPE = 0X4004, + MIXER_FORMAT = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT) + MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output // for target RESAMPLE SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name; // parameter 'value' is the new sample rate in Hz. @@ -90,6 +95,7 @@ public: REMOVE = 0x4102, // Remove the sample rate converter on this track name; // the track is restored to the mix sample rate. // for target RAMP_VOLUME and VOLUME (8 channels max) + // FIXME use float for these 3 to improve the dynamic range VOLUME0 = 0x4200, VOLUME1 = 0x4201, AUXLEVEL = 0x4210, @@ -99,7 +105,10 @@ public: // For all APIs with "name": TRACK0 <= name < TRACK0 + MAX_NUM_TRACKS // Allocate a track name. Returns new track name if successful, -1 on failure. - int getTrackName(audio_channel_mask_t channelMask, int sessionId); + // The failure could be because of an invalid channelMask or format, or that + // the track capacity of the mixer is exceeded. + int getTrackName(audio_channel_mask_t channelMask, + audio_format_t format, int sessionId); // Free an allocated track by name void deleteTrackName(int name); @@ -117,35 +126,34 @@ public: size_t getUnreleasedFrames(int name) const; + static inline bool isValidPcmTrackFormat(audio_format_t format) { + return format == AUDIO_FORMAT_PCM_16_BIT || + format == AUDIO_FORMAT_PCM_24_BIT_PACKED || + format == AUDIO_FORMAT_PCM_32_BIT || + format == AUDIO_FORMAT_PCM_FLOAT; + } + private: enum { + // FIXME this representation permits up to 8 channels NEEDS_CHANNEL_COUNT__MASK = 0x00000007, - NEEDS_FORMAT__MASK = 0x000000F0, - NEEDS_MUTE__MASK = 0x00000100, - NEEDS_RESAMPLE__MASK = 0x00001000, - NEEDS_AUX__MASK = 0x00010000, }; enum { - NEEDS_CHANNEL_1 = 0x00000000, - NEEDS_CHANNEL_2 = 0x00000001, - - NEEDS_FORMAT_16 = 0x00000010, + NEEDS_CHANNEL_1 = 0x00000000, // mono + NEEDS_CHANNEL_2 = 0x00000001, // stereo - NEEDS_MUTE_DISABLED = 0x00000000, - NEEDS_MUTE_ENABLED = 0x00000100, + // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT - NEEDS_RESAMPLE_DISABLED = 0x00000000, - NEEDS_RESAMPLE_ENABLED = 0x00001000, - - NEEDS_AUX_DISABLED = 0x00000000, - NEEDS_AUX_ENABLED = 0x00010000, + NEEDS_MUTE = 0x00000100, + NEEDS_RESAMPLE = 0x00001000, + NEEDS_AUX = 0x00010000, }; struct state_t; struct track_t; - class DownmixerBufferProvider; + class CopyBufferProvider; typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux); @@ -154,16 +162,17 @@ private: struct track_t { uint32_t needs; + // TODO: Eventually remove legacy integer volume settings union { - int16_t volume[MAX_NUM_CHANNELS]; // [0]3.12 fixed point + int16_t volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero) int32_t volumeRL; }; - int32_t prevVolume[MAX_NUM_CHANNELS]; + int32_t prevVolume[MAX_NUM_VOLUMES]; // 16-byte boundary - int32_t volumeInc[MAX_NUM_CHANNELS]; + int32_t volumeInc[MAX_NUM_VOLUMES]; int32_t auxInc; int32_t prevAuxLevel; @@ -173,7 
+182,7 @@ private: uint16_t frameCount; uint8_t channelCount; // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK) - uint8_t format; // always 16 + uint8_t unused_padding; // formerly format, was always 16 uint16_t enabled; // actually bool audio_channel_mask_t channelMask; @@ -196,48 +205,159 @@ private: int32_t* auxBuffer; // 16-byte boundary - - DownmixerBufferProvider* downmixerBufferProvider; // 4 bytes + AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider. + CopyBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting. + CopyBufferProvider* downmixerBufferProvider; // wrapper for channel conversion. int32_t sessionId; - int32_t padding[2]; + // 16-byte boundary + audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT) + audio_format_t mFormat; // input track format + audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT) + // each track must be converted to this format. + + float mVolume[MAX_NUM_VOLUMES]; // floating point set volume + float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume + float mVolumeInc[MAX_NUM_VOLUMES]; // floating point volume increment + + float mAuxLevel; // floating point set aux level + float mPrevAuxLevel; // floating point prev aux level + float mAuxInc; // floating point aux increment // 16-byte boundary + audio_channel_mask_t mMixerChannelMask; + uint32_t mMixerChannelCount; - bool setResampler(uint32_t sampleRate, uint32_t devSampleRate); + bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; } + bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate); bool doesResample() const { return resampler != NULL; } void resetResampler() { if (resampler != NULL) resampler->reset(); } - void adjustVolumeRamp(bool aux); + void adjustVolumeRamp(bool aux, bool useFloat = false); size_t getUnreleasedFrames() const { return resampler != NULL ? resampler->getUnreleasedFrames() : 0; }; }; + typedef void (*process_hook_t)(state_t* state, int64_t pts); + // pad to 32-bytes to fill cache line struct state_t { uint32_t enabledTracks; uint32_t needsChanged; size_t frameCount; - void (*hook)(state_t* state, int64_t pts); // one of process__*, never NULL + process_hook_t hook; // one of process__*, never NULL int32_t *outputTemp; int32_t *resampleTemp; NBLog::Writer* mLog; int32_t reserved[1]; // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS - track_t tracks[MAX_NUM_TRACKS]; __attribute__((aligned(32))); + track_t tracks[MAX_NUM_TRACKS] __attribute__((aligned(32))); }; - // AudioBufferProvider that wraps a track AudioBufferProvider by a call to a downmix effect - class DownmixerBufferProvider : public AudioBufferProvider { + // Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider, + // and ReformatBufferProvider. + // It handles a private buffer for use in converting format or channel masks from the + // input data to a form acceptable by the mixer. + // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the + // processing pipeline. + class CopyBufferProvider : public AudioBufferProvider { public: + // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes). + // If bufferFrameCount is 0, no private buffer is created and in-place modification of + // the upstream buffer provider's buffers is performed by copyFrames(). 
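// Usage sketch (illustrative, not part of the patch) of how a CopyBufferProvider
// derivative is driven; "consume" and "framesWanted" are stand-ins, not real names:
//
//   AudioBufferProvider::Buffer buf;
//   buf.frameCount = framesWanted;
//   provider->getNextBuffer(&buf, pts);   // pulls from the upstream provider and,
//                                         // if a private buffer exists, fills it
//                                         // via copyFrames()
//   consume(buf.raw, buf.frameCount);     // hypothetical consumer of converted data
//   provider->releaseBuffer(&buf);        // returns the frames upstream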
+ CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize, + size_t bufferFrameCount); + virtual ~CopyBufferProvider(); + + // Overrides AudioBufferProvider methods virtual status_t getNextBuffer(Buffer* buffer, int64_t pts); virtual void releaseBuffer(Buffer* buffer); - DownmixerBufferProvider(); - virtual ~DownmixerBufferProvider(); + // Other public methods + + // call this to release the buffer to the upstream provider. + // treat it as an audio discontinuity for future samples. + virtual void reset(); + + // this function should be supplied by the derived class. It converts + // #frames in the *src pointer to the *dst pointer. It is public because + // some providers will allow this to work on arbitrary buffers outside + // of the internal buffers. + virtual void copyFrames(void *dst, const void *src, size_t frames) = 0; + + // set the upstream buffer provider. Consider calling "reset" before this function. + void setBufferProvider(AudioBufferProvider *p) { + mTrackBufferProvider = p; + } + + protected: AudioBufferProvider* mTrackBufferProvider; + const size_t mInputFrameSize; + const size_t mOutputFrameSize; + private: + AudioBufferProvider::Buffer mBuffer; + const size_t mLocalBufferFrameCount; + void* mLocalBufferData; + size_t mConsumed; + }; + + // DownmixerBufferProvider wraps a track AudioBufferProvider to provide + // position dependent downmixing by an Audio Effect. + class DownmixerBufferProvider : public CopyBufferProvider { + public: + DownmixerBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount); + virtual ~DownmixerBufferProvider(); + virtual void copyFrames(void *dst, const void *src, size_t frames); + bool isValid() const { return mDownmixHandle != NULL; } + + static status_t init(); + static bool isMultichannelCapable() { return sIsMultichannelCapable; } + + protected: effect_handle_t mDownmixHandle; effect_config_t mDownmixConfig; + + // effect descriptor for the downmixer used by the mixer + static effect_descriptor_t sDwnmFxDesc; + // indicates whether a downmix effect has been found and is usable by this mixer + static bool sIsMultichannelCapable; + // FIXME: should we allow effects outside of the framework? + // We need to here. A special ioId that must be <= -2 so it does not map to a session. + static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2; + }; + + // RemixBufferProvider wraps a track AudioBufferProvider to perform an + // upmix or downmix to the proper channel count and mask. + class RemixBufferProvider : public CopyBufferProvider { + public: + RemixBufferProvider(audio_channel_mask_t inputChannelMask, + audio_channel_mask_t outputChannelMask, audio_format_t format, + size_t bufferFrameCount); + virtual void copyFrames(void *dst, const void *src, size_t frames); + + protected: + const audio_format_t mFormat; + const size_t mSampleSize; + const size_t mInputChannels; + const size_t mOutputChannels; + int8_t mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices + }; + + // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data + // to an acceptable mixer input format type. 
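// Illustrative sketch (not part of the patch): a 16-bit-to-float conversion in a
// copyFrames() override could look roughly like the following, assuming the
// audio_utils primitive memcpy_to_float_from_i16() is available:
//
//   void copyFrames(void *dst, const void *src, size_t frames) {
//       // frames are interleaved, so convert frames * mChannels samples
//       memcpy_to_float_from_i16((float*)dst, (const int16_t*)src,
//               frames * mChannels);
//   }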
+ class ReformatBufferProvider : public CopyBufferProvider { + public: + ReformatBufferProvider(int32_t channels, + audio_format_t inputFormat, audio_format_t outputFormat, + size_t bufferFrameCount); + virtual void copyFrames(void *dst, const void *src, size_t frames); + + protected: + const int32_t mChannels; + const audio_format_t mInputFormat; + const audio_format_t mOutputFormat; }; // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc. @@ -255,18 +375,20 @@ public: private: state_t mState __attribute__((aligned(32))); - // effect descriptor for the downmixer used by the mixer - static effect_descriptor_t dwnmFxDesc; - // indicates whether a downmix effect has been found and is usable by this mixer - static bool isMultichannelCapable; - // Call after changing either the enabled status of a track, or parameters of an enabled track. // OK to call more often than that, but unnecessary. void invalidateState(uint32_t mask); - static status_t initTrackDownmix(track_t* pTrack, int trackNum, audio_channel_mask_t mask); + bool setChannelMasks(int name, + audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask); + + // TODO: remove unused trackName/trackNum from functions below. + static status_t initTrackDownmix(track_t* pTrack, int trackName); static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum); static void unprepareTrackForDownmix(track_t* pTrack, int trackName); + static status_t prepareTrackForReformat(track_t* pTrack, int trackNum); + static void unprepareTrackForReformat(track_t* pTrack, int trackName); + static void reconfigureBufferProviders(track_t* pTrack); static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux); @@ -286,10 +408,6 @@ private: static void process__genericResampling(state_t* state, int64_t pts); static void process__OneTrack16BitsStereoNoResampling(state_t* state, int64_t pts); -#if 0 - static void process__TwoTracks16BitsStereoNoResampling(state_t* state, - int64_t pts); -#endif static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS, int outputFrameIndex); @@ -297,6 +415,53 @@ private: static uint64_t sLocalTimeFreq; static pthread_once_t sOnceControl; static void sInitRoutine(); + + /* multi-format volume mixing function (calls template functions + * in AudioMixerOps.h). 
The template parameters are as follows: + * + * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration) + * USEFLOATVOL (set to true if float volume is used) + * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards) + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TA: int32_t (Q4.27) + */ + template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL, + typename TO, typename TI, typename TA> + static void volumeMix(TO *out, size_t outFrames, + const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t); + + // multi-format process hooks + template <int MIXTYPE, typename TO, typename TI, typename TA> + static void process_NoResampleOneTrack(state_t* state, int64_t pts); + + // multi-format track hooks + template <int MIXTYPE, typename TO, typename TI, typename TA> + static void track__Resample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux); + template <int MIXTYPE, typename TO, typename TI, typename TA> + static void track__NoResample(track_t* t, TO* out, size_t frameCount, + TO* temp __unused, TA* aux); + + static void convertMixerFormat(void *out, audio_format_t mixerOutFormat, + void *in, audio_format_t mixerInFormat, size_t sampleCount); + + // hook types + enum { + PROCESSTYPE_NORESAMPLEONETRACK, + }; + enum { + TRACKTYPE_NOP, + TRACKTYPE_RESAMPLE, + TRACKTYPE_NORESAMPLE, + TRACKTYPE_NORESAMPLEMONO, + }; + + // functions for determining the proper process and track hooks. + static process_hook_t getProcessHook(int processType, uint32_t channelCount, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat); + static hook_t getTrackHook(int trackType, uint32_t channelCount, + audio_format_t mixerInFormat, audio_format_t mixerOutFormat); }; // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioMixerOps.h b/services/audioflinger/AudioMixerOps.h new file mode 100644 index 0000000..f7376a8 --- /dev/null +++ b/services/audioflinger/AudioMixerOps.h @@ -0,0 +1,454 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_MIXER_OPS_H +#define ANDROID_AUDIO_MIXER_OPS_H + +namespace android { + +/* Behavior of is_same<>::value is true if the types are identical, + * false otherwise. Identical to the STL std::is_same. + */ +template<typename T, typename U> +struct is_same +{ + static const bool value = false; +}; + +template<typename T> +struct is_same<T, T> // partial specialization +{ + static const bool value = true; +}; + + +/* MixMul is a multiplication operator to scale an audio input signal + * by a volume gain, with the formula: + * + * O(utput) = I(nput) * V(olume) + * + * The output, input, and volume may have different types. + * There are 27 variants, of which 14 are actually defined in an + * explicitly templated class. 
+ * + * The following type variables and the underlying meaning: + * + * Output type TO: int32_t (Q4.27) or int16_t (Q.15) or float [-1,1] + * Input signal type TI: int32_t (Q4.27) or int16_t (Q.15) or float [-1,1] + * Volume type TV: int32_t (U4.28) or int16_t (U4.12) or float [-1,1] + * + * For high precision audio, only the <TO, TI, TV> = <float, float, float> + * needs to be accelerated. This is perhaps the easiest form to do quickly as well. + */ + +template <typename TO, typename TI, typename TV> +inline TO MixMul(TI value, TV volume) { + COMPILE_TIME_ASSERT_FUNCTION_SCOPE(false); + // should not be here :-). + // To avoid mistakes, this template is always specialized. + return value * volume; +} + +template <> +inline int32_t MixMul<int32_t, int16_t, int16_t>(int16_t value, int16_t volume) { + return value * volume; +} + +template <> +inline int32_t MixMul<int32_t, int32_t, int16_t>(int32_t value, int16_t volume) { + return (value >> 12) * volume; +} + +template <> +inline int32_t MixMul<int32_t, int16_t, int32_t>(int16_t value, int32_t volume) { + return value * (volume >> 16); +} + +template <> +inline int32_t MixMul<int32_t, int32_t, int32_t>(int32_t value, int32_t volume) { + return (value >> 12) * (volume >> 16); +} + +template <> +inline float MixMul<float, float, int16_t>(float value, int16_t volume) { + static const float norm = 1. / (1 << 12); + return value * volume * norm; +} + +template <> +inline float MixMul<float, float, int32_t>(float value, int32_t volume) { + static const float norm = 1. / (1 << 28); + return value * volume * norm; +} + +template <> +inline int16_t MixMul<int16_t, float, int16_t>(float value, int16_t volume) { + return clamp16_from_float(MixMul<float, float, int16_t>(value, volume)); +} + +template <> +inline int16_t MixMul<int16_t, float, int32_t>(float value, int32_t volume) { + return clamp16_from_float(MixMul<float, float, int32_t>(value, volume)); +} + +template <> +inline float MixMul<float, int16_t, int16_t>(int16_t value, int16_t volume) { + static const float norm = 1. / (1 << (15 + 12)); + return static_cast<float>(value) * static_cast<float>(volume) * norm; +} + +template <> +inline float MixMul<float, int16_t, int32_t>(int16_t value, int32_t volume) { + static const float norm = 1. / (1ULL << (15 + 28)); + return static_cast<float>(value) * static_cast<float>(volume) * norm; +} + +template <> +inline int16_t MixMul<int16_t, int16_t, int16_t>(int16_t value, int16_t volume) { + return clamp16(MixMul<int32_t, int16_t, int16_t>(value, volume) >> 12); +} + +template <> +inline int16_t MixMul<int16_t, int32_t, int16_t>(int32_t value, int16_t volume) { + return clamp16(MixMul<int32_t, int32_t, int16_t>(value, volume) >> 12); +} + +template <> +inline int16_t MixMul<int16_t, int16_t, int32_t>(int16_t value, int32_t volume) { + return clamp16(MixMul<int32_t, int16_t, int32_t>(value, volume) >> 12); +} + +template <> +inline int16_t MixMul<int16_t, int32_t, int32_t>(int32_t value, int32_t volume) { + return clamp16(MixMul<int32_t, int32_t, int32_t>(value, volume) >> 12); +} + +/* Required for floating point volume. Some are needed for compilation but + * are not needed in execution and should be removed from the final build by + * an optimizing compiler. + */ +template <> +inline float MixMul<float, float, float>(float value, float volume) { + return value * volume; +} + +template <> +inline float MixMul<float, int16_t, float>(int16_t value, float volume) { + static const float float_from_q_15 = 1. 
/ (1 << 15); + return value * volume * float_from_q_15; +} + +template <> +inline int32_t MixMul<int32_t, int32_t, float>(int32_t value, float volume) { + LOG_ALWAYS_FATAL("MixMul<int32_t, int32_t, float> Runtime Should not be here"); + return value * volume; +} + +template <> +inline int32_t MixMul<int32_t, int16_t, float>(int16_t value, float volume) { + LOG_ALWAYS_FATAL("MixMul<int32_t, int16_t, float> Runtime Should not be here"); + static const float u4_12_from_float = (1 << 12); + return value * volume * u4_12_from_float; +} + +template <> +inline int16_t MixMul<int16_t, int16_t, float>(int16_t value, float volume) { + LOG_ALWAYS_FATAL("MixMul<int16_t, int16_t, float> Runtime Should not be here"); + return value * volume; +} + +template <> +inline int16_t MixMul<int16_t, float, float>(float value, float volume) { + static const float q_15_from_float = (1 << 15); + return value * volume * q_15_from_float; +} + +/* + * MixAccum is used to add into an accumulator register of a possibly different + * type. The TO and TI types are the same as MixMul. + */ + +template <typename TO, typename TI> +inline void MixAccum(TO *auxaccum, TI value) { + if (!is_same<TO, TI>::value) { + LOG_ALWAYS_FATAL("MixAccum type not properly specialized: %zu %zu\n", + sizeof(TO), sizeof(TI)); + } + *auxaccum += value; +} + +template<> +inline void MixAccum<float, int16_t>(float *auxaccum, int16_t value) { + static const float norm = 1. / (1 << 15); + *auxaccum += norm * value; +} + +template<> +inline void MixAccum<float, int32_t>(float *auxaccum, int32_t value) { + static const float norm = 1. / (1 << 27); + *auxaccum += norm * value; +} + +template<> +inline void MixAccum<int32_t, int16_t>(int32_t *auxaccum, int16_t value) { + *auxaccum += value << 12; +} + +template<> +inline void MixAccum<int32_t, float>(int32_t *auxaccum, float value) { + *auxaccum += clampq4_27_from_float(value); +} + +/* MixMulAux is just like MixMul except it combines with + * an accumulator operation MixAccum. + */ + +template <typename TO, typename TI, typename TV, typename TA> +inline TO MixMulAux(TI value, TV volume, TA *auxaccum) { + MixAccum<TA, TI>(auxaccum, value); + return MixMul<TO, TI, TV>(value, volume); +} + +/* MIXTYPE is used to determine how the samples in the input frame + * are mixed with volume gain into the output frame. + * See the volumeRampMulti functions below for more details. + */ +enum { + MIXTYPE_MULTI, + MIXTYPE_MONOEXPAND, + MIXTYPE_MULTI_SAVEONLY, + MIXTYPE_MULTI_MONOVOL, + MIXTYPE_MULTI_SAVEONLY_MONOVOL, +}; + +/* + * The volumeRampMulti and volumeRamp functions take a MIXTYPE + * which indicates the per-frame mixing and accumulation strategy. + * + * MIXTYPE_MULTI: + * NCHAN represents number of input and output channels. + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TV: int32_t (U4.28) or int16_t (U4.12) or float + * vol: represents a volume array. + * + * This accumulates into the out pointer. + * + * MIXTYPE_MONOEXPAND: + * Single input channel. NCHAN represents number of output channels. + * TO: int32_t (Q4.27) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TV: int32_t (U4.28) or int16_t (U4.12) or float + * Input channel count is 1. + * vol: represents volume array. + * + * This accumulates into the out pointer. + * + * MIXTYPE_MULTI_SAVEONLY: + * NCHAN represents number of input and output channels. 
+ * TO: int16_t (Q.15) or float + * TI: int32_t (Q4.27) or int16_t (Q0.15) or float + * TV: int32_t (U4.28) or int16_t (U4.12) or float + * vol: represents a volume array. + * + * MIXTYPE_MULTI_SAVEONLY does not accumulate into the out pointer. + * + * MIXTYPE_MULTI_MONOVOL: + * Same as MIXTYPE_MULTI, but uses only volume[0]. + * + * MIXTYPE_MULTI_SAVEONLY_MONOVOL: + * Same as MIXTYPE_MULTI_SAVEONLY, but uses only volume[0]. + * + */ + +template <int MIXTYPE, int NCHAN, + typename TO, typename TI, typename TV, typename TA, typename TAV> +inline void volumeRampMulti(TO* out, size_t frameCount, + const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc) +{ +#ifdef ALOGVV + ALOGVV("volumeRampMulti, MIXTYPE:%d\n", MIXTYPE); +#endif + if (aux != NULL) { + do { + TA auxaccum = 0; + switch (MIXTYPE) { + case MIXTYPE_MULTI: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum); + vol[i] += volinc[i]; + } + break; + case MIXTYPE_MONOEXPAND: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum); + vol[i] += volinc[i]; + } + in++; + break; + case MIXTYPE_MULTI_SAVEONLY: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum); + vol[i] += volinc[i]; + } + break; + case MIXTYPE_MULTI_MONOVOL: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum); + } + vol[0] += volinc[0]; + break; + case MIXTYPE_MULTI_SAVEONLY_MONOVOL: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum); + } + vol[0] += volinc[0]; + break; + default: + LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE); + break; + } + auxaccum /= NCHAN; + *aux++ += MixMul<TA, TA, TAV>(auxaccum, *vola); + vola[0] += volainc; + } while (--frameCount); + } else { + do { + switch (MIXTYPE) { + case MIXTYPE_MULTI: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in++, vol[i]); + vol[i] += volinc[i]; + } + break; + case MIXTYPE_MONOEXPAND: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in, vol[i]); + vol[i] += volinc[i]; + } + in++; + break; + case MIXTYPE_MULTI_SAVEONLY: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMul<TO, TI, TV>(*in++, vol[i]); + vol[i] += volinc[i]; + } + break; + case MIXTYPE_MULTI_MONOVOL: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in++, vol[0]); + } + vol[0] += volinc[0]; + break; + case MIXTYPE_MULTI_SAVEONLY_MONOVOL: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMul<TO, TI, TV>(*in++, vol[0]); + } + vol[0] += volinc[0]; + break; + default: + LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE); + break; + } + } while (--frameCount); + } +} + +template <int MIXTYPE, int NCHAN, + typename TO, typename TI, typename TV, typename TA, typename TAV> +inline void volumeMulti(TO* out, size_t frameCount, + const TI* in, TA* aux, const TV *vol, TAV vola) +{ +#ifdef ALOGVV + ALOGVV("volumeMulti MIXTYPE:%d\n", MIXTYPE); +#endif + if (aux != NULL) { + do { + TA auxaccum = 0; + switch (MIXTYPE) { + case MIXTYPE_MULTI: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum); + } + break; + case MIXTYPE_MONOEXPAND: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum); + } + in++; + break; + case MIXTYPE_MULTI_SAVEONLY: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum); + } + break; + case MIXTYPE_MULTI_MONOVOL: 
+ for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum); + } + break; + case MIXTYPE_MULTI_SAVEONLY_MONOVOL: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[0], &auxaccum); + } + break; + default: + LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE); + break; + } + auxaccum /= NCHAN; + *aux++ += MixMul<TA, TA, TAV>(auxaccum, vola); + } while (--frameCount); + } else { + do { + switch (MIXTYPE) { + case MIXTYPE_MULTI: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in++, vol[i]); + } + break; + case MIXTYPE_MONOEXPAND: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in, vol[i]); + } + in++; + break; + case MIXTYPE_MULTI_SAVEONLY: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMul<TO, TI, TV>(*in++, vol[i]); + } + break; + case MIXTYPE_MULTI_MONOVOL: + for (int i = 0; i < NCHAN; ++i) { + *out++ += MixMul<TO, TI, TV>(*in++, vol[0]); + } + break; + case MIXTYPE_MULTI_SAVEONLY_MONOVOL: + for (int i = 0; i < NCHAN; ++i) { + *out++ = MixMul<TO, TI, TV>(*in++, vol[0]); + } + break; + default: + LOG_ALWAYS_FATAL("invalid mixtype %d", MIXTYPE); + break; + } + } while (--frameCount); + } +} + +}; + +#endif /* ANDROID_AUDIO_MIXER_OPS_H */ diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp deleted file mode 100644 index 6ea5324..0000000 --- a/services/audioflinger/AudioPolicyService.cpp +++ /dev/null @@ -1,1691 +0,0 @@ -/* - * Copyright (C) 2009 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
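A note on the AudioMixerOps.h templates added above: the integer MixMul specializations encode their fixed-point formats directly in the shifts (a Q0.15 sample times a U4.12 gain carries 15 + 12 = 27 fractional bits, hence the Q4.27 accumulator and the >> 12 / >> 16 corrections for the wider inputs), and volumeMulti / volumeRampMulti simply apply MixMul per channel according to MIXTYPE, accumulating into the output except in the SAVEONLY variants. The fragment below is a hypothetical usage sketch, not part of the patch: the helper name, gains and channel count are invented, and it assumes AudioMixerOps.h (and its android namespace) is available on the include path.

    #include <stddef.h>
    #include "AudioMixerOps.h"   // the header introduced by this patch

    // Accumulate one track of stereo float samples into a float mix buffer at
    // a fixed -6 dB per channel.  MIXTYPE_MULTI adds into 'out'; the TA/TAV
    // parameters are only exercised when an aux buffer is supplied, so plain
    // float placeholders suffice here.  frameCount must be non-zero because
    // the inner loop is a do/while.
    static void mixOneTrack(float *out, const float *in, size_t frameCount) {
        const float vol[2] = { 0.5f, 0.5f };
        android::volumeMulti<android::MIXTYPE_MULTI, 2,
                float /*TO*/, float /*TI*/, float /*TV*/,
                float /*TA*/, float /*TAV*/>(
                out, frameCount, in, NULL /*aux*/, vol, 0.0f /*vola*/);
    }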
- */ - -#define LOG_TAG "AudioPolicyService" -//#define LOG_NDEBUG 0 - -#include "Configuration.h" -#undef __STRICT_ANSI__ -#define __STDINT_LIMITS -#define __STDC_LIMIT_MACROS -#include <stdint.h> - -#include <sys/time.h> -#include <binder/IServiceManager.h> -#include <utils/Log.h> -#include <cutils/properties.h> -#include <binder/IPCThreadState.h> -#include <utils/String16.h> -#include <utils/threads.h> -#include "AudioPolicyService.h" -#include "ServiceUtilities.h" -#include <hardware_legacy/power.h> -#include <media/AudioEffect.h> -#include <media/EffectsFactoryApi.h> - -#include <hardware/hardware.h> -#include <system/audio.h> -#include <system/audio_policy.h> -#include <hardware/audio_policy.h> -#include <audio_effects/audio_effects_conf.h> -#include <media/AudioParameter.h> - -namespace android { - -static const char kDeadlockedString[] = "AudioPolicyService may be deadlocked\n"; -static const char kCmdDeadlockedString[] = "AudioPolicyService command thread may be deadlocked\n"; - -static const int kDumpLockRetries = 50; -static const int kDumpLockSleepUs = 20000; - -static const nsecs_t kAudioCommandTimeout = 3000000000LL; // 3 seconds - -namespace { - extern struct audio_policy_service_ops aps_ops; -}; - -// ---------------------------------------------------------------------------- - -AudioPolicyService::AudioPolicyService() - : BnAudioPolicyService() , mpAudioPolicyDev(NULL) , mpAudioPolicy(NULL) -{ - char value[PROPERTY_VALUE_MAX]; - const struct hw_module_t *module; - int forced_val; - int rc; - - Mutex::Autolock _l(mLock); - - // start tone playback thread - mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this); - // start audio commands thread - mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this); - // start output activity command thread - mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this); - /* instantiate the audio policy manager */ - rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module); - if (rc) - return; - - rc = audio_policy_dev_open(module, &mpAudioPolicyDev); - ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc)); - if (rc) - return; - - rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this, - &mpAudioPolicy); - ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc)); - if (rc) - return; - - rc = mpAudioPolicy->init_check(mpAudioPolicy); - ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc)); - if (rc) - return; - - ALOGI("Loaded audio policy from %s (%s)", module->name, module->id); - - // load audio pre processing modules - if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) { - loadPreProcessorConfig(AUDIO_EFFECT_VENDOR_CONFIG_FILE); - } else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) { - loadPreProcessorConfig(AUDIO_EFFECT_DEFAULT_CONFIG_FILE); - } -} - -AudioPolicyService::~AudioPolicyService() -{ - mTonePlaybackThread->exit(); - mTonePlaybackThread.clear(); - mAudioCommandThread->exit(); - mAudioCommandThread.clear(); - - - // release audio pre processing resources - for (size_t i = 0; i < mInputSources.size(); i++) { - delete mInputSources.valueAt(i); - } - mInputSources.clear(); - - for (size_t i = 0; i < mInputs.size(); i++) { - mInputs.valueAt(i)->mEffects.clear(); - delete mInputs.valueAt(i); - } - mInputs.clear(); - - if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) - mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy); - if (mpAudioPolicyDev != NULL) - 
audio_policy_dev_close(mpAudioPolicyDev); -} - -status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device, - audio_policy_dev_state_t state, - const char *device_address) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (!audio_is_output_device(device) && !audio_is_input_device(device)) { - return BAD_VALUE; - } - if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE && - state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) { - return BAD_VALUE; - } - - ALOGV("setDeviceConnectionState()"); - Mutex::Autolock _l(mLock); - return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device, - state, device_address); -} - -audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState( - audio_devices_t device, - const char *device_address) -{ - if (mpAudioPolicy == NULL) { - return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; - } - return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device, - device_address); -} - -status_t AudioPolicyService::setPhoneState(audio_mode_t state) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (uint32_t(state) >= AUDIO_MODE_CNT) { - return BAD_VALUE; - } - - ALOGV("setPhoneState()"); - - // TODO: check if it is more appropriate to do it in platform specific policy manager - AudioSystem::setMode(state); - - Mutex::Autolock _l(mLock); - mpAudioPolicy->set_phone_state(mpAudioPolicy, state); - return NO_ERROR; -} - -status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage, - audio_policy_forced_cfg_t config) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) { - return BAD_VALUE; - } - if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) { - return BAD_VALUE; - } - ALOGV("setForceUse()"); - Mutex::Autolock _l(mLock); - mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config); - return NO_ERROR; -} - -audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage) -{ - if (mpAudioPolicy == NULL) { - return AUDIO_POLICY_FORCE_NONE; - } - if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) { - return AUDIO_POLICY_FORCE_NONE; - } - return mpAudioPolicy->get_force_use(mpAudioPolicy, usage); -} - -audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream, - uint32_t samplingRate, - audio_format_t format, - audio_channel_mask_t channelMask, - audio_output_flags_t flags, - const audio_offload_info_t *offloadInfo) -{ - if (mpAudioPolicy == NULL) { - return 0; - } - ALOGV("getOutput()"); - Mutex::Autolock _l(mLock); - return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, - format, channelMask, flags, offloadInfo); -} - -status_t AudioPolicyService::startOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int session) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - ALOGV("startOutput()"); - Mutex::Autolock _l(mLock); - return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session); -} - -status_t AudioPolicyService::stopOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int session) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - ALOGV("stopOutput()"); - mOutputCommandThread->stopOutputCommand(output, stream, session); - return NO_ERROR; -} - -status_t AudioPolicyService::doStopOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int 
session) -{ - ALOGV("doStopOutput from tid %d", gettid()); - Mutex::Autolock _l(mLock); - return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session); -} - -void AudioPolicyService::releaseOutput(audio_io_handle_t output) -{ - if (mpAudioPolicy == NULL) { - return; - } - ALOGV("releaseOutput()"); - mOutputCommandThread->releaseOutputCommand(output); -} - -void AudioPolicyService::doReleaseOutput(audio_io_handle_t output) -{ - ALOGV("doReleaseOutput from tid %d", gettid()); - Mutex::Autolock _l(mLock); - mpAudioPolicy->release_output(mpAudioPolicy, output); -} - -audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource, - uint32_t samplingRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int audioSession) -{ - if (mpAudioPolicy == NULL) { - return 0; - } - // already checked by client, but double-check in case the client wrapper is bypassed - if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) { - return 0; - } - - if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) { - return 0; - } - - Mutex::Autolock _l(mLock); - // the audio_in_acoustics_t parameter is ignored by get_input() - audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate, - format, channelMask, (audio_in_acoustics_t) 0); - - if (input == 0) { - return input; - } - // create audio pre processors according to input source - audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ? - AUDIO_SOURCE_VOICE_RECOGNITION : inputSource; - - ssize_t index = mInputSources.indexOfKey(aliasSource); - if (index < 0) { - return input; - } - ssize_t idx = mInputs.indexOfKey(input); - InputDesc *inputDesc; - if (idx < 0) { - inputDesc = new InputDesc(audioSession); - mInputs.add(input, inputDesc); - } else { - inputDesc = mInputs.valueAt(idx); - } - - Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects; - for (size_t i = 0; i < effects.size(); i++) { - EffectDesc *effect = effects[i]; - sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input); - status_t status = fx->initCheck(); - if (status != NO_ERROR && status != ALREADY_EXISTS) { - ALOGW("Failed to create Fx %s on input %d", effect->mName, input); - // fx goes out of scope and strong ref on AudioEffect is released - continue; - } - for (size_t j = 0; j < effect->mParams.size(); j++) { - fx->setParameter(effect->mParams[j]); - } - inputDesc->mEffects.add(fx); - } - setPreProcessorEnabled(inputDesc, true); - return input; -} - -status_t AudioPolicyService::startInput(audio_io_handle_t input) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - Mutex::Autolock _l(mLock); - - return mpAudioPolicy->start_input(mpAudioPolicy, input); -} - -status_t AudioPolicyService::stopInput(audio_io_handle_t input) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - Mutex::Autolock _l(mLock); - - return mpAudioPolicy->stop_input(mpAudioPolicy, input); -} - -void AudioPolicyService::releaseInput(audio_io_handle_t input) -{ - if (mpAudioPolicy == NULL) { - return; - } - Mutex::Autolock _l(mLock); - mpAudioPolicy->release_input(mpAudioPolicy, input); - - ssize_t index = mInputs.indexOfKey(input); - if (index < 0) { - return; - } - InputDesc *inputDesc = mInputs.valueAt(index); - setPreProcessorEnabled(inputDesc, false); - delete inputDesc; - mInputs.removeItemsAt(index); -} - -status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream, - int indexMin, - int indexMax) -{ - if (mpAudioPolicy == NULL) 
{ - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (uint32_t(stream) >= AUDIO_STREAM_CNT) { - return BAD_VALUE; - } - Mutex::Autolock _l(mLock); - mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax); - return NO_ERROR; -} - -status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream, - int index, - audio_devices_t device) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (uint32_t(stream) >= AUDIO_STREAM_CNT) { - return BAD_VALUE; - } - Mutex::Autolock _l(mLock); - if (mpAudioPolicy->set_stream_volume_index_for_device) { - return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy, - stream, - index, - device); - } else { - return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index); - } -} - -status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream, - int *index, - audio_devices_t device) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (uint32_t(stream) >= AUDIO_STREAM_CNT) { - return BAD_VALUE; - } - Mutex::Autolock _l(mLock); - if (mpAudioPolicy->get_stream_volume_index_for_device) { - return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy, - stream, - index, - device); - } else { - return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index); - } -} - -uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream) -{ - if (mpAudioPolicy == NULL) { - return 0; - } - return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream); -} - -//audio policy: use audio_device_t appropriately - -audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream) -{ - if (mpAudioPolicy == NULL) { - return (audio_devices_t)0; - } - return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream); -} - -audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - Mutex::Autolock _l(mLock); - return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc); -} - -status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc, - audio_io_handle_t io, - uint32_t strategy, - int session, - int id) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id); -} - -status_t AudioPolicyService::unregisterEffect(int id) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - return mpAudioPolicy->unregister_effect(mpAudioPolicy, id); -} - -status_t AudioPolicyService::setEffectEnabled(int id, bool enabled) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled); -} - -bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const -{ - if (mpAudioPolicy == NULL) { - return 0; - } - Mutex::Autolock _l(mLock); - return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs); -} - -bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const -{ - if (mpAudioPolicy == NULL) { - return 0; - } - Mutex::Autolock _l(mLock); - return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs); -} - -bool AudioPolicyService::isSourceActive(audio_source_t source) const -{ - if (mpAudioPolicy == NULL) { - return false; - } - if (mpAudioPolicy->is_source_active == 0) { - return false; - } - 
Mutex::Autolock _l(mLock); - return mpAudioPolicy->is_source_active(mpAudioPolicy, source); -} - -status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession, - effect_descriptor_t *descriptors, - uint32_t *count) -{ - - if (mpAudioPolicy == NULL) { - *count = 0; - return NO_INIT; - } - Mutex::Autolock _l(mLock); - status_t status = NO_ERROR; - - size_t index; - for (index = 0; index < mInputs.size(); index++) { - if (mInputs.valueAt(index)->mSessionId == audioSession) { - break; - } - } - if (index == mInputs.size()) { - *count = 0; - return BAD_VALUE; - } - Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects; - - for (size_t i = 0; i < effects.size(); i++) { - effect_descriptor_t desc = effects[i]->descriptor(); - if (i < *count) { - descriptors[i] = desc; - } - } - if (effects.size() > *count) { - status = NO_MEMORY; - } - *count = effects.size(); - return status; -} - -void AudioPolicyService::binderDied(const wp<IBinder>& who) { - ALOGW("binderDied() %p, calling pid %d", who.unsafe_get(), - IPCThreadState::self()->getCallingPid()); -} - -static bool tryLock(Mutex& mutex) -{ - bool locked = false; - for (int i = 0; i < kDumpLockRetries; ++i) { - if (mutex.tryLock() == NO_ERROR) { - locked = true; - break; - } - usleep(kDumpLockSleepUs); - } - return locked; -} - -status_t AudioPolicyService::dumpInternals(int fd) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpAudioPolicy); - result.append(buffer); - snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get()); - result.append(buffer); - snprintf(buffer, SIZE, "Tones Thread: %p\n", mTonePlaybackThread.get()); - result.append(buffer); - - write(fd, result.string(), result.size()); - return NO_ERROR; -} - -status_t AudioPolicyService::dump(int fd, const Vector<String16>& args) -{ - if (!dumpAllowed()) { - dumpPermissionDenial(fd); - } else { - bool locked = tryLock(mLock); - if (!locked) { - String8 result(kDeadlockedString); - write(fd, result.string(), result.size()); - } - - dumpInternals(fd); - if (mAudioCommandThread != 0) { - mAudioCommandThread->dump(fd); - } - if (mTonePlaybackThread != 0) { - mTonePlaybackThread->dump(fd); - } - - if (mpAudioPolicy) { - mpAudioPolicy->dump(mpAudioPolicy, fd); - } - - if (locked) mLock.unlock(); - } - return NO_ERROR; -} - -status_t AudioPolicyService::dumpPermissionDenial(int fd) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - snprintf(buffer, SIZE, "Permission Denial: " - "can't dump AudioPolicyService from pid=%d, uid=%d\n", - IPCThreadState::self()->getCallingPid(), - IPCThreadState::self()->getCallingUid()); - result.append(buffer); - write(fd, result.string(), result.size()); - return NO_ERROR; -} - -void AudioPolicyService::setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled) -{ - const Vector<sp<AudioEffect> > &fxVector = inputDesc->mEffects; - for (size_t i = 0; i < fxVector.size(); i++) { - fxVector.itemAt(i)->setEnabled(enabled); - } -} - -status_t AudioPolicyService::onTransact( - uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) -{ - return BnAudioPolicyService::onTransact(code, data, reply, flags); -} - - -// ----------- AudioPolicyService::AudioCommandThread implementation ---------- - -AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name, - const wp<AudioPolicyService>& service) - : Thread(false), mName(name), mService(service) -{ - mpToneGenerator = NULL; -} - - 
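The AudioCommandThread code that follows (part of the file being deleted here) drains a timestamp-ordered command queue under mLock and deliberately drops that lock around each AudioSystem or service callback, so callers that already hold their own locks can post work without deadlocking against the worker. A stripped-down sketch of that pattern is shown below; the class and member names are invented for illustration and do not match the real implementation.

    // Producers enqueue and return immediately; the worker pops under the
    // lock but releases it while executing, mirroring the
    // mLock.unlock()/mLock.lock() pairs in AudioCommandThread::threadLoop().
    #include <mutex>
    #include <queue>
    #include <functional>
    #include <condition_variable>

    class CommandQueue {
    public:
        void post(std::function<void()> cmd) {
            std::lock_guard<std::mutex> lg(mLock);
            mCommands.push(std::move(cmd));
            mWork.notify_one();           // wake the worker, never run inline
        }
        void workerLoop() {
            std::unique_lock<std::mutex> ul(mLock);
            for (;;) {
                while (mCommands.empty()) {
                    mWork.wait(ul);
                }
                std::function<void()> cmd = std::move(mCommands.front());
                mCommands.pop();
                ul.unlock();              // drop the lock around the callback
                cmd();                    // safe to call back into the service
                ul.lock();
            }
        }
    private:
        std::mutex mLock;
        std::condition_variable mWork;
        std::queue<std::function<void()> > mCommands;
    };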
-AudioPolicyService::AudioCommandThread::~AudioCommandThread() -{ - if (!mAudioCommands.isEmpty()) { - release_wake_lock(mName.string()); - } - for (size_t k=0; k < mAudioCommands.size(); k++) { - delete mAudioCommands[k]->mParam; - delete mAudioCommands[k]; - } - mAudioCommands.clear(); - delete mpToneGenerator; -} - -void AudioPolicyService::AudioCommandThread::onFirstRef() -{ - run(mName.string(), ANDROID_PRIORITY_AUDIO); -} - -bool AudioPolicyService::AudioCommandThread::threadLoop() -{ - nsecs_t waitTime = INT64_MAX; - - mLock.lock(); - while (!exitPending()) - { - while (!mAudioCommands.isEmpty()) { - nsecs_t curTime = systemTime(); - // commands are sorted by increasing time stamp: execute them from index 0 and up - if (mAudioCommands[0]->mTime <= curTime) { - AudioCommand *command = mAudioCommands[0]; - mAudioCommands.removeAt(0); - mLastCommand = *command; - - switch (command->mCommand) { - case START_TONE: { - mLock.unlock(); - ToneData *data = (ToneData *)command->mParam; - ALOGV("AudioCommandThread() processing start tone %d on stream %d", - data->mType, data->mStream); - delete mpToneGenerator; - mpToneGenerator = new ToneGenerator(data->mStream, 1.0); - mpToneGenerator->startTone(data->mType); - delete data; - mLock.lock(); - }break; - case STOP_TONE: { - mLock.unlock(); - ALOGV("AudioCommandThread() processing stop tone"); - if (mpToneGenerator != NULL) { - mpToneGenerator->stopTone(); - delete mpToneGenerator; - mpToneGenerator = NULL; - } - mLock.lock(); - }break; - case SET_VOLUME: { - VolumeData *data = (VolumeData *)command->mParam; - ALOGV("AudioCommandThread() processing set volume stream %d, \ - volume %f, output %d", data->mStream, data->mVolume, data->mIO); - command->mStatus = AudioSystem::setStreamVolume(data->mStream, - data->mVolume, - data->mIO); - if (command->mWaitStatus) { - command->mCond.signal(); - command->mCond.waitRelative(mLock, kAudioCommandTimeout); - } - delete data; - }break; - case SET_PARAMETERS: { - ParametersData *data = (ParametersData *)command->mParam; - ALOGV("AudioCommandThread() processing set parameters string %s, io %d", - data->mKeyValuePairs.string(), data->mIO); - command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs); - if (command->mWaitStatus) { - command->mCond.signal(); - command->mCond.waitRelative(mLock, kAudioCommandTimeout); - } - delete data; - }break; - case SET_VOICE_VOLUME: { - VoiceVolumeData *data = (VoiceVolumeData *)command->mParam; - ALOGV("AudioCommandThread() processing set voice volume volume %f", - data->mVolume); - command->mStatus = AudioSystem::setVoiceVolume(data->mVolume); - if (command->mWaitStatus) { - command->mCond.signal(); - command->mCond.waitRelative(mLock, kAudioCommandTimeout); - } - delete data; - }break; - case STOP_OUTPUT: { - StopOutputData *data = (StopOutputData *)command->mParam; - ALOGV("AudioCommandThread() processing stop output %d", - data->mIO); - sp<AudioPolicyService> svc = mService.promote(); - if (svc == 0) { - break; - } - mLock.unlock(); - svc->doStopOutput(data->mIO, data->mStream, data->mSession); - mLock.lock(); - delete data; - }break; - case RELEASE_OUTPUT: { - ReleaseOutputData *data = (ReleaseOutputData *)command->mParam; - ALOGV("AudioCommandThread() processing release output %d", - data->mIO); - sp<AudioPolicyService> svc = mService.promote(); - if (svc == 0) { - break; - } - mLock.unlock(); - svc->doReleaseOutput(data->mIO); - mLock.lock(); - delete data; - }break; - default: - ALOGW("AudioCommandThread() unknown command %d", 
command->mCommand); - } - delete command; - waitTime = INT64_MAX; - } else { - waitTime = mAudioCommands[0]->mTime - curTime; - break; - } - } - // release delayed commands wake lock - if (mAudioCommands.isEmpty()) { - release_wake_lock(mName.string()); - } - ALOGV("AudioCommandThread() going to sleep"); - mWaitWorkCV.waitRelative(mLock, waitTime); - ALOGV("AudioCommandThread() waking up"); - } - mLock.unlock(); - return false; -} - -status_t AudioPolicyService::AudioCommandThread::dump(int fd) -{ - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "AudioCommandThread %p Dump\n", this); - result.append(buffer); - write(fd, result.string(), result.size()); - - bool locked = tryLock(mLock); - if (!locked) { - String8 result2(kCmdDeadlockedString); - write(fd, result2.string(), result2.size()); - } - - snprintf(buffer, SIZE, "- Commands:\n"); - result = String8(buffer); - result.append(" Command Time Wait pParam\n"); - for (size_t i = 0; i < mAudioCommands.size(); i++) { - mAudioCommands[i]->dump(buffer, SIZE); - result.append(buffer); - } - result.append(" Last Command\n"); - mLastCommand.dump(buffer, SIZE); - result.append(buffer); - - write(fd, result.string(), result.size()); - - if (locked) mLock.unlock(); - - return NO_ERROR; -} - -void AudioPolicyService::AudioCommandThread::startToneCommand(ToneGenerator::tone_type type, - audio_stream_type_t stream) -{ - AudioCommand *command = new AudioCommand(); - command->mCommand = START_TONE; - ToneData *data = new ToneData(); - data->mType = type; - data->mStream = stream; - command->mParam = data; - Mutex::Autolock _l(mLock); - insertCommand_l(command); - ALOGV("AudioCommandThread() adding tone start type %d, stream %d", type, stream); - mWaitWorkCV.signal(); -} - -void AudioPolicyService::AudioCommandThread::stopToneCommand() -{ - AudioCommand *command = new AudioCommand(); - command->mCommand = STOP_TONE; - command->mParam = NULL; - Mutex::Autolock _l(mLock); - insertCommand_l(command); - ALOGV("AudioCommandThread() adding tone stop"); - mWaitWorkCV.signal(); -} - -status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream, - float volume, - audio_io_handle_t output, - int delayMs) -{ - status_t status = NO_ERROR; - - AudioCommand *command = new AudioCommand(); - command->mCommand = SET_VOLUME; - VolumeData *data = new VolumeData(); - data->mStream = stream; - data->mVolume = volume; - data->mIO = output; - command->mParam = data; - Mutex::Autolock _l(mLock); - insertCommand_l(command, delayMs); - ALOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %d", - stream, volume, output); - mWaitWorkCV.signal(); - if (command->mWaitStatus) { - command->mCond.wait(mLock); - status = command->mStatus; - command->mCond.signal(); - } - return status; -} - -status_t AudioPolicyService::AudioCommandThread::parametersCommand(audio_io_handle_t ioHandle, - const char *keyValuePairs, - int delayMs) -{ - status_t status = NO_ERROR; - - AudioCommand *command = new AudioCommand(); - command->mCommand = SET_PARAMETERS; - ParametersData *data = new ParametersData(); - data->mIO = ioHandle; - data->mKeyValuePairs = String8(keyValuePairs); - command->mParam = data; - Mutex::Autolock _l(mLock); - insertCommand_l(command, delayMs); - ALOGV("AudioCommandThread() adding set parameter string %s, io %d ,delay %d", - keyValuePairs, ioHandle, delayMs); - mWaitWorkCV.signal(); - if (command->mWaitStatus) { - command->mCond.wait(mLock); - status = command->mStatus; - 
command->mCond.signal(); - } - return status; -} - -status_t AudioPolicyService::AudioCommandThread::voiceVolumeCommand(float volume, int delayMs) -{ - status_t status = NO_ERROR; - - AudioCommand *command = new AudioCommand(); - command->mCommand = SET_VOICE_VOLUME; - VoiceVolumeData *data = new VoiceVolumeData(); - data->mVolume = volume; - command->mParam = data; - Mutex::Autolock _l(mLock); - insertCommand_l(command, delayMs); - ALOGV("AudioCommandThread() adding set voice volume volume %f", volume); - mWaitWorkCV.signal(); - if (command->mWaitStatus) { - command->mCond.wait(mLock); - status = command->mStatus; - command->mCond.signal(); - } - return status; -} - -void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output, - audio_stream_type_t stream, - int session) -{ - AudioCommand *command = new AudioCommand(); - command->mCommand = STOP_OUTPUT; - StopOutputData *data = new StopOutputData(); - data->mIO = output; - data->mStream = stream; - data->mSession = session; - command->mParam = data; - Mutex::Autolock _l(mLock); - insertCommand_l(command); - ALOGV("AudioCommandThread() adding stop output %d", output); - mWaitWorkCV.signal(); -} - -void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_io_handle_t output) -{ - AudioCommand *command = new AudioCommand(); - command->mCommand = RELEASE_OUTPUT; - ReleaseOutputData *data = new ReleaseOutputData(); - data->mIO = output; - command->mParam = data; - Mutex::Autolock _l(mLock); - insertCommand_l(command); - ALOGV("AudioCommandThread() adding release output %d", output); - mWaitWorkCV.signal(); -} - -// insertCommand_l() must be called with mLock held -void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs) -{ - ssize_t i; // not size_t because i will count down to -1 - Vector <AudioCommand *> removedCommands; - command->mTime = systemTime() + milliseconds(delayMs); - - // acquire wake lock to make sure delayed commands are processed - if (mAudioCommands.isEmpty()) { - acquire_wake_lock(PARTIAL_WAKE_LOCK, mName.string()); - } - - // check same pending commands with later time stamps and eliminate them - for (i = mAudioCommands.size()-1; i >= 0; i--) { - AudioCommand *command2 = mAudioCommands[i]; - // commands are sorted by increasing time stamp: no need to scan the rest of mAudioCommands - if (command2->mTime <= command->mTime) break; - if (command2->mCommand != command->mCommand) continue; - - switch (command->mCommand) { - case SET_PARAMETERS: { - ParametersData *data = (ParametersData *)command->mParam; - ParametersData *data2 = (ParametersData *)command2->mParam; - if (data->mIO != data2->mIO) break; - ALOGV("Comparing parameter command %s to new command %s", - data2->mKeyValuePairs.string(), data->mKeyValuePairs.string()); - AudioParameter param = AudioParameter(data->mKeyValuePairs); - AudioParameter param2 = AudioParameter(data2->mKeyValuePairs); - for (size_t j = 0; j < param.size(); j++) { - String8 key; - String8 value; - param.getAt(j, key, value); - for (size_t k = 0; k < param2.size(); k++) { - String8 key2; - String8 value2; - param2.getAt(k, key2, value2); - if (key2 == key) { - param2.remove(key2); - ALOGV("Filtering out parameter %s", key2.string()); - break; - } - } - } - // if all keys have been filtered out, remove the command. 
- // otherwise, update the key value pairs - if (param2.size() == 0) { - removedCommands.add(command2); - } else { - data2->mKeyValuePairs = param2.toString(); - } - command->mTime = command2->mTime; - // force delayMs to non 0 so that code below does not request to wait for - // command status as the command is now delayed - delayMs = 1; - } break; - - case SET_VOLUME: { - VolumeData *data = (VolumeData *)command->mParam; - VolumeData *data2 = (VolumeData *)command2->mParam; - if (data->mIO != data2->mIO) break; - if (data->mStream != data2->mStream) break; - ALOGV("Filtering out volume command on output %d for stream %d", - data->mIO, data->mStream); - removedCommands.add(command2); - command->mTime = command2->mTime; - // force delayMs to non 0 so that code below does not request to wait for - // command status as the command is now delayed - delayMs = 1; - } break; - case START_TONE: - case STOP_TONE: - default: - break; - } - } - - // remove filtered commands - for (size_t j = 0; j < removedCommands.size(); j++) { - // removed commands always have time stamps greater than current command - for (size_t k = i + 1; k < mAudioCommands.size(); k++) { - if (mAudioCommands[k] == removedCommands[j]) { - ALOGV("suppressing command: %d", mAudioCommands[k]->mCommand); - // for commands that are not filtered, - // command->mParam is deleted in threadLoop - delete mAudioCommands[k]->mParam; - delete mAudioCommands[k]; - mAudioCommands.removeAt(k); - break; - } - } - } - removedCommands.clear(); - - // wait for status only if delay is 0 - if (delayMs == 0) { - command->mWaitStatus = true; - } else { - command->mWaitStatus = false; - } - - // insert command at the right place according to its time stamp - ALOGV("inserting command: %d at index %d, num commands %d", - command->mCommand, (int)i+1, mAudioCommands.size()); - mAudioCommands.insertAt(command, i + 1); -} - -void AudioPolicyService::AudioCommandThread::exit() -{ - ALOGV("AudioCommandThread::exit"); - { - AutoMutex _l(mLock); - requestExit(); - mWaitWorkCV.signal(); - } - requestExitAndWait(); -} - -void AudioPolicyService::AudioCommandThread::AudioCommand::dump(char* buffer, size_t size) -{ - snprintf(buffer, size, " %02d %06d.%03d %01u %p\n", - mCommand, - (int)ns2s(mTime), - (int)ns2ms(mTime)%1000, - mWaitStatus, - mParam); -} - -/******* helpers for the service_ops callbacks defined below *********/ -void AudioPolicyService::setParameters(audio_io_handle_t ioHandle, - const char *keyValuePairs, - int delayMs) -{ - mAudioCommandThread->parametersCommand(ioHandle, keyValuePairs, - delayMs); -} - -int AudioPolicyService::setStreamVolume(audio_stream_type_t stream, - float volume, - audio_io_handle_t output, - int delayMs) -{ - return (int)mAudioCommandThread->volumeCommand(stream, volume, - output, delayMs); -} - -int AudioPolicyService::startTone(audio_policy_tone_t tone, - audio_stream_type_t stream) -{ - if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) - ALOGE("startTone: illegal tone requested (%d)", tone); - if (stream != AUDIO_STREAM_VOICE_CALL) - ALOGE("startTone: illegal stream (%d) requested for tone %d", stream, - tone); - mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING, - AUDIO_STREAM_VOICE_CALL); - return 0; -} - -int AudioPolicyService::stopTone() -{ - mTonePlaybackThread->stopToneCommand(); - return 0; -} - -int AudioPolicyService::setVoiceVolume(float volume, int delayMs) -{ - return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs); -} - -bool 
AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info) -{ - if (mpAudioPolicy == NULL) { - ALOGV("mpAudioPolicy == NULL"); - return false; - } - - if (mpAudioPolicy->is_offload_supported == NULL) { - ALOGV("HAL does not implement is_offload_supported"); - return false; - } - - return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info); -} - -// ---------------------------------------------------------------------------- -// Audio pre-processing configuration -// ---------------------------------------------------------------------------- - -/*static*/ const char * const AudioPolicyService::kInputSourceNames[AUDIO_SOURCE_CNT -1] = { - MIC_SRC_TAG, - VOICE_UL_SRC_TAG, - VOICE_DL_SRC_TAG, - VOICE_CALL_SRC_TAG, - CAMCORDER_SRC_TAG, - VOICE_REC_SRC_TAG, - VOICE_COMM_SRC_TAG -}; - -// returns the audio_source_t enum corresponding to the input source name or -// AUDIO_SOURCE_CNT is no match found -audio_source_t AudioPolicyService::inputSourceNameToEnum(const char *name) -{ - int i; - for (i = AUDIO_SOURCE_MIC; i < AUDIO_SOURCE_CNT; i++) { - if (strcmp(name, kInputSourceNames[i - AUDIO_SOURCE_MIC]) == 0) { - ALOGV("inputSourceNameToEnum found source %s %d", name, i); - break; - } - } - return (audio_source_t)i; -} - -size_t AudioPolicyService::growParamSize(char *param, - size_t size, - size_t *curSize, - size_t *totSize) -{ - // *curSize is at least sizeof(effect_param_t) + 2 * sizeof(int) - size_t pos = ((*curSize - 1 ) / size + 1) * size; - - if (pos + size > *totSize) { - while (pos + size > *totSize) { - *totSize += ((*totSize + 7) / 8) * 4; - } - param = (char *)realloc(param, *totSize); - } - *curSize = pos + size; - return pos; -} - -size_t AudioPolicyService::readParamValue(cnode *node, - char *param, - size_t *curSize, - size_t *totSize) -{ - if (strncmp(node->name, SHORT_TAG, sizeof(SHORT_TAG) + 1) == 0) { - size_t pos = growParamSize(param, sizeof(short), curSize, totSize); - *(short *)((char *)param + pos) = (short)atoi(node->value); - ALOGV("readParamValue() reading short %d", *(short *)((char *)param + pos)); - return sizeof(short); - } else if (strncmp(node->name, INT_TAG, sizeof(INT_TAG) + 1) == 0) { - size_t pos = growParamSize(param, sizeof(int), curSize, totSize); - *(int *)((char *)param + pos) = atoi(node->value); - ALOGV("readParamValue() reading int %d", *(int *)((char *)param + pos)); - return sizeof(int); - } else if (strncmp(node->name, FLOAT_TAG, sizeof(FLOAT_TAG) + 1) == 0) { - size_t pos = growParamSize(param, sizeof(float), curSize, totSize); - *(float *)((char *)param + pos) = (float)atof(node->value); - ALOGV("readParamValue() reading float %f",*(float *)((char *)param + pos)); - return sizeof(float); - } else if (strncmp(node->name, BOOL_TAG, sizeof(BOOL_TAG) + 1) == 0) { - size_t pos = growParamSize(param, sizeof(bool), curSize, totSize); - if (strncmp(node->value, "false", strlen("false") + 1) == 0) { - *(bool *)((char *)param + pos) = false; - } else { - *(bool *)((char *)param + pos) = true; - } - ALOGV("readParamValue() reading bool %s",*(bool *)((char *)param + pos) ? 
"true" : "false"); - return sizeof(bool); - } else if (strncmp(node->name, STRING_TAG, sizeof(STRING_TAG) + 1) == 0) { - size_t len = strnlen(node->value, EFFECT_STRING_LEN_MAX); - if (*curSize + len + 1 > *totSize) { - *totSize = *curSize + len + 1; - param = (char *)realloc(param, *totSize); - } - strncpy(param + *curSize, node->value, len); - *curSize += len; - param[*curSize] = '\0'; - ALOGV("readParamValue() reading string %s", param + *curSize - len); - return len; - } - ALOGW("readParamValue() unknown param type %s", node->name); - return 0; -} - -effect_param_t *AudioPolicyService::loadEffectParameter(cnode *root) -{ - cnode *param; - cnode *value; - size_t curSize = sizeof(effect_param_t); - size_t totSize = sizeof(effect_param_t) + 2 * sizeof(int); - effect_param_t *fx_param = (effect_param_t *)malloc(totSize); - - param = config_find(root, PARAM_TAG); - value = config_find(root, VALUE_TAG); - if (param == NULL && value == NULL) { - // try to parse simple parameter form {int int} - param = root->first_child; - if (param != NULL) { - // Note: that a pair of random strings is read as 0 0 - int *ptr = (int *)fx_param->data; - int *ptr2 = (int *)((char *)param + sizeof(effect_param_t)); - ALOGW("loadEffectParameter() ptr %p ptr2 %p", ptr, ptr2); - *ptr++ = atoi(param->name); - *ptr = atoi(param->value); - fx_param->psize = sizeof(int); - fx_param->vsize = sizeof(int); - return fx_param; - } - } - if (param == NULL || value == NULL) { - ALOGW("loadEffectParameter() invalid parameter description %s", root->name); - goto error; - } - - fx_param->psize = 0; - param = param->first_child; - while (param) { - ALOGV("loadEffectParameter() reading param of type %s", param->name); - size_t size = readParamValue(param, (char *)fx_param, &curSize, &totSize); - if (size == 0) { - goto error; - } - fx_param->psize += size; - param = param->next; - } - - // align start of value field on 32 bit boundary - curSize = ((curSize - 1 ) / sizeof(int) + 1) * sizeof(int); - - fx_param->vsize = 0; - value = value->first_child; - while (value) { - ALOGV("loadEffectParameter() reading value of type %s", value->name); - size_t size = readParamValue(value, (char *)fx_param, &curSize, &totSize); - if (size == 0) { - goto error; - } - fx_param->vsize += size; - value = value->next; - } - - return fx_param; - -error: - free(fx_param); - return NULL; -} - -void AudioPolicyService::loadEffectParameters(cnode *root, Vector <effect_param_t *>& params) -{ - cnode *node = root->first_child; - while (node) { - ALOGV("loadEffectParameters() loading param %s", node->name); - effect_param_t *param = loadEffectParameter(node); - if (param == NULL) { - node = node->next; - continue; - } - params.add(param); - node = node->next; - } -} - -AudioPolicyService::InputSourceDesc *AudioPolicyService::loadInputSource( - cnode *root, - const Vector <EffectDesc *>& effects) -{ - cnode *node = root->first_child; - if (node == NULL) { - ALOGW("loadInputSource() empty element %s", root->name); - return NULL; - } - InputSourceDesc *source = new InputSourceDesc(); - while (node) { - size_t i; - for (i = 0; i < effects.size(); i++) { - if (strncmp(effects[i]->mName, node->name, EFFECT_STRING_LEN_MAX) == 0) { - ALOGV("loadInputSource() found effect %s in list", node->name); - break; - } - } - if (i == effects.size()) { - ALOGV("loadInputSource() effect %s not in list", node->name); - node = node->next; - continue; - } - EffectDesc *effect = new EffectDesc(*effects[i]); // deep copy - loadEffectParameters(node, effect->mParams); - 
ALOGV("loadInputSource() adding effect %s uuid %08x", effect->mName, effect->mUuid.timeLow); - source->mEffects.add(effect); - node = node->next; - } - if (source->mEffects.size() == 0) { - ALOGW("loadInputSource() no valid effects found in source %s", root->name); - delete source; - return NULL; - } - return source; -} - -status_t AudioPolicyService::loadInputSources(cnode *root, const Vector <EffectDesc *>& effects) -{ - cnode *node = config_find(root, PREPROCESSING_TAG); - if (node == NULL) { - return -ENOENT; - } - node = node->first_child; - while (node) { - audio_source_t source = inputSourceNameToEnum(node->name); - if (source == AUDIO_SOURCE_CNT) { - ALOGW("loadInputSources() invalid input source %s", node->name); - node = node->next; - continue; - } - ALOGV("loadInputSources() loading input source %s", node->name); - InputSourceDesc *desc = loadInputSource(node, effects); - if (desc == NULL) { - node = node->next; - continue; - } - mInputSources.add(source, desc); - node = node->next; - } - return NO_ERROR; -} - -AudioPolicyService::EffectDesc *AudioPolicyService::loadEffect(cnode *root) -{ - cnode *node = config_find(root, UUID_TAG); - if (node == NULL) { - return NULL; - } - effect_uuid_t uuid; - if (AudioEffect::stringToGuid(node->value, &uuid) != NO_ERROR) { - ALOGW("loadEffect() invalid uuid %s", node->value); - return NULL; - } - return new EffectDesc(root->name, uuid); -} - -status_t AudioPolicyService::loadEffects(cnode *root, Vector <EffectDesc *>& effects) -{ - cnode *node = config_find(root, EFFECTS_TAG); - if (node == NULL) { - return -ENOENT; - } - node = node->first_child; - while (node) { - ALOGV("loadEffects() loading effect %s", node->name); - EffectDesc *effect = loadEffect(node); - if (effect == NULL) { - node = node->next; - continue; - } - effects.add(effect); - node = node->next; - } - return NO_ERROR; -} - -status_t AudioPolicyService::loadPreProcessorConfig(const char *path) -{ - cnode *root; - char *data; - - data = (char *)load_file(path, NULL); - if (data == NULL) { - return -ENODEV; - } - root = config_node("", ""); - config_load(root, data); - - Vector <EffectDesc *> effects; - loadEffects(root, effects); - loadInputSources(root, effects); - - // delete effects to fix memory leak. 
- // as effects is local var and valgrind would treat this as memory leak - // and although it only did in mediaserver init, but free it in case mediaserver reboot - size_t i; - for (i = 0; i < effects.size(); i++) { - delete effects[i]; - } - - config_free(root); - free(root); - free(data); - - return NO_ERROR; -} - -/* implementation of the interface to the policy manager */ -extern "C" { - - -static audio_module_handle_t aps_load_hw_module(void *service, - const char *name) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - - return af->loadHwModule(name); -} - -// deprecated: replaced by aps_open_output_on_module() -static audio_io_handle_t aps_open_output(void *service, - audio_devices_t *pDevices, - uint32_t *pSamplingRate, - audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask, - uint32_t *pLatencyMs, - audio_output_flags_t flags) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - - return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask, - pLatencyMs, flags); -} - -static audio_io_handle_t aps_open_output_on_module(void *service, - audio_module_handle_t module, - audio_devices_t *pDevices, - uint32_t *pSamplingRate, - audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask, - uint32_t *pLatencyMs, - audio_output_flags_t flags, - const audio_offload_info_t *offloadInfo) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask, - pLatencyMs, flags, offloadInfo); -} - -static audio_io_handle_t aps_open_dup_output(void *service, - audio_io_handle_t output1, - audio_io_handle_t output2) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - return af->openDuplicateOutput(output1, output2); -} - -static int aps_close_output(void *service, audio_io_handle_t output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) - return PERMISSION_DENIED; - - return af->closeOutput(output); -} - -static int aps_suspend_output(void *service, audio_io_handle_t output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return PERMISSION_DENIED; - } - - return af->suspendOutput(output); -} - -static int aps_restore_output(void *service, audio_io_handle_t output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return PERMISSION_DENIED; - } - - return af->restoreOutput(output); -} - -// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored -static audio_io_handle_t aps_open_input(void *service, - audio_devices_t *pDevices, - uint32_t *pSamplingRate, - audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask, - audio_in_acoustics_t acoustics) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - - return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask); -} - -static audio_io_handle_t aps_open_input_on_module(void *service, - 
audio_module_handle_t module, - audio_devices_t *pDevices, - uint32_t *pSamplingRate, - audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - - return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask); -} - -static int aps_close_input(void *service, audio_io_handle_t input) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) - return PERMISSION_DENIED; - - return af->closeInput(input); -} - -static int aps_set_stream_output(void *service, audio_stream_type_t stream, - audio_io_handle_t output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) - return PERMISSION_DENIED; - - return af->setStreamOutput(stream, output); -} - -static int aps_move_effects(void *service, int session, - audio_io_handle_t src_output, - audio_io_handle_t dst_output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) - return PERMISSION_DENIED; - - return af->moveEffects(session, src_output, dst_output); -} - -static char * aps_get_parameters(void *service, audio_io_handle_t io_handle, - const char *keys) -{ - String8 result = AudioSystem::getParameters(io_handle, String8(keys)); - return strdup(result.string()); -} - -static void aps_set_parameters(void *service, audio_io_handle_t io_handle, - const char *kv_pairs, int delay_ms) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms); -} - -static int aps_set_stream_volume(void *service, audio_stream_type_t stream, - float volume, audio_io_handle_t output, - int delay_ms) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - return audioPolicyService->setStreamVolume(stream, volume, output, - delay_ms); -} - -static int aps_start_tone(void *service, audio_policy_tone_t tone, - audio_stream_type_t stream) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - return audioPolicyService->startTone(tone, stream); -} - -static int aps_stop_tone(void *service) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - return audioPolicyService->stopTone(); -} - -static int aps_set_voice_volume(void *service, float volume, int delay_ms) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - return audioPolicyService->setVoiceVolume(volume, delay_ms); -} - -}; // extern "C" - -namespace { - struct audio_policy_service_ops aps_ops = { - open_output : aps_open_output, - open_duplicate_output : aps_open_dup_output, - close_output : aps_close_output, - suspend_output : aps_suspend_output, - restore_output : aps_restore_output, - open_input : aps_open_input, - close_input : aps_close_input, - set_stream_volume : aps_set_stream_volume, - set_stream_output : aps_set_stream_output, - set_parameters : aps_set_parameters, - get_parameters : aps_get_parameters, - start_tone : aps_start_tone, - stop_tone : aps_stop_tone, - set_voice_volume : aps_set_voice_volume, - move_effects : aps_move_effects, - load_hw_module : aps_load_hw_module, - open_output_on_module : aps_open_output_on_module, - open_input_on_module : aps_open_input_on_module, - }; -}; // namespace <unnamed> - -}; // namespace android diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h deleted file mode 100644 index a38160f..0000000 --- 
a/services/audioflinger/AudioPolicyService.h +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Copyright (C) 2009 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_AUDIOPOLICYSERVICE_H -#define ANDROID_AUDIOPOLICYSERVICE_H - -#include <cutils/misc.h> -#include <cutils/config_utils.h> -#include <cutils/compiler.h> -#include <utils/String8.h> -#include <utils/Vector.h> -#include <utils/SortedVector.h> -#include <binder/BinderService.h> -#include <system/audio.h> -#include <system/audio_policy.h> -#include <hardware/audio_policy.h> -#include <media/IAudioPolicyService.h> -#include <media/ToneGenerator.h> -#include <media/AudioEffect.h> - -namespace android { - -// ---------------------------------------------------------------------------- - -class AudioPolicyService : - public BinderService<AudioPolicyService>, - public BnAudioPolicyService, -// public AudioPolicyClientInterface, - public IBinder::DeathRecipient -{ - friend class BinderService<AudioPolicyService>; - -public: - // for BinderService - static const char *getServiceName() ANDROID_API { return "media.audio_policy"; } - - virtual status_t dump(int fd, const Vector<String16>& args); - - // - // BnAudioPolicyService (see AudioPolicyInterface for method descriptions) - // - - virtual status_t setDeviceConnectionState(audio_devices_t device, - audio_policy_dev_state_t state, - const char *device_address); - virtual audio_policy_dev_state_t getDeviceConnectionState( - audio_devices_t device, - const char *device_address); - virtual status_t setPhoneState(audio_mode_t state); - virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config); - virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage); - virtual audio_io_handle_t getOutput(audio_stream_type_t stream, - uint32_t samplingRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = 0, - audio_output_flags_t flags = - AUDIO_OUTPUT_FLAG_NONE, - const audio_offload_info_t *offloadInfo = NULL); - virtual status_t startOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int session = 0); - virtual status_t stopOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int session = 0); - virtual void releaseOutput(audio_io_handle_t output); - virtual audio_io_handle_t getInput(audio_source_t inputSource, - uint32_t samplingRate = 0, - audio_format_t format = AUDIO_FORMAT_DEFAULT, - audio_channel_mask_t channelMask = 0, - int audioSession = 0); - virtual status_t startInput(audio_io_handle_t input); - virtual status_t stopInput(audio_io_handle_t input); - virtual void releaseInput(audio_io_handle_t input); - virtual status_t initStreamVolume(audio_stream_type_t stream, - int indexMin, - int indexMax); - virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, - int index, - audio_devices_t device); - virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, - int *index, - audio_devices_t device); - - virtual 
uint32_t getStrategyForStream(audio_stream_type_t stream); - virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream); - - virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc); - virtual status_t registerEffect(const effect_descriptor_t *desc, - audio_io_handle_t io, - uint32_t strategy, - int session, - int id); - virtual status_t unregisterEffect(int id); - virtual status_t setEffectEnabled(int id, bool enabled); - virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const; - virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const; - virtual bool isSourceActive(audio_source_t source) const; - - virtual status_t queryDefaultPreProcessing(int audioSession, - effect_descriptor_t *descriptors, - uint32_t *count); - virtual status_t onTransact( - uint32_t code, - const Parcel& data, - Parcel* reply, - uint32_t flags); - - // IBinder::DeathRecipient - virtual void binderDied(const wp<IBinder>& who); - - // - // Helpers for the struct audio_policy_service_ops implementation. - // This is used by the audio policy manager for certain operations that - // are implemented by the policy service. - // - virtual void setParameters(audio_io_handle_t ioHandle, - const char *keyValuePairs, - int delayMs); - - virtual status_t setStreamVolume(audio_stream_type_t stream, - float volume, - audio_io_handle_t output, - int delayMs = 0); - virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream); - virtual status_t stopTone(); - virtual status_t setVoiceVolume(float volume, int delayMs = 0); - virtual bool isOffloadSupported(const audio_offload_info_t &config); - - status_t doStopOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int session = 0); - void doReleaseOutput(audio_io_handle_t output); - -private: - AudioPolicyService() ANDROID_API; - virtual ~AudioPolicyService(); - - status_t dumpInternals(int fd); - - // Thread used for tone playback and to send audio config commands to audio flinger - // For tone playback, using a separate thread is necessary to avoid deadlock with mLock because - // startTone() and stopTone() are normally called with mLock locked and requesting a tone start - // or stop will cause calls to AudioPolicyService and an attempt to lock mLock. - // For audio config commands, it is necessary because audio flinger requires that the calling - // process (user) has permission to modify audio settings. 
- class AudioCommandThread : public Thread { - class AudioCommand; - public: - - // commands for tone AudioCommand - enum { - START_TONE, - STOP_TONE, - SET_VOLUME, - SET_PARAMETERS, - SET_VOICE_VOLUME, - STOP_OUTPUT, - RELEASE_OUTPUT - }; - - AudioCommandThread (String8 name, const wp<AudioPolicyService>& service); - virtual ~AudioCommandThread(); - - status_t dump(int fd); - - // Thread virtuals - virtual void onFirstRef(); - virtual bool threadLoop(); - - void exit(); - void startToneCommand(ToneGenerator::tone_type type, - audio_stream_type_t stream); - void stopToneCommand(); - status_t volumeCommand(audio_stream_type_t stream, float volume, - audio_io_handle_t output, int delayMs = 0); - status_t parametersCommand(audio_io_handle_t ioHandle, - const char *keyValuePairs, int delayMs = 0); - status_t voiceVolumeCommand(float volume, int delayMs = 0); - void stopOutputCommand(audio_io_handle_t output, - audio_stream_type_t stream, - int session); - void releaseOutputCommand(audio_io_handle_t output); - - void insertCommand_l(AudioCommand *command, int delayMs = 0); - - private: - class AudioCommandData; - - // descriptor for requested tone playback event - class AudioCommand { - - public: - AudioCommand() - : mCommand(-1) {} - - void dump(char* buffer, size_t size); - - int mCommand; // START_TONE, STOP_TONE ... - nsecs_t mTime; // time stamp - Condition mCond; // condition for status return - status_t mStatus; // command status - bool mWaitStatus; // true if caller is waiting for status - AudioCommandData *mParam; // command specific parameter data - }; - - class AudioCommandData { - public: - virtual ~AudioCommandData() {} - protected: - AudioCommandData() {} - }; - - class ToneData : public AudioCommandData { - public: - ToneGenerator::tone_type mType; // tone type (START_TONE only) - audio_stream_type_t mStream; // stream type (START_TONE only) - }; - - class VolumeData : public AudioCommandData { - public: - audio_stream_type_t mStream; - float mVolume; - audio_io_handle_t mIO; - }; - - class ParametersData : public AudioCommandData { - public: - audio_io_handle_t mIO; - String8 mKeyValuePairs; - }; - - class VoiceVolumeData : public AudioCommandData { - public: - float mVolume; - }; - - class StopOutputData : public AudioCommandData { - public: - audio_io_handle_t mIO; - audio_stream_type_t mStream; - int mSession; - }; - - class ReleaseOutputData : public AudioCommandData { - public: - audio_io_handle_t mIO; - }; - - Mutex mLock; - Condition mWaitWorkCV; - Vector <AudioCommand *> mAudioCommands; // list of pending commands - ToneGenerator *mpToneGenerator; // the tone generator - AudioCommand mLastCommand; // last processed command (used by dump) - String8 mName; // string used by wake lock fo delayed commands - wp<AudioPolicyService> mService; - }; - - class EffectDesc { - public: - EffectDesc(const char *name, const effect_uuid_t& uuid) : - mName(strdup(name)), - mUuid(uuid) { } - EffectDesc(const EffectDesc& orig) : - mName(strdup(orig.mName)), - mUuid(orig.mUuid) { - // deep copy mParams - for (size_t k = 0; k < orig.mParams.size(); k++) { - effect_param_t *origParam = orig.mParams[k]; - // psize and vsize are rounded up to an int boundary for allocation - size_t origSize = sizeof(effect_param_t) + - ((origParam->psize + 3) & ~3) + - ((origParam->vsize + 3) & ~3); - effect_param_t *dupParam = (effect_param_t *) malloc(origSize); - memcpy(dupParam, origParam, origSize); - // This works because the param buffer allocation is also done by - // multiples of 4 bytes originally. 
In theory we should memcpy only - // the actual param size, that is without rounding vsize. - mParams.add(dupParam); - } - } - /*virtual*/ ~EffectDesc() { - free(mName); - for (size_t k = 0; k < mParams.size(); k++) { - free(mParams[k]); - } - } - char *mName; - effect_uuid_t mUuid; - Vector <effect_param_t *> mParams; - }; - - class InputSourceDesc { - public: - InputSourceDesc() {} - /*virtual*/ ~InputSourceDesc() { - for (size_t j = 0; j < mEffects.size(); j++) { - delete mEffects[j]; - } - } - Vector <EffectDesc *> mEffects; - }; - - - class InputDesc { - public: - InputDesc(int session) : mSessionId(session) {} - /*virtual*/ ~InputDesc() {} - const int mSessionId; - Vector< sp<AudioEffect> >mEffects; - }; - - static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1]; - - void setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled); - status_t loadPreProcessorConfig(const char *path); - status_t loadEffects(cnode *root, Vector <EffectDesc *>& effects); - EffectDesc *loadEffect(cnode *root); - status_t loadInputSources(cnode *root, const Vector <EffectDesc *>& effects); - audio_source_t inputSourceNameToEnum(const char *name); - InputSourceDesc *loadInputSource(cnode *root, const Vector <EffectDesc *>& effects); - void loadEffectParameters(cnode *root, Vector <effect_param_t *>& params); - effect_param_t *loadEffectParameter(cnode *root); - size_t readParamValue(cnode *node, - char *param, - size_t *curSize, - size_t *totSize); - size_t growParamSize(char *param, - size_t size, - size_t *curSize, - size_t *totSize); - - // Internal dump utilities. - status_t dumpPermissionDenial(int fd); - - - mutable Mutex mLock; // prevents concurrent access to AudioPolicy manager functions changing - // device connection state or routing - sp<AudioCommandThread> mAudioCommandThread; // audio commands thread - sp<AudioCommandThread> mTonePlaybackThread; // tone playback thread - sp<AudioCommandThread> mOutputCommandThread; // process stop and release output - struct audio_policy_device *mpAudioPolicyDev; - struct audio_policy *mpAudioPolicy; - KeyedVector< audio_source_t, InputSourceDesc* > mInputSources; - KeyedVector< audio_io_handle_t, InputDesc* > mInputs; -}; - -}; // namespace android - -#endif // ANDROID_AUDIOPOLICYSERVICE_H diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp index e5cceb1..1f7a613 100644 --- a/services/audioflinger/AudioResampler.cpp +++ b/services/audioflinger/AudioResampler.cpp @@ -22,9 +22,11 @@ #include <sys/types.h> #include <cutils/log.h> #include <cutils/properties.h> +#include <audio_utils/primitives.h> #include "AudioResampler.h" #include "AudioResamplerSinc.h" #include "AudioResamplerCubic.h" +#include "AudioResamplerDyn.h" #ifdef __arm__ #include <machine/cpu-features.h> @@ -39,8 +41,8 @@ namespace android { class AudioResamplerOrder1 : public AudioResampler { public: - AudioResamplerOrder1(int bitDepth, int inChannelCount, int32_t sampleRate) : - AudioResampler(bitDepth, inChannelCount, sampleRate, LOW_QUALITY), mX0L(0), mX0R(0) { + AudioResamplerOrder1(int inChannelCount, int32_t sampleRate) : + AudioResampler(inChannelCount, sampleRate, LOW_QUALITY), mX0L(0), mX0R(0) { } virtual void resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); @@ -77,6 +79,9 @@ private: int mX0R; }; +/*static*/ +const double AudioResampler::kPhaseMultiplier = 1L << AudioResampler::kNumPhaseBits; + bool AudioResampler::qualityIsSupported(src_quality quality) { switch (quality) { @@ -85,6 +90,9 @@ bool 
AudioResampler::qualityIsSupported(src_quality quality) case MED_QUALITY: case HIGH_QUALITY: case VERY_HIGH_QUALITY: + case DYN_LOW_QUALITY: + case DYN_MED_QUALITY: + case DYN_HIGH_QUALITY: return true; default: return false; @@ -105,7 +113,7 @@ void AudioResampler::init_routine() if (*endptr == '\0') { defaultQuality = (src_quality) l; ALOGD("forcing AudioResampler quality to %d", defaultQuality); - if (defaultQuality < DEFAULT_QUALITY || defaultQuality > VERY_HIGH_QUALITY) { + if (defaultQuality < DEFAULT_QUALITY || defaultQuality > DYN_HIGH_QUALITY) { defaultQuality = DEFAULT_QUALITY; } } @@ -125,6 +133,12 @@ uint32_t AudioResampler::qualityMHz(src_quality quality) return 20; case VERY_HIGH_QUALITY: return 34; + case DYN_LOW_QUALITY: + return 4; + case DYN_MED_QUALITY: + return 6; + case DYN_HIGH_QUALITY: + return 12; } } @@ -132,7 +146,7 @@ static const uint32_t maxMHz = 130; // an arbitrary number that permits 3 VHQ, s static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static uint32_t currentMHz = 0; -AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, +AudioResampler* AudioResampler::create(audio_format_t format, int inChannelCount, int32_t sampleRate, src_quality quality) { bool atFinalQuality; @@ -148,6 +162,16 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, atFinalQuality = true; } + /* if the caller requests DEFAULT_QUALITY and af.resampler.property + * has not been set, the target resampler quality is set to DYN_MED_QUALITY, + * and allowed to "throttle" down to DYN_LOW_QUALITY if necessary + * due to estimated CPU load of having too many active resamplers + * (the code below the if). + */ + if (quality == DEFAULT_QUALITY) { + quality = DYN_MED_QUALITY; + } + // naive implementation of CPU load throttling doesn't account for whether resampler is active pthread_mutex_lock(&mutex); for (;;) { @@ -162,7 +186,6 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, // not enough CPU available for proposed quality level, so try next lowest level switch (quality) { default: - case DEFAULT_QUALITY: case LOW_QUALITY: atFinalQuality = true; break; @@ -175,6 +198,15 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, case VERY_HIGH_QUALITY: quality = HIGH_QUALITY; break; + case DYN_LOW_QUALITY: + atFinalQuality = true; + break; + case DYN_MED_QUALITY: + quality = DYN_LOW_QUALITY; + break; + case DYN_HIGH_QUALITY: + quality = DYN_MED_QUALITY; + break; } } pthread_mutex_unlock(&mutex); @@ -183,22 +215,43 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, switch (quality) { default: - case DEFAULT_QUALITY: case LOW_QUALITY: ALOGV("Create linear Resampler"); - resampler = new AudioResamplerOrder1(bitDepth, inChannelCount, sampleRate); + LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT); + resampler = new AudioResamplerOrder1(inChannelCount, sampleRate); break; case MED_QUALITY: ALOGV("Create cubic Resampler"); - resampler = new AudioResamplerCubic(bitDepth, inChannelCount, sampleRate); + LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT); + resampler = new AudioResamplerCubic(inChannelCount, sampleRate); break; case HIGH_QUALITY: ALOGV("Create HIGH_QUALITY sinc Resampler"); - resampler = new AudioResamplerSinc(bitDepth, inChannelCount, sampleRate); + LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT); + resampler = new AudioResamplerSinc(inChannelCount, sampleRate); break; case VERY_HIGH_QUALITY: ALOGV("Create VERY_HIGH_QUALITY sinc Resampler = %d", 
quality); - resampler = new AudioResamplerSinc(bitDepth, inChannelCount, sampleRate, quality); + LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT); + resampler = new AudioResamplerSinc(inChannelCount, sampleRate, quality); + break; + case DYN_LOW_QUALITY: + case DYN_MED_QUALITY: + case DYN_HIGH_QUALITY: + ALOGV("Create dynamic Resampler = %d", quality); + if (format == AUDIO_FORMAT_PCM_FLOAT) { + resampler = new AudioResamplerDyn<float, float, float>(inChannelCount, + sampleRate, quality); + } else { + LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT); + if (quality == DYN_HIGH_QUALITY) { + resampler = new AudioResamplerDyn<int32_t, int16_t, int32_t>(inChannelCount, + sampleRate, quality); + } else { + resampler = new AudioResamplerDyn<int16_t, int16_t, int32_t>(inChannelCount, + sampleRate, quality); + } + } break; } @@ -207,26 +260,26 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, return resampler; } -AudioResampler::AudioResampler(int bitDepth, int inChannelCount, +AudioResampler::AudioResampler(int inChannelCount, int32_t sampleRate, src_quality quality) : - mBitDepth(bitDepth), mChannelCount(inChannelCount), - mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0), - mPhaseFraction(0), mLocalTimeFreq(0), - mPTS(AudioBufferProvider::kInvalidPTS), mQuality(quality) { - // sanity check on format - if ((bitDepth != 16) ||(inChannelCount < 1) || (inChannelCount > 2)) { - ALOGE("Unsupported sample format, %d bits, %d channels", bitDepth, - inChannelCount); - // ALOG_ASSERT(0); + mChannelCount(inChannelCount), + mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0), + mPhaseFraction(0), mLocalTimeFreq(0), + mPTS(AudioBufferProvider::kInvalidPTS), mQuality(quality) { + + const int maxChannels = quality < DYN_LOW_QUALITY ? 2 : 8; + if (inChannelCount < 1 + || inChannelCount > maxChannels) { + LOG_ALWAYS_FATAL("Unsupported sample format %d quality %d channels", + quality, inChannelCount); } if (sampleRate <= 0) { - ALOGE("Unsupported sample rate %d Hz", sampleRate); + LOG_ALWAYS_FATAL("Unsupported sample rate %d Hz", sampleRate); } // initialize common members mVolume[0] = mVolume[1] = 0; mBuffer.frameCount = 0; - } AudioResampler::~AudioResampler() { @@ -246,10 +299,12 @@ void AudioResampler::setSampleRate(int32_t inSampleRate) { mPhaseIncrement = (uint32_t)((kPhaseMultiplier * inSampleRate) / mSampleRate); } -void AudioResampler::setVolume(int16_t left, int16_t right) { +void AudioResampler::setVolume(float left, float right) { // TODO: Implement anti-zipper filter - mVolume[0] = left; - mVolume[1] = right; + // convert to U4.12 for internal integer use (round down) + // integer volume values are clamped to 0 to UNITY_GAIN. 
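U4.12 here means an unsigned fixed-point format with 4 integer and 12 fractional bits, so unity gain corresponds to 1 << 12 = 4096. A hypothetical stand-in for u4_12_from_float() (the real helper comes from audio_utils/primitives.h and handles its own rounding) is just scale-and-truncate of the already-clamped gain:

#include <stdint.h>

// Hypothetical illustration only, not the audio_utils implementation:
// scale a clamped [0.0, 1.0] gain into U4.12 fixed point.
static inline uint16_t u4_12_sketch(float v) {
    return (uint16_t)(v * (1 << 12));   // 1.0f -> 4096, 0.25f -> 1024
}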
+ mVolume[0] = u4_12_from_float(clampFloatVol(left)); + mVolume[1] = u4_12_from_float(clampFloatVol(right)); } void AudioResampler::setLocalTimeFreq(uint64_t freq) { @@ -305,7 +360,7 @@ void AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d", // outFrameCount, inputIndex, phaseFraction, phaseIncrement); @@ -339,8 +394,9 @@ void AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount, out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction); out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction); Advance(&inputIndex, &phaseFraction, phaseIncrement); - if (outputIndex == outputSampleCount) + if (outputIndex == outputSampleCount) { break; + } } // process input samples @@ -402,7 +458,7 @@ void AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d", // outFrameCount, inputIndex, phaseFraction, phaseIncrement); @@ -434,8 +490,9 @@ void AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount, out[outputIndex++] += vl * sample; out[outputIndex++] += vr * sample; Advance(&inputIndex, &phaseFraction, phaseIncrement); - if (outputIndex == outputSampleCount) + if (outputIndex == outputSampleCount) { break; + } } // process input samples @@ -514,6 +571,16 @@ void AudioResamplerOrder1::AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr, uint32_t &phaseFraction, uint32_t phaseIncrement) { + (void)maxOutPt; // remove unused parameter warnings + (void)maxInIdx; + (void)outputIndex; + (void)out; + (void)inputIndex; + (void)vl; + (void)vr; + (void)phaseFraction; + (void)phaseIncrement; + (void)in; #define MO_PARAM5 "36" // offset of parameter 5 (outputIndex) asm( @@ -625,6 +692,16 @@ void AudioResamplerOrder1::AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32 size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr, uint32_t &phaseFraction, uint32_t phaseIncrement) { + (void)maxOutPt; // remove unused parameter warnings + (void)maxInIdx; + (void)outputIndex; + (void)out; + (void)inputIndex; + (void)vl; + (void)vr; + (void)phaseFraction; + (void)phaseIncrement; + (void)in; #define ST_PARAM5 "40" // offset of parameter 5 (outputIndex) asm( "stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}\n" diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h index 33e64ce..cdc6d92 100644 --- a/services/audioflinger/AudioResampler.h +++ b/services/audioflinger/AudioResampler.h @@ -22,6 +22,7 @@ #include <cutils/compiler.h> #include <media/AudioBufferProvider.h> +#include <system/audio.h> namespace android { // ---------------------------------------------------------------------------- @@ -41,16 +42,21 @@ public: MED_QUALITY=2, HIGH_QUALITY=3, VERY_HIGH_QUALITY=4, + DYN_LOW_QUALITY=5, + DYN_MED_QUALITY=6, + DYN_HIGH_QUALITY=7, }; - 
static AudioResampler* create(int bitDepth, int inChannelCount, + static const float UNITY_GAIN_FLOAT = 1.0f; + + static AudioResampler* create(audio_format_t format, int inChannelCount, int32_t sampleRate, src_quality quality=DEFAULT_QUALITY); virtual ~AudioResampler(); virtual void init() = 0; virtual void setSampleRate(int32_t inSampleRate); - virtual void setVolume(int16_t left, int16_t right); + virtual void setVolume(float left, float right); virtual void setLocalTimeFreq(uint64_t freq); // set the PTS of the next buffer output by the resampler @@ -60,7 +66,7 @@ public: // A mono provider delivers a sequence of samples. // A stereo provider delivers a sequence of interleaved pairs of samples. // Multi-channel providers are not supported. - // In either case, 'out' holds interleaved pairs of fixed-point signed Q19.12. + // In either case, 'out' holds interleaved pairs of fixed-point Q4.27. // That is, for a mono provider, there is an implicit up-channeling. // Since this method accumulates, the caller is responsible for clearing 'out' initially. // FIXME assumes provider is always successful; it should return the actual frame count. @@ -81,9 +87,9 @@ protected: static const uint32_t kPhaseMask = (1LU<<kNumPhaseBits)-1; // multiplier to calculate fixed point phase increment - static const double kPhaseMultiplier = 1L << kNumPhaseBits; + static const double kPhaseMultiplier; - AudioResampler(int bitDepth, int inChannelCount, int32_t sampleRate, src_quality quality); + AudioResampler(int inChannelCount, int32_t sampleRate, src_quality quality); // prevent copying AudioResampler(const AudioResampler&); @@ -91,7 +97,6 @@ protected: int64_t calculateOutputPTS(int outputFrameIndex); - const int32_t mBitDepth; const int32_t mChannelCount; const int32_t mSampleRate; int32_t mInSampleRate; @@ -107,6 +112,47 @@ protected: uint64_t mLocalTimeFreq; int64_t mPTS; + // returns the inFrameCount required to generate outFrameCount frames. + // + // Placed here to be a consistent for all resamplers. + // + // Right now, we use the upper bound without regards to the current state of the + // input buffer using integer arithmetic, as follows: + // + // (static_cast<uint64_t>(outFrameCount)*mInSampleRate + (mSampleRate - 1))/mSampleRate; + // + // The double precision equivalent (float may not be precise enough): + // ceil(static_cast<double>(outFrameCount) * mInSampleRate / mSampleRate); + // + // this relies on the fact that the mPhaseIncrement is rounded down from + // #phases * mInSampleRate/mSampleRate and the fact that Sum(Floor(x)) <= Floor(Sum(x)). + // http://www.proofwiki.org/wiki/Sum_of_Floors_Not_Greater_Than_Floor_of_Sums + // + // (so long as double precision is computed accurately enough to be considered + // greater than or equal to the Floor(x) value in int32_t arithmetic; thus this + // will not necessarily hold for floats). + // + // TODO: + // Greater accuracy and a tight bound is obtained by: + // 1) subtract and adjust for the current state of the AudioBufferProvider buffer. + // 2) using the exact integer formula where (ignoring 64b casting) + // inFrameCount = (mPhaseIncrement * (outFrameCount - 1) + mPhaseFraction) / phaseWrapLimit; + // phaseWrapLimit is the wraparound (1 << kNumPhaseBits), if not specified explicitly. 
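The bound above, restated as a standalone helper with one worked number (same integer rounding as the expression in the comment):

#include <stddef.h>
#include <stdint.h>

// ceil(outFrameCount * inRate / outRate), computed in integer arithmetic.
static size_t inFramesRequired(size_t outFrameCount, uint32_t inRate, uint32_t outRate) {
    return ((uint64_t)outFrameCount * inRate + (outRate - 1)) / outRate;
}
// Producing 1024 output frames at 48000 Hz from a 44100 Hz source needs
// inFramesRequired(1024, 44100, 48000) == 941, i.e. ceil(940.8) input frames.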
+ // + inline size_t getInFrameCountRequired(size_t outFrameCount) { + return (static_cast<uint64_t>(outFrameCount)*mInSampleRate + + (mSampleRate - 1))/mSampleRate; + } + + inline float clampFloatVol(float volume) { + if (volume > UNITY_GAIN_FLOAT) { + return UNITY_GAIN_FLOAT; + } else if (volume >= 0.) { + return volume; + } + return 0.; // NaN or negative volume maps to 0. + } + private: const src_quality mQuality; diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp index 18e59e9..8f14ff9 100644 --- a/services/audioflinger/AudioResamplerCubic.cpp +++ b/services/audioflinger/AudioResamplerCubic.cpp @@ -60,14 +60,15 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); // fetch first buffer if (mBuffer.frameCount == 0) { mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, mPTS); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { return; + } // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount); } int16_t *in = mBuffer.i16; @@ -97,8 +98,9 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, calculateOutputPTS(outputIndex / 2)); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { goto save_state; // ugly, but efficient + } in = mBuffer.i16; // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount); } @@ -126,14 +128,15 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); // fetch first buffer if (mBuffer.frameCount == 0) { mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, mPTS); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { return; + } // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount); } int16_t *in = mBuffer.i16; @@ -163,8 +166,9 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, calculateOutputPTS(outputIndex / 2)); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { goto save_state; // ugly, but efficient + } // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount); in = mBuffer.i16; } diff --git a/services/audioflinger/AudioResamplerCubic.h b/services/audioflinger/AudioResamplerCubic.h index 203b933..b315da5 100644 --- a/services/audioflinger/AudioResamplerCubic.h +++ b/services/audioflinger/AudioResamplerCubic.h @@ -28,8 +28,8 @@ namespace android { class AudioResamplerCubic : public AudioResampler { public: - AudioResamplerCubic(int bitDepth, int inChannelCount, int32_t sampleRate) : - AudioResampler(bitDepth, inChannelCount, sampleRate, MED_QUALITY) { + AudioResamplerCubic(int inChannelCount, int32_t sampleRate) : + AudioResampler(inChannelCount, sampleRate, MED_QUALITY) { } virtual void resample(int32_t* out, size_t outFrameCount, AudioBufferProvider* provider); diff --git a/services/audioflinger/AudioResamplerDyn.cpp 
b/services/audioflinger/AudioResamplerDyn.cpp new file mode 100644 index 0000000..0eeb201 --- /dev/null +++ b/services/audioflinger/AudioResamplerDyn.cpp @@ -0,0 +1,621 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "AudioResamplerDyn" +//#define LOG_NDEBUG 0 + +#include <malloc.h> +#include <string.h> +#include <stdlib.h> +#include <dlfcn.h> +#include <math.h> + +#include <cutils/compiler.h> +#include <cutils/properties.h> +#include <utils/Debug.h> +#include <utils/Log.h> +#include <audio_utils/primitives.h> + +#include "AudioResamplerFirOps.h" // USE_NEON and USE_INLINE_ASSEMBLY defined here +#include "AudioResamplerFirProcess.h" +#include "AudioResamplerFirProcessNeon.h" +#include "AudioResamplerFirGen.h" // requires math.h +#include "AudioResamplerDyn.h" + +//#define DEBUG_RESAMPLER + +namespace android { + +/* + * InBuffer is a type agnostic input buffer. + * + * Layout of the state buffer for halfNumCoefs=8. + * + * [rrrrrrppppppppnnnnnnnnrrrrrrrrrrrrrrrrrrr.... rrrrrrr] + * S I R + * + * S = mState + * I = mImpulse + * R = mRingFull + * p = past samples, convoluted with the (p)ositive side of sinc() + * n = future samples, convoluted with the (n)egative side of sinc() + * r = extra space for implementing the ring buffer + */ + +template<typename TC, typename TI, typename TO> +AudioResamplerDyn<TC, TI, TO>::InBuffer::InBuffer() + : mState(NULL), mImpulse(NULL), mRingFull(NULL), mStateCount(0) +{ +} + +template<typename TC, typename TI, typename TO> +AudioResamplerDyn<TC, TI, TO>::InBuffer::~InBuffer() +{ + init(); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::InBuffer::init() +{ + free(mState); + mState = NULL; + mImpulse = NULL; + mRingFull = NULL; + mStateCount = 0; +} + +// resizes the state buffer to accommodate the appropriate filter length +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::InBuffer::resize(int CHANNELS, int halfNumCoefs) +{ + // calculate desired state size + size_t stateCount = halfNumCoefs * CHANNELS * 2 * kStateSizeMultipleOfFilterLength; + + // check if buffer needs resizing + if (mState + && stateCount == mStateCount + && mRingFull-mState == (ssize_t) (mStateCount-halfNumCoefs*CHANNELS)) { + return; + } + + // create new buffer + TI* state = NULL; + (void)posix_memalign(reinterpret_cast<void**>(&state), 32, stateCount*sizeof(*state)); + memset(state, 0, stateCount*sizeof(*state)); + + // attempt to preserve state + if (mState) { + TI* srcLo = mImpulse - halfNumCoefs*CHANNELS; + TI* srcHi = mImpulse + halfNumCoefs*CHANNELS; + TI* dst = state; + + if (srcLo < mState) { + dst += mState-srcLo; + srcLo = mState; + } + if (srcHi > mState + mStateCount) { + srcHi = mState + mStateCount; + } + memcpy(dst, srcLo, (srcHi - srcLo) * sizeof(*srcLo)); + free(mState); + } + + // set class member vars + mState = state; + mStateCount = stateCount; + mImpulse = state + halfNumCoefs*CHANNELS; // 
actually one sample greater than needed + mRingFull = state + mStateCount - halfNumCoefs*CHANNELS; +} + +// copy in the input data into the head (impulse+halfNumCoefs) of the buffer. +template<typename TC, typename TI, typename TO> +template<int CHANNELS> +void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAgain(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex) +{ + TI* head = impulse + halfNumCoefs*CHANNELS; + for (size_t i=0 ; i<CHANNELS ; i++) { + head[i] = in[inputIndex*CHANNELS + i]; + } +} + +// advance the impulse pointer, and load in data into the head (impulse+halfNumCoefs) +template<typename TC, typename TI, typename TO> +template<int CHANNELS> +void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAdvance(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex) +{ + impulse += CHANNELS; + + if (CC_UNLIKELY(impulse >= mRingFull)) { + const size_t shiftDown = mRingFull - mState - halfNumCoefs*CHANNELS; + memcpy(mState, mState+shiftDown, halfNumCoefs*CHANNELS*2*sizeof(TI)); + impulse -= shiftDown; + } + readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::Constants::set( + int L, int halfNumCoefs, int inSampleRate, int outSampleRate) +{ + int bits = 0; + int lscale = inSampleRate/outSampleRate < 2 ? L - 1 : + static_cast<int>(static_cast<uint64_t>(L)*inSampleRate/outSampleRate); + for (int i=lscale; i; ++bits, i>>=1) + ; + mL = L; + mShift = kNumPhaseBits - bits; + mHalfNumCoefs = halfNumCoefs; +} + +template<typename TC, typename TI, typename TO> +AudioResamplerDyn<TC, TI, TO>::AudioResamplerDyn( + int inChannelCount, int32_t sampleRate, src_quality quality) + : AudioResampler(inChannelCount, sampleRate, quality), + mResampleFunc(0), mFilterSampleRate(0), mFilterQuality(DEFAULT_QUALITY), + mCoefBuffer(NULL) +{ + mVolumeSimd[0] = mVolumeSimd[1] = 0; + // The AudioResampler base class assumes we are always ready for 1:1 resampling. + // We reset mInSampleRate to 0, so setSampleRate() will calculate filters for + // setSampleRate() for 1:1. (May be removed if precalculated filters are used.) + mInSampleRate = 0; + mConstants.set(128, 8, mSampleRate, mSampleRate); // TODO: set better +} + +template<typename TC, typename TI, typename TO> +AudioResamplerDyn<TC, TI, TO>::~AudioResamplerDyn() +{ + free(mCoefBuffer); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::init() +{ + mFilterSampleRate = 0; // always trigger new filter generation + mInBuffer.init(); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::setVolume(float left, float right) +{ + AudioResampler::setVolume(left, right); + if (is_same<TO, float>::value || is_same<TO, double>::value) { + mVolumeSimd[0] = static_cast<TO>(left); + mVolumeSimd[1] = static_cast<TO>(right); + } else { // integer requires scaling to U4_28 (rounding down) + // integer volumes are clamped to 0 to UNITY_GAIN so there + // are no issues with signed overflow. + mVolumeSimd[0] = u4_28_from_float(clampFloatVol(left)); + mVolumeSimd[1] = u4_28_from_float(clampFloatVol(right)); + } +} + +template<typename T> T max(T a, T b) {return a > b ? a : b;} + +template<typename T> T absdiff(T a, T b) {return a > b ? 
a - b : b - a;} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::createKaiserFir(Constants &c, + double stopBandAtten, int inSampleRate, int outSampleRate, double tbwCheat) +{ + TC* buf = NULL; + static const double atten = 0.9998; // to avoid ripple overflow + double fcr; + double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten); + + (void)posix_memalign(reinterpret_cast<void**>(&buf), 32, (c.mL+1)*c.mHalfNumCoefs*sizeof(TC)); + if (inSampleRate < outSampleRate) { // upsample + fcr = max(0.5*tbwCheat - tbw/2, tbw/2); + } else { // downsample + fcr = max(0.5*tbwCheat*outSampleRate/inSampleRate - tbw/2, tbw/2); + } + // create and set filter + firKaiserGen(buf, c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten); + c.mFirCoefs = buf; + if (mCoefBuffer) { + free(mCoefBuffer); + } + mCoefBuffer = buf; +#ifdef DEBUG_RESAMPLER + // print basic filter stats + printf("L:%d hnc:%d stopBandAtten:%lf fcr:%lf atten:%lf tbw:%lf\n", + c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten, tbw); + // test the filter and report results + double fp = (fcr - tbw/2)/c.mL; + double fs = (fcr + tbw/2)/c.mL; + double passMin, passMax, passRipple; + double stopMax, stopRipple; + testFir(buf, c.mL, c.mHalfNumCoefs, fp, fs, /*passSteps*/ 1000, /*stopSteps*/ 100000, + passMin, passMax, passRipple, stopMax, stopRipple); + printf("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple); + printf("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple); +#endif +} + +// recursive gcd. Using objdump, it appears the tail recursion is converted to a while loop. +static int gcd(int n, int m) +{ + if (m == 0) { + return n; + } + return gcd(m, n % m); +} + +static bool isClose(int32_t newSampleRate, int32_t prevSampleRate, + int32_t filterSampleRate, int32_t outSampleRate) +{ + + // different upsampling ratios do not need a filter change. + if (filterSampleRate != 0 + && filterSampleRate < outSampleRate + && newSampleRate < outSampleRate) + return true; + + // check design criteria again if downsampling is detected. + int pdiff = absdiff(newSampleRate, prevSampleRate); + int adiff = absdiff(newSampleRate, filterSampleRate); + + // allow up to 6% relative change increments. + // allow up to 12% absolute change increments (from filter design) + return pdiff < prevSampleRate>>4 && adiff < filterSampleRate>>3; +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::setSampleRate(int32_t inSampleRate) +{ + if (mInSampleRate == inSampleRate) { + return; + } + int32_t oldSampleRate = mInSampleRate; + int32_t oldHalfNumCoefs = mConstants.mHalfNumCoefs; + uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift; + bool useS32 = false; + + mInSampleRate = inSampleRate; + + // TODO: Add precalculated Equiripple filters + + if (mFilterQuality != getQuality() || + !isClose(inSampleRate, oldSampleRate, mFilterSampleRate, mSampleRate)) { + mFilterSampleRate = inSampleRate; + mFilterQuality = getQuality(); + + // Begin Kaiser Filter computation + // + // The quantization floor for S16 is about 96db - 10*log_10(#length) + 3dB. 
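            // Worked numbers for that rule of thumb:
            //   length 32: 96 - 10*log10(32) + 3 ~= 96 - 15.1 + 3 ~= 84 dB
            //   length 64: 96 - 10*log10(64) + 3 ~= 96 - 18.1 + 3 ~= 81 dB
            // which lines up with the 80 and 84 dB S16 stop band targets chosen below.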
+ // Keep the stop band attenuation no greater than 84-85dB for 32 length S16 filters + // + // For s32 we keep the stop band attenuation at the same as 16b resolution, about + // 96-98dB + // + + double stopBandAtten; + double tbwCheat = 1.; // how much we "cheat" into aliasing + int halfLength; + if (mFilterQuality == DYN_HIGH_QUALITY) { + // 32b coefficients, 64 length + useS32 = true; + stopBandAtten = 98.; + if (inSampleRate >= mSampleRate * 4) { + halfLength = 48; + } else if (inSampleRate >= mSampleRate * 2) { + halfLength = 40; + } else { + halfLength = 32; + } + } else if (mFilterQuality == DYN_LOW_QUALITY) { + // 16b coefficients, 16-32 length + useS32 = false; + stopBandAtten = 80.; + if (inSampleRate >= mSampleRate * 4) { + halfLength = 24; + } else if (inSampleRate >= mSampleRate * 2) { + halfLength = 16; + } else { + halfLength = 8; + } + if (inSampleRate <= mSampleRate) { + tbwCheat = 1.05; + } else { + tbwCheat = 1.03; + } + } else { // DYN_MED_QUALITY + // 16b coefficients, 32-64 length + // note: > 64 length filters with 16b coefs can have quantization noise problems + useS32 = false; + stopBandAtten = 84.; + if (inSampleRate >= mSampleRate * 4) { + halfLength = 32; + } else if (inSampleRate >= mSampleRate * 2) { + halfLength = 24; + } else { + halfLength = 16; + } + if (inSampleRate <= mSampleRate) { + tbwCheat = 1.03; + } else { + tbwCheat = 1.01; + } + } + + // determine the number of polyphases in the filterbank. + // for 16b, it is desirable to have 2^(16/2) = 256 phases. + // https://ccrma.stanford.edu/~jos/resample/Relation_Interpolation_Error_Quantization.html + // + // We are a bit more lax on this. + + int phases = mSampleRate / gcd(mSampleRate, inSampleRate); + + // TODO: Once dynamic sample rate change is an option, the code below + // should be modified to execute only when dynamic sample rate change is enabled. + // + // as above, #phases less than 63 is too few phases for accurate linear interpolation. + // we increase the phases to compensate, but more phases means more memory per + // filter and more time to compute the filter. + // + // if we know that the filter will be used for dynamic sample rate changes, + // that would allow us skip this part for fixed sample rate resamplers. + // + while (phases<63) { + phases *= 2; // this code only needed to support dynamic rate changes + } + + if (phases>=256) { // too many phases, always interpolate + phases = 127; + } + + // create the filter + mConstants.set(phases, halfLength, inSampleRate, mSampleRate); + createKaiserFir(mConstants, stopBandAtten, + inSampleRate, mSampleRate, tbwCheat); + } // End Kaiser filter + + // update phase and state based on the new filter. + const Constants& c(mConstants); + mInBuffer.resize(mChannelCount, c.mHalfNumCoefs); + const uint32_t phaseWrapLimit = c.mL << c.mShift; + // try to preserve as much of the phase fraction as possible for on-the-fly changes + mPhaseFraction = static_cast<unsigned long long>(mPhaseFraction) + * phaseWrapLimit / oldPhaseWrapLimit; + mPhaseFraction %= phaseWrapLimit; // should not do anything, but just in case. 
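    // For orientation (illustrative values only): with mL = 128 phases and
    // mShift = 23, phaseWrapLimit = 128 << 23 = 1 << 30, and a 44100 Hz -> 48000 Hz
    // conversion gives mPhaseIncrement = ((1 << 30) * 44100) / 48000 = 986500300;
    // the next statement stores exactly this fixed-point rate ratio.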
+ mPhaseIncrement = static_cast<uint32_t>(static_cast<uint64_t>(phaseWrapLimit) + * inSampleRate / mSampleRate); + + // determine which resampler to use + // check if locked phase (works only if mPhaseIncrement has no "fractional phase bits") + int locked = (mPhaseIncrement << (sizeof(mPhaseIncrement)*8 - c.mShift)) == 0; + if (locked) { + mPhaseFraction = mPhaseFraction >> c.mShift << c.mShift; // remove fractional phase + } + + // stride is the minimum number of filter coefficients processed per loop iteration. + // We currently only allow a stride of 16 to match with SIMD processing. + // This means that the filter length must be a multiple of 16, + // or half the filter length (mHalfNumCoefs) must be a multiple of 8. + // + // Note: A stride of 2 is achieved with non-SIMD processing. + int stride = ((c.mHalfNumCoefs & 7) == 0) ? 16 : 2; + LOG_ALWAYS_FATAL_IF(stride < 16, "Resampler stride must be 16 or more"); + LOG_ALWAYS_FATAL_IF(mChannelCount < 1 || mChannelCount > 8, + "Resampler channels(%d) must be between 1 to 8", mChannelCount); + // stride 16 (falls back to stride 2 for machines that do not support NEON) + if (locked) { + switch (mChannelCount) { + case 1: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>; + break; + case 2: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>; + break; + case 3: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, true, 16>; + break; + case 4: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, true, 16>; + break; + case 5: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, true, 16>; + break; + case 6: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, true, 16>; + break; + case 7: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, true, 16>; + break; + case 8: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, true, 16>; + break; + } + } else { + switch (mChannelCount) { + case 1: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>; + break; + case 2: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>; + break; + case 3: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, false, 16>; + break; + case 4: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, false, 16>; + break; + case 5: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, false, 16>; + break; + case 6: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, false, 16>; + break; + case 7: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, false, 16>; + break; + case 8: + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, false, 16>; + break; + } + } +#ifdef DEBUG_RESAMPLER + printf("channels:%d %s stride:%d %s coef:%d shift:%d\n", + mChannelCount, locked ? "locked" : "interpolated", + stride, useS32 ? "S32" : "S16", 2*c.mHalfNumCoefs, c.mShift); +#endif +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount, + AudioBufferProvider* provider) +{ + (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider); +} + +template<typename TC, typename TI, typename TO> +template<int CHANNELS, bool LOCKED, int STRIDE> +void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, + AudioBufferProvider* provider) +{ + // TODO Mono -> Mono is not supported. OUTPUT_CHANNELS reflects minimum of stereo out. + const int OUTPUT_CHANNELS = (CHANNELS < 2) ? 
2 : CHANNELS; + const Constants& c(mConstants); + const TC* const coefs = mConstants.mFirCoefs; + TI* impulse = mInBuffer.getImpulse(); + size_t inputIndex = 0; + uint32_t phaseFraction = mPhaseFraction; + const uint32_t phaseIncrement = mPhaseIncrement; + size_t outputIndex = 0; + size_t outputSampleCount = outFrameCount * OUTPUT_CHANNELS; + const uint32_t phaseWrapLimit = c.mL << c.mShift; + size_t inFrameCount = (phaseIncrement * (uint64_t)outFrameCount + phaseFraction) + / phaseWrapLimit; + // sanity check that inFrameCount is in signed 32 bit integer range. + ALOG_ASSERT(0 <= inFrameCount && inFrameCount < (1U << 31)); + + //ALOGV("inFrameCount:%d outFrameCount:%d" + // " phaseIncrement:%u phaseFraction:%u phaseWrapLimit:%u", + // inFrameCount, outFrameCount, phaseIncrement, phaseFraction, phaseWrapLimit); + + // NOTE: be very careful when modifying the code here. register + // pressure is very high and a small change might cause the compiler + // to generate far less efficient code. + // Always sanity check the result with objdump or test-resample. + + // the following logic is a bit convoluted to keep the main processing loop + // as tight as possible with register allocation. + while (outputIndex < outputSampleCount) { + //ALOGV("LOOP: inFrameCount:%d outputIndex:%d outFrameCount:%d" + // " phaseFraction:%u phaseWrapLimit:%u", + // inFrameCount, outputIndex, outFrameCount, phaseFraction, phaseWrapLimit); + + // check inputIndex overflow + ALOG_ASSERT(inputIndex <= mBuffer.frameCount, "inputIndex%d > frameCount%d", + inputIndex, mBuffer.frameCount); + // Buffer is empty, fetch a new one if necessary (inFrameCount > 0). + // We may not fetch a new buffer if the existing data is sufficient. + while (mBuffer.frameCount == 0 && inFrameCount > 0) { + mBuffer.frameCount = inFrameCount; + provider->getNextBuffer(&mBuffer, + calculateOutputPTS(outputIndex / OUTPUT_CHANNELS)); + if (mBuffer.raw == NULL) { + goto resample_exit; + } + inFrameCount -= mBuffer.frameCount; + if (phaseFraction >= phaseWrapLimit) { // read in data + mInBuffer.template readAdvance<CHANNELS>( + impulse, c.mHalfNumCoefs, + reinterpret_cast<TI*>(mBuffer.raw), inputIndex); + inputIndex++; + phaseFraction -= phaseWrapLimit; + while (phaseFraction >= phaseWrapLimit) { + if (inputIndex >= mBuffer.frameCount) { + inputIndex = 0; + provider->releaseBuffer(&mBuffer); + break; + } + mInBuffer.template readAdvance<CHANNELS>( + impulse, c.mHalfNumCoefs, + reinterpret_cast<TI*>(mBuffer.raw), inputIndex); + inputIndex++; + phaseFraction -= phaseWrapLimit; + } + } + } + const TI* const in = reinterpret_cast<const TI*>(mBuffer.raw); + const size_t frameCount = mBuffer.frameCount; + const int coefShift = c.mShift; + const int halfNumCoefs = c.mHalfNumCoefs; + const TO* const volumeSimd = mVolumeSimd; + + // main processing loop + while (CC_LIKELY(outputIndex < outputSampleCount)) { + // caution: fir() is inlined and may be large. + // output will be loaded with the appropriate values + // + // from the input samples in impulse[-halfNumCoefs+1]... impulse[halfNumCoefs] + // from the polyphase filter of (phaseFraction / phaseWrapLimit) in coefs. 
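            // Sketch of the indexing fir() relies on (not its exact internals):
            // the high-order bits of phaseFraction pick the polyphase,
            //     size_t phaseIndex = phaseFraction >> coefShift;  // one of c.mL sub-filters
            // and, when not LOCKED, the remaining low-order bits interpolate
            // between polyphase phaseIndex and phaseIndex + 1.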
+ // + //ALOGV("LOOP2: inFrameCount:%d outputIndex:%d outFrameCount:%d" + // " phaseFraction:%u phaseWrapLimit:%u", + // inFrameCount, outputIndex, outFrameCount, phaseFraction, phaseWrapLimit); + ALOG_ASSERT(phaseFraction < phaseWrapLimit); + fir<CHANNELS, LOCKED, STRIDE>( + &out[outputIndex], + phaseFraction, phaseWrapLimit, + coefShift, halfNumCoefs, coefs, + impulse, volumeSimd); + + outputIndex += OUTPUT_CHANNELS; + + phaseFraction += phaseIncrement; + while (phaseFraction >= phaseWrapLimit) { + if (inputIndex >= frameCount) { + goto done; // need a new buffer + } + mInBuffer.template readAdvance<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); + inputIndex++; + phaseFraction -= phaseWrapLimit; + } + } +done: + // We arrive here when we're finished or when the input buffer runs out. + // Regardless we need to release the input buffer if we've acquired it. + if (inputIndex > 0) { // we've acquired a buffer (alternatively could check frameCount) + ALOG_ASSERT(inputIndex == frameCount, "inputIndex(%d) != frameCount(%d)", + inputIndex, frameCount); // must have been fully read. + inputIndex = 0; + provider->releaseBuffer(&mBuffer); + ALOG_ASSERT(mBuffer.frameCount == 0); + } + } + +resample_exit: + // inputIndex must be zero in all three cases: + // (1) the buffer never was been acquired; (2) the buffer was + // released at "done:"; or (3) getNextBuffer() failed. + ALOG_ASSERT(inputIndex == 0, "Releasing: inputindex:%d frameCount:%d phaseFraction:%u", + inputIndex, mBuffer.frameCount, phaseFraction); + ALOG_ASSERT(mBuffer.frameCount == 0); // there must be no frames in the buffer + mInBuffer.setImpulse(impulse); + mPhaseFraction = phaseFraction; +} + +/* instantiate templates used by AudioResampler::create */ +template class AudioResamplerDyn<float, float, float>; +template class AudioResamplerDyn<int16_t, int16_t, int32_t>; +template class AudioResamplerDyn<int32_t, int16_t, int32_t>; + +// ---------------------------------------------------------------------------- +}; // namespace android diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h new file mode 100644 index 0000000..e886a68 --- /dev/null +++ b/services/audioflinger/AudioResamplerDyn.h @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_DYN_H +#define ANDROID_AUDIO_RESAMPLER_DYN_H + +#include <stdint.h> +#include <sys/types.h> +#include <cutils/log.h> + +#include "AudioResampler.h" + +namespace android { + +/* AudioResamplerDyn + * + * This class template is used for floating point and integer resamplers. + * + * Type variables: + * TC = filter coefficient type (one of int16_t, int32_t, or float) + * TI = input data type (one of int16_t or float) + * TO = output data type (one of int32_t or float) + * + * For integer input data types TI, the coefficient type TC is either int16_t or int32_t. + * For float input data types TI, the coefficient type TC is float. 
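 *
 * As a usage sketch (mirroring the selection logic in AudioResampler::create()
 * shown earlier, not a separate API): a float track at any DYN_* quality lands on
 * the <float, float, float> instantiation, while 16-bit tracks get
 * <int32_t, int16_t, int32_t> coefficients at DYN_HIGH_QUALITY and
 * <int16_t, int16_t, int32_t> otherwise. For example:
 *
 *   // stereo float source, 48 kHz output, medium dynamic quality
 *   AudioResampler* r = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT, 2, 48000,
 *           AudioResampler::DYN_MED_QUALITY);
 *   // r points to an AudioResamplerDyn<float, float, float>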
+ */ + +template<typename TC, typename TI, typename TO> +class AudioResamplerDyn: public AudioResampler { +public: + AudioResamplerDyn(int inChannelCount, + int32_t sampleRate, src_quality quality); + + virtual ~AudioResamplerDyn(); + + virtual void init(); + + virtual void setSampleRate(int32_t inSampleRate); + + virtual void setVolume(float left, float right); + + virtual void resample(int32_t* out, size_t outFrameCount, + AudioBufferProvider* provider); + +private: + + class Constants { // stores the filter constants. + public: + Constants() : + mL(0), mShift(0), mHalfNumCoefs(0), mFirCoefs(NULL) + {} + void set(int L, int halfNumCoefs, + int inSampleRate, int outSampleRate); + + int mL; // interpolation phases in the filter. + int mShift; // right shift to get polyphase index + unsigned int mHalfNumCoefs; // filter half #coefs + const TC* mFirCoefs; // polyphase filter bank + }; + + class InBuffer { // buffer management for input type TI + public: + InBuffer(); + ~InBuffer(); + void init(); + + void resize(int CHANNELS, int halfNumCoefs); + + // used for direct management of the mImpulse pointer + inline TI* getImpulse() { + return mImpulse; + } + + inline void setImpulse(TI *impulse) { + mImpulse = impulse; + } + + template<int CHANNELS> + inline void readAgain(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex); + + template<int CHANNELS> + inline void readAdvance(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex); + + private: + // tuning parameter guidelines: 2 <= multiple <= 8 + static const int kStateSizeMultipleOfFilterLength = 4; + + // in general, mRingFull = mState + mStateSize - halfNumCoefs*CHANNELS. + TI* mState; // base pointer for the input buffer storage + TI* mImpulse; // current location of the impulse response (centered) + TI* mRingFull; // mState <= mImpulse < mRingFull + size_t mStateCount; // size of state in units of TI. + }; + + void createKaiserFir(Constants &c, double stopBandAtten, + int inSampleRate, int outSampleRate, double tbwCheat); + + template<int CHANNELS, bool LOCKED, int STRIDE> + void resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider); + + // define a pointer to member function type for resample + typedef void (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out, + size_t outFrameCount, AudioBufferProvider* provider); + + // data - the contiguous storage and layout of these is important. + InBuffer mInBuffer; + Constants mConstants; // current set of coefficient parameters + TO __attribute__ ((aligned (8))) mVolumeSimd[2]; // must be aligned or NEON may crash + resample_ABP_t mResampleFunc; // called function for resampling + int32_t mFilterSampleRate; // designed filter sample rate. + src_quality mFilterQuality; // designed filter quality. + void* mCoefBuffer; // if a filter is created, this is not null +}; + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_DYN_H*/ diff --git a/services/audioflinger/AudioResamplerFirGen.h b/services/audioflinger/AudioResamplerFirGen.h new file mode 100644 index 0000000..d024b2f --- /dev/null +++ b/services/audioflinger/AudioResamplerFirGen.h @@ -0,0 +1,709 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_GEN_H +#define ANDROID_AUDIO_RESAMPLER_FIR_GEN_H + +namespace android { + +/* + * generates a sine wave at equal steps. + * + * As most of our functions use sine or cosine at equal steps, + * it is very efficient to compute them that way (single multiply and subtract), + * rather than invoking the math library sin() or cos() each time. + * + * SineGen uses Goertzel's Algorithm (as a generator not a filter) + * to calculate sine(wstart + n * wstep) or cosine(wstart + n * wstep) + * by stepping through 0, 1, ... n. + * + * e^i(wstart+wstep) = 2cos(wstep) * e^i(wstart) - e^i(wstart-wstep) + * + * or looking at just the imaginary sine term, as the cosine follows identically: + * + * sin(wstart+wstep) = 2cos(wstep) * sin(wstart) - sin(wstart-wstep) + * + * Goertzel's algorithm is more efficient than the angle addition formula, + * e^i(wstart+wstep) = e^i(wstart) * e^i(wstep), which takes up to + * 4 multiplies and 2 adds (or 3* and 3+) and requires both sine and + * cosine generation due to the complex * complex multiply (full rotation). + * + * See: http://en.wikipedia.org/wiki/Goertzel_algorithm + * + */ + +class SineGen { +public: + SineGen(double wstart, double wstep, bool cosine = false) { + if (cosine) { + mCurrent = cos(wstart); + mPrevious = cos(wstart - wstep); + } else { + mCurrent = sin(wstart); + mPrevious = sin(wstart - wstep); + } + mTwoCos = 2.*cos(wstep); + } + SineGen(double expNow, double expPrev, double twoCosStep) { + mCurrent = expNow; + mPrevious = expPrev; + mTwoCos = twoCosStep; + } + inline double value() const { + return mCurrent; + } + inline void advance() { + double tmp = mCurrent; + mCurrent = mCurrent*mTwoCos - mPrevious; + mPrevious = tmp; + } + inline double valueAdvance() { + double tmp = mCurrent; + mCurrent = mCurrent*mTwoCos - mPrevious; + mPrevious = tmp; + return tmp; + } + +private: + double mCurrent; // current value of sine/cosine + double mPrevious; // previous value of sine/cosine + double mTwoCos; // stepping factor +}; + +/* + * generates a series of sine generators, phase offset by fixed steps. + * + * This is used to generate polyphase sine generators, one per polyphase + * in the filter code below. + * + * The SineGen returned by value() starts at innerStart = outerStart + n*outerStep; + * increments by innerStep. + * + */ + +class SineGenGen { +public: + SineGenGen(double outerStart, double outerStep, double innerStep, bool cosine = false) + : mSineInnerCur(outerStart, outerStep, cosine), + mSineInnerPrev(outerStart-innerStep, outerStep, cosine) + { + mTwoCos = 2.*cos(innerStep); + } + inline SineGen value() { + return SineGen(mSineInnerCur.value(), mSineInnerPrev.value(), mTwoCos); + } + inline void advance() { + mSineInnerCur.advance(); + mSineInnerPrev.advance(); + } + inline SineGen valueAdvance() { + return SineGen(mSineInnerCur.valueAdvance(), mSineInnerPrev.valueAdvance(), mTwoCos); + } + +private: + SineGen mSineInnerCur; // generate the inner sine values (stepped by outerStep). 
+ SineGen mSineInnerPrev; // generate the inner sine previous values + // (behind by innerStep, stepped by outerStep). + double mTwoCos; // the inner stepping factor for the returned SineGen. +}; + +static inline double sqr(double x) { + return x * x; +} + +/* + * rounds a double to the nearest integer for FIR coefficients. + * + * One variant uses noise shaping, which must keep error history + * to work (the err parameter, initialized to 0). + * The other variant is a non-noise shaped version for + * S32 coefficients (noise shaping doesn't gain much). + * + * Caution: No bounds saturation is applied, but isn't needed in this case. + * + * @param x is the value to round. + * + * @param maxval is the maximum integer scale factor expressed as an int64 (for headroom). + * Typically this may be the maximum positive integer+1 (using the fact that double precision + * FIR coefficients generated here are never that close to 1.0 to pose an overflow condition). + * + * @param err is the previous error (actual - rounded) for the previous rounding op. + * For 16b coefficients this can improve stopband dB performance by up to 2dB. + * + * Many variants exist for the noise shaping: http://en.wikipedia.org/wiki/Noise_shaping + * + */ + +static inline int64_t toint(double x, int64_t maxval, double& err) { + double val = x * maxval; + double ival = floor(val + 0.5 + err*0.2); + err = val - ival; + return static_cast<int64_t>(ival); +} + +static inline int64_t toint(double x, int64_t maxval) { + return static_cast<int64_t>(floor(x * maxval + 0.5)); +} + +/* + * Modified Bessel function of the first kind + * http://en.wikipedia.org/wiki/Bessel_function + * + * The formulas are taken from Abramowitz and Stegun, + * _Handbook of Mathematical Functions_ (links below): + * + * http://people.math.sfu.ca/~cbm/aands/page_375.htm + * http://people.math.sfu.ca/~cbm/aands/page_378.htm + * + * http://dlmf.nist.gov/10.25 + * http://dlmf.nist.gov/10.40 + * + * Note we assume x is nonnegative (the function is symmetric, + * pass in the absolute value as needed). + * + * Constants are compile time derived with templates I0Term<> and + * I0ATerm<> to the precision of the compiler. The series can be expanded + * to any precision needed, but currently set around 24b precision. + * + * We use a bit of template math here, constexpr would probably be + * more appropriate for a C++11 compiler. + * + * For the intermediate range 3.75 < x < 15, we use minimax polynomial fit. + * + */ + +template <int N> +struct I0Term { + static const double value = I0Term<N-1>::value / (4. * N * N); +}; + +template <> +struct I0Term<0> { + static const double value = 1.; +}; + +template <int N> +struct I0ATerm { + static const double value = I0ATerm<N-1>::value * (2.*N-1.) * (2.*N-1.) / (8. * N); +}; + +template <> +struct I0ATerm<0> { // 1/sqrt(2*PI); + static const double value = 0.398942280401432677939946059934381868475858631164934657665925; +}; + +#if USE_HORNERS_METHOD +/* Polynomial evaluation of A + Bx + Cx^2 + Dx^3 + ... + * using Horner's Method: http://en.wikipedia.org/wiki/Horner's_method + * + * This has fewer multiplications than Estrin's method below, but has back to back + * floating point dependencies. + * + * On ARM this appears to work slower, so USE_HORNERS_METHOD is not default enabled. 
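 *
 * Concretely, for a cubic A + Bx + Cx^2 + Dx^3:
 *   Horner: A + x*(B + x*(C + x*D))        -- 3 multiplies, each waiting on the last
 *   Estrin: (A + B*x) + (C + D*x)*(x*x)    -- 4 multiplies, but the two halves can be
 *                                             evaluated independently before combining.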
+ */ + +inline double Poly2(double A, double B, double x) { + return A + x * B; +} + +inline double Poly4(double A, double B, double C, double D, double x) { + return A + x * (B + x * (C + x * (D))); +} + +inline double Poly7(double A, double B, double C, double D, double E, double F, double G, + double x) { + return A + x * (B + x * (C + x * (D + x * (E + x * (F + x * (G)))))); +} + +inline double Poly9(double A, double B, double C, double D, double E, double F, double G, + double H, double I, double x) { + return A + x * (B + x * (C + x * (D + x * (E + x * (F + x * (G + x * (H + x * (I)))))))); +} + +#else +/* Polynomial evaluation of A + Bx + Cx^2 + Dx^3 + ... + * using Estrin's Method: http://en.wikipedia.org/wiki/Estrin's_scheme + * + * This is typically faster, perhaps gains about 5-10% overall on ARM processors + * over Horner's method above. + */ + +inline double Poly2(double A, double B, double x) { + return A + B * x; +} + +inline double Poly3(double A, double B, double C, double x, double x2) { + return Poly2(A, B, x) + C * x2; +} + +inline double Poly3(double A, double B, double C, double x) { + return Poly2(A, B, x) + C * x * x; +} + +inline double Poly4(double A, double B, double C, double D, double x, double x2) { + return Poly2(A, B, x) + Poly2(C, D, x) * x2; // same as poly2(poly2, poly2, x2); +} + +inline double Poly4(double A, double B, double C, double D, double x) { + return Poly4(A, B, C, D, x, x * x); +} + +inline double Poly7(double A, double B, double C, double D, double E, double F, double G, + double x) { + double x2 = x * x; + return Poly4(A, B, C, D, x, x2) + Poly3(E, F, G, x, x2) * (x2 * x2); +} + +inline double Poly8(double A, double B, double C, double D, double E, double F, double G, + double H, double x, double x2, double x4) { + return Poly4(A, B, C, D, x, x2) + Poly4(E, F, G, H, x, x2) * x4; +} + +inline double Poly9(double A, double B, double C, double D, double E, double F, double G, + double H, double I, double x) { + double x2 = x * x; +#if 1 + // It does not seem faster to explicitly decompose Poly8 into Poly4, but + // could depend on compiler floating point scheduling. + double x4 = x2 * x2; + return Poly8(A, B, C, D, E, F, G, H, x, x2, x4) + I * (x4 * x4); +#else + double val = Poly4(A, B, C, D, x, x2); + double x4 = x2 * x2; + return val + Poly4(E, F, G, H, x, x2) * x4 + I * (x4 * x4); +#endif +} +#endif + +static inline double I0(double x) { + if (x < 3.75) { + x *= x; + return Poly7(I0Term<0>::value, I0Term<1>::value, + I0Term<2>::value, I0Term<3>::value, + I0Term<4>::value, I0Term<5>::value, + I0Term<6>::value, x); // e < 1.6e-7 + } + if (1) { + /* + * Series expansion coefs are easy to calculate, but are expanded around 0, + * so error is unequal over the interval 0 < x < 3.75, the error being + * significantly better near 0. + * + * A better solution is to use precise minimax polynomial fits. + * + * We use a slightly more complicated solution for 3.75 < x < 15, based on + * the tables in Blair and Edwards, "Stable Rational Minimax Approximations + * to the Modified Bessel Functions I0(x) and I1(x)", Chalk Hill Nuclear Laboratory, + * AECL-4928. + * + * http://www.iaea.org/inis/collection/NCLCollectionStore/_Public/06/178/6178667.pdf + * + * See Table 11 for 0 < x < 15; e < 10^(-7.13). + * + * Note: Beta cannot exceed 15 (hence Stopband cannot exceed 144dB = 24b). + * + * This speeds up overall computation by about 40% over using the else clause below, + * which requires sqrt and exp. 
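As a spot check on the two regimes above: the power-series branch gives I0(1) ~= 1.266066 (the terms (x^2/4)^n / (n!)^2 are 1, 1/4, 1/64, ...), while the rational fit handles mid-range values such as I0(10) ~= 2815.7, where the leading asymptotic term e^x / sqrt(2*PI*x) alone gives only about 2779.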
+ * + */ + + x *= x; + double num = Poly9(-0.13544938430e9, -0.33153754512e8, + -0.19406631946e7, -0.48058318783e5, + -0.63269783360e3, -0.49520779070e1, + -0.24970910370e-1, -0.74741159550e-4, + -0.18257612460e-6, x); + double y = x - 225.; // reflection around 15 (squared) + double den = Poly4(-0.34598737196e8, 0.23852643181e6, + -0.70699387620e3, 0.10000000000e1, y); + return num / den; + +#if IO_EXTENDED_BETA + /* Table 42 for x > 15; e < 10^(-8.11). + * This is used for Beta>15, but is disabled here as + * we never use Beta that high. + * + * NOTE: This should be enabled only for x > 15. + */ + + double y = 1./x; + double z = y - (1./15); + double num = Poly2(0.415079861746e1, -0.5149092496e1, z); + double den = Poly3(0.103150763823e2, -0.14181687413e2, + 0.1000000000e1, z); + return exp(x) * sqrt(y) * num / den; +#endif + } else { + /* + * NOT USED, but reference for large Beta. + * + * Abramowitz and Stegun asymptotic formula. + * works for x > 3.75. + */ + double y = 1./x; + return exp(x) * sqrt(y) * + // note: reciprocal squareroot may be easier! + // http://en.wikipedia.org/wiki/Fast_inverse_square_root + Poly9(I0ATerm<0>::value, I0ATerm<1>::value, + I0ATerm<2>::value, I0ATerm<3>::value, + I0ATerm<4>::value, I0ATerm<5>::value, + I0ATerm<6>::value, I0ATerm<7>::value, + I0ATerm<8>::value, y); // (... e) < 1.9e-7 + } +} + +/* A speed optimized version of the Modified Bessel I0() which incorporates + * the sqrt and numerator multiply and denominator divide into the computation. + * This speeds up filter computation by about 10-15%. + */ +static inline double I0SqrRat(double x2, double num, double den) { + if (x2 < (3.75 * 3.75)) { + return Poly7(I0Term<0>::value, I0Term<1>::value, + I0Term<2>::value, I0Term<3>::value, + I0Term<4>::value, I0Term<5>::value, + I0Term<6>::value, x2) * num / den; // e < 1.6e-7 + } + num *= Poly9(-0.13544938430e9, -0.33153754512e8, + -0.19406631946e7, -0.48058318783e5, + -0.63269783360e3, -0.49520779070e1, + -0.24970910370e-1, -0.74741159550e-4, + -0.18257612460e-6, x2); // e < 10^(-7.13). + double y = x2 - 225.; // reflection around 15 (squared) + den *= Poly4(-0.34598737196e8, 0.23852643181e6, + -0.70699387620e3, 0.10000000000e1, y); + return num / den; +} + +/* + * calculates the transition bandwidth for a Kaiser filter + * + * Formula 3.2.8, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48 + * Formula 7.76, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542 + * + * @param halfNumCoef is half the number of coefficients per filter phase. + * + * @param stopBandAtten is the stop band attenuation desired. + * + * @return the transition bandwidth in normalized frequency (0 <= f <= 0.5) + */ +static inline double firKaiserTbw(int halfNumCoef, double stopBandAtten) { + return (stopBandAtten - 7.95)/((2.*14.36)*halfNumCoef); +} + +/* + * calculates the fir transfer response of the overall polyphase filter at w. + * + * Calculates the DTFT transfer coefficient H(w) for 0 <= w <= PI, utilizing the + * fact that h[n] is symmetric (cosines only, no complex arithmetic). + * + * We use Goertzel's algorithm to accelerate the computation to essentially + * a single multiply and 2 adds per filter coefficient h[]. + * + * Be careful be careful to consider that h[n] is the overall polyphase filter, + * with L phases, so rescaling H(w)/L is probably what you expect for "unity gain", + * as you only use one of the polyphases at a time. 
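Concretely: for the kernels produced by firKaiserGen() below, the sum of all L*halfNumCoef*2+1 taps (which is H(0)) comes out near L*atten, so evaluating firTransfer() at w = 0 and dividing by L recovers approximately the atten passband gain of a single polyphase (for floating-point coefficients; integer banks carry an extra 2^(bits-1) scale, which testFir() divides out).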
+ */ +template <typename T> +static inline double firTransfer(const T* coef, int L, int halfNumCoef, double w) { + double accum = static_cast<double>(coef[0])*0.5; // "center coefficient" from first bank + coef += halfNumCoef; // skip first filterbank (picked up by the last filterbank). +#if SLOW_FIRTRANSFER + /* Original code for reference. This is equivalent to the code below, but slower. */ + for (int i=1 ; i<=L ; ++i) { + for (int j=0, ix=i ; j<halfNumCoef ; ++j, ix+=L) { + accum += cos(ix*w)*static_cast<double>(*coef++); + } + } +#else + /* + * Our overall filter is stored striped by polyphases, not a contiguous h[n]. + * We could fetch coefficients in a non-contiguous fashion + * but that will not scale to vector processing. + * + * We apply Goertzel's algorithm directly to each polyphase filter bank instead of + * using cosine generation/multiplication, thereby saving one multiply per inner loop. + * + * See: http://en.wikipedia.org/wiki/Goertzel_algorithm + * Also: Oppenheim and Schafer, _Discrete Time Signal Processing, 3e_, p. 720. + * + * We use the basic recursion to incorporate the cosine steps into real sequence x[n]: + * s[n] = x[n] + (2cosw)*s[n-1] + s[n-2] + * + * y[n] = s[n] - e^(iw)s[n-1] + * = sum_{k=-\infty}^{n} x[k]e^(-iw(n-k)) + * = e^(-iwn) sum_{k=0}^{n} x[k]e^(iwk) + * + * The summation contains the frequency steps we want multiplied by the source + * (similar to a DTFT). + * + * Using symmetry, and just the real part (be careful, this must happen + * after any internal complex multiplications), the polyphase filterbank + * transfer function is: + * + * Hpp[n, w, w_0] = sum_{k=0}^{n} x[k] * cos(wk + w_0) + * = Re{ e^(iwn + iw_0) y[n]} + * = cos(wn+w_0) * s[n] - cos(w(n+1)+w_0) * s[n-1] + * + * using the fact that s[n] of real x[n] is real. + * + */ + double dcos = 2. * cos(L*w); + int start = ((halfNumCoef)*L + 1); + SineGen cc((start - L) * w, w, true); // cosine + SineGen cp(start * w, w, true); // cosine + for (int i=1 ; i<=L ; ++i) { + double sc = 0; + double sp = 0; + for (int j=0 ; j<halfNumCoef ; ++j) { + double tmp = sc; + sc = static_cast<double>(*coef++) + dcos*sc - sp; + sp = tmp; + } + // If we are awfully clever, we can apply Goertzel's algorithm + // again on the sc and sp sequences returned here. + accum += cc.valueAdvance() * sc - cp.valueAdvance() * sp; + } +#endif + return accum*2.; +} + +/* + * evaluates the minimum and maximum |H(f)| bound in a band region. + * + * This is usually done with equally spaced increments in the target band in question. + * The passband is often very small, and sampled that way. The stopband is often much + * larger. + * + * We use the fact that the overall polyphase filter has an additional bank at the end + * for interpolation; hence it is overspecified for the H(f) computation. Thus the + * first polyphase is never actually checked, excepting its first term. + * + * In this code we use the firTransfer() evaluator above, which uses Goertzel's + * algorithm to calculate the transfer function at each point. + * + * TODO: An alternative with equal spacing is the FFT/DFT. An alternative with unequal + * spacing is a chirp transform. + * + * @param coef is the designed polyphase filter banks + * + * @param L is the number of phases (for interpolation) + * + * @param halfNumCoef should be half the number of coefficients for a single + * polyphase. + * + * @param fstart is the normalized frequency start. + * + * @param fend is the normalized frequency end. 
+ * + * @param steps is the number of steps to take (sampling) between frequency start and end + * + * @param firMin returns the minimum transfer |H(f)| found + * + * @param firMax returns the maximum transfer |H(f)| found + * + * 0 <= f <= 0.5. + * This is used to test passband and stopband performance. + */ +template <typename T> +static void testFir(const T* coef, int L, int halfNumCoef, + double fstart, double fend, int steps, double &firMin, double &firMax) { + double wstart = fstart*(2.*M_PI); + double wend = fend*(2.*M_PI); + double wstep = (wend - wstart)/steps; + double fmax, fmin; + double trf = firTransfer(coef, L, halfNumCoef, wstart); + if (trf<0) { + trf = -trf; + } + fmin = fmax = trf; + wstart += wstep; + for (int i=1; i<steps; ++i) { + trf = firTransfer(coef, L, halfNumCoef, wstart); + if (trf<0) { + trf = -trf; + } + if (trf>fmax) { + fmax = trf; + } + else if (trf<fmin) { + fmin = trf; + } + wstart += wstep; + } + // renormalize - this is only needed for integer filter types + double norm = 1./((1ULL<<(sizeof(T)*8-1))*L); + + firMin = fmin * norm; + firMax = fmax * norm; +} + +/* + * evaluates the |H(f)| lowpass band characteristics. + * + * This function tests the lowpass characteristics for the overall polyphase filter, + * and is used to verify the design. For this case, fp should be set to the + * passband normalized frequency from 0 to 0.5 for the overall filter (thus it + * is the designed polyphase bank value / L). Likewise for fs. + * + * @param coef is the designed polyphase filter banks + * + * @param L is the number of phases (for interpolation) + * + * @param halfNumCoef should be half the number of coefficients for a single + * polyphase. + * + * @param fp is the passband normalized frequency, 0 < fp < fs < 0.5. + * + * @param fs is the stopband normalized frequency, 0 < fp < fs < 0.5. + * + * @param passSteps is the number of passband sampling steps. + * + * @param stopSteps is the number of stopband sampling steps. + * + * @param passMin is the minimum value in the passband + * + * @param passMax is the maximum value in the passband (useful for scaling). This should + * be less than 1., to avoid sine wave test overflow. + * + * @param passRipple is the passband ripple. Typically this should be less than 0.1 for + * an audio filter. Generally speaker/headphone device characteristics will dominate + * the passband term. + * + * @param stopMax is the maximum value in the stopband. + * + * @param stopRipple is the stopband ripple, also known as stopband attenuation. + * Typically this should be greater than ~80dB for low quality, and greater than + * ~100dB for full 16b quality, otherwise aliasing may become noticeable. + * + */ +template <typename T> +static void testFir(const T* coef, int L, int halfNumCoef, + double fp, double fs, int passSteps, int stopSteps, + double &passMin, double &passMax, double &passRipple, + double &stopMax, double &stopRipple) { + double fmin, fmax; + testFir(coef, L, halfNumCoef, 0., fp, passSteps, fmin, fmax); + double d1 = (fmax - fmin)/2.; + passMin = fmin; + passMax = fmax; + passRipple = -20.*log10(1. - d1); // passband ripple + testFir(coef, L, halfNumCoef, fs, 0.5, stopSteps, fmin, fmax); + // fmin is really not important for the stopband. + stopMax = fmax; + stopRipple = -20.*log10(fmax); // stopband ripple/attenuation +} + +/* + * Calculates the overall polyphase filter based on a windowed sinc function. 
+ * + * The windowed sinc is an odd length symmetric filter of exactly L*halfNumCoef*2+1 + * taps for the entire kernel. This is then decomposed into L+1 polyphase filterbanks. + * The last filterbank is used for interpolation purposes (and is mostly composed + * of the first bank shifted by one sample), and is unnecessary if one does + * not do interpolation. + * + * We use the last filterbank for some transfer function calculation purposes, + * so it needs to be generated anyways. + * + * @param coef is the caller allocated space for coefficients. This should be + * exactly (L+1)*halfNumCoef in size. + * + * @param L is the number of phases (for interpolation) + * + * @param halfNumCoef should be half the number of coefficients for a single + * polyphase. + * + * @param stopBandAtten is the stopband value, should be >50dB. + * + * @param fcr is cutoff frequency/sampling rate (<0.5). At this point, the energy + * should be 6dB less. (fcr is where the amplitude drops by half). Use the + * firKaiserTbw() to calculate the transition bandwidth. fcr is the midpoint + * between the stop band and the pass band (fstop+fpass)/2. + * + * @param atten is the attenuation (generally slightly less than 1). + */ + +template <typename T> +static inline void firKaiserGen(T* coef, int L, int halfNumCoef, + double stopBandAtten, double fcr, double atten) { + // + // Formula 3.2.5, 3.2.7, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48 + // Formula 7.75, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542 + // + // See also: http://melodi.ee.washington.edu/courses/ee518/notes/lec17.pdf + // + // Kaiser window and beta parameter + // + // | 0.1102*(A - 8.7) A > 50 + // beta = | 0.5842*(A - 21)^0.4 + 0.07886*(A - 21) 21 <= A <= 50 + // | 0. A < 21 + // + // with A is the desired stop-band attenuation in dBFS + // + // 30 dB 2.210 + // 40 dB 3.384 + // 50 dB 4.538 + // 60 dB 5.658 + // 70 dB 6.764 + // 80 dB 7.865 + // 90 dB 8.960 + // 100 dB 10.056 + + const int N = L * halfNumCoef; // non-negative half + const double beta = 0.1102 * (stopBandAtten - 8.7); // >= 50dB always + const double xstep = (2. * M_PI) * fcr / L; + const double xfrac = 1. / N; + const double yscale = atten * L / (I0(beta) * M_PI); + const double sqrbeta = sqr(beta); + + // We use sine generators, which computes sines on regular step intervals. + // This speeds up overall computation about 40% from computing the sine directly. + + SineGenGen sgg(0., xstep, L*xstep); // generates sine generators (one per polyphase) + + for (int i=0 ; i<=L ; ++i) { // generate an extra set of coefs for interpolation + + // computation for a single polyphase of the overall filter. + SineGen sg = sgg.valueAdvance(); // current sine generator for "j" inner loop. + double err = 0; // for noise shaping on int16_t coefficients (over each polyphase) + + for (int j=0, ix=i ; j<halfNumCoef ; ++j, ix+=L) { + double y; + if (CC_LIKELY(ix)) { + double x = static_cast<double>(ix); + + // sine generator: sg.valueAdvance() returns sin(ix*xstep); + // y = I0(beta * sqrt(1.0 - sqr(x * xfrac))) * yscale * sg.valueAdvance() / x; + y = I0SqrRat(sqrbeta * (1.0 - sqr(x * xfrac)), yscale * sg.valueAdvance(), x); + } else { + y = 2. * atten * fcr; // center of filter, sinc(0) = 1. 
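Putting these helpers together, a hypothetical end-to-end use might look as follows. The parameter values, the 0.99 attenuation, and the choice of putting the stopband edge at the per-phase Nyquist are illustrative assumptions rather than values taken from the resampler, and the sketch assumes the surrounding headers (for is_same and CC_LIKELY) are already included, as they are in the AOSP build:

    #include <vector>

    // Design a small 32-phase bank and measure its pass/stop behavior with testFir().
    static void designExample() {
        const int L = 32;                  // number of polyphases
        const int halfNumCoef = 8;         // half the taps of one polyphase
        const double stopBandAtten = 98.;  // dB

        // transition bandwidth estimate, in per-phase normalized frequency
        const double tbw = firKaiserTbw(halfNumCoef, stopBandAtten);
        const double fcr = 0.5 - tbw / 2.; // cutoff midway between pass and stop edges

        std::vector<int16_t> coefs((L + 1) * halfNumCoef);  // extra bank for interpolation
        firKaiserGen(coefs.data(), L, halfNumCoef, stopBandAtten, fcr, 0.99 /*atten*/);

        double passMin, passMax, passRipple, stopMax, stopRipple;
        testFir(coefs.data(), L, halfNumCoef,
                (fcr - tbw / 2.) / L,      // passband edge, overall normalized frequency
                (fcr + tbw / 2.) / L,      // stopband edge, overall normalized frequency
                256 /*passSteps*/, 1024 /*stopSteps*/,
                passMin, passMax, passRipple, stopMax, stopRipple);
        // Expect stopRipple on the order of stopBandAtten and passRipple well under 1 dB.
    }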
+ sg.advance(); + } + + if (is_same<T, int16_t>::value) { // int16_t needs noise shaping + *coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1), err)); + } else if (is_same<T, int32_t>::value) { + *coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1))); + } else { // assumed float or double + *coef++ = static_cast<T>(y); + } + } + } +} + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_GEN_H*/ diff --git a/services/audioflinger/AudioResamplerFirOps.h b/services/audioflinger/AudioResamplerFirOps.h new file mode 100644 index 0000000..bf2163f --- /dev/null +++ b/services/audioflinger/AudioResamplerFirOps.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_OPS_H +#define ANDROID_AUDIO_RESAMPLER_FIR_OPS_H + +namespace android { + +#if defined(__arm__) && !defined(__thumb__) +#define USE_INLINE_ASSEMBLY (true) +#else +#define USE_INLINE_ASSEMBLY (false) +#endif + +#if USE_INLINE_ASSEMBLY && defined(__ARM_NEON__) +#define USE_NEON (true) +#include <arm_neon.h> +#else +#define USE_NEON (false) +#endif + +template<typename T, typename U> +struct is_same +{ + static const bool value = false; +}; + +template<typename T> +struct is_same<T, T> // partial specialization +{ + static const bool value = true; +}; + +static inline +int32_t mulRL(int left, int32_t in, uint32_t vRL) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smultb %[out], %[in], %[vRL] \n" + : [out]"=r"(out) + : [in]"%r"(in), [vRL]"r"(vRL) + : ); + } else { + asm( "smultt %[out], %[in], %[vRL] \n" + : [out]"=r"(out) + : [in]"%r"(in), [vRL]"r"(vRL) + : ); + } + return out; +#else + int16_t v = left ? 
static_cast<int16_t>(vRL) : static_cast<int16_t>(vRL>>16); + return static_cast<int32_t>((static_cast<int64_t>(in) * v) >> 16); +#endif +} + +static inline +int32_t mulAdd(int16_t in, int16_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smlabb %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + v * in; +#endif +} + +static inline +int32_t mulAdd(int16_t in, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smlawb %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 16); +#endif +} + +static inline +int32_t mulAdd(int32_t in, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smmla %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 32); +#endif +} + +static inline +int32_t mulAddRL(int left, uint32_t inRL, int16_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smlabb %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } else { + asm( "smlabt %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } + return out; +#else + int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16); + return a + v * s; +#endif +} + +static inline +int32_t mulAddRL(int left, uint32_t inRL, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smlawb %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } else { + asm( "smlawt %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } + return out; +#else + int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16); + return a + static_cast<int32_t>((static_cast<int64_t>(v) * s) >> 16); +#endif +} + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_OPS_H*/ diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h new file mode 100644 index 0000000..efc8055 --- /dev/null +++ b/services/audioflinger/AudioResamplerFirProcess.h @@ -0,0 +1,401 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H +#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H + +namespace android { + +// depends on AudioResamplerFirOps.h + +/* variant for input type TI = int16_t input samples */ +template<typename TC> +static inline +void mac(int32_t& l, int32_t& r, TC coef, const int16_t* samples) +{ + uint32_t rl = *reinterpret_cast<const uint32_t*>(samples); + l = mulAddRL(1, rl, coef, l); + r = mulAddRL(0, rl, coef, r); +} + +template<typename TC> +static inline +void mac(int32_t& l, TC coef, const int16_t* samples) +{ + l = mulAdd(samples[0], coef, l); +} + +/* variant for input type TI = float input samples */ +template<typename TC> +static inline +void mac(float& l, float& r, TC coef, const float* samples) +{ + l += *samples++ * coef; + r += *samples * coef; +} + +template<typename TC> +static inline +void mac(float& l, TC coef, const float* samples) +{ + l += *samples * coef; +} + +/* variant for output type TO = int32_t output samples */ +static inline +int32_t volumeAdjust(int32_t value, int32_t volume) +{ + return 2 * mulRL(0, value, volume); // Note: only use top 16b +} + +/* variant for output type TO = float output samples */ +static inline +float volumeAdjust(float value, float volume) +{ + return value * volume; +} + +/* + * Helper template functions for loop unrolling accumulator operations. + * + * Unrolling the loops achieves about 2x gain. + * Using a recursive template rather than an array of TO[] for the accumulator + * values is an additional 10-20% gain. + */ + +template<int CHANNELS, typename TO> +class Accumulator : public Accumulator<CHANNELS-1, TO> // recursive +{ +public: + inline void clear() { + value = 0; + Accumulator<CHANNELS-1, TO>::clear(); + } + template<typename TC, typename TI> + inline void acc(TC coef, const TI*& data) { + mac(value, coef, data++); + Accumulator<CHANNELS-1, TO>::acc(coef, data); + } + inline void volume(TO*& out, TO gain) { + *out++ = volumeAdjust(value, gain); + Accumulator<CHANNELS-1, TO>::volume(out, gain); + } + + TO value; // one per recursive inherited base class +}; + +template<typename TO> +class Accumulator<0, TO> { +public: + inline void clear() { + } + template<typename TC, typename TI> + inline void acc(TC coef __unused, const TI*& data __unused) { + } + inline void volume(TO*& out __unused, TO gain __unused) { + } +}; + +template<typename TC, typename TINTERP> +inline +TC interpolate(TC coef_0, TC coef_1, TINTERP lerp) +{ + return lerp * (coef_1 - coef_0) + coef_0; +} + +template<> +inline +int16_t interpolate<int16_t, uint32_t>(int16_t coef_0, int16_t coef_1, uint32_t lerp) +{ // in some CPU architectures 16b x 16b multiplies are faster. 
+ return (static_cast<int16_t>(lerp) * static_cast<int16_t>(coef_1 - coef_0) >> 15) + coef_0; +} + +template<> +inline +int32_t interpolate<int32_t, uint32_t>(int32_t coef_0, int32_t coef_1, uint32_t lerp) +{ + return (lerp * static_cast<int64_t>(coef_1 - coef_0) >> 31) + coef_0; +} + +/* class scope for passing in functions into templates */ +struct InterpCompute { + template<typename TC, typename TINTERP> + static inline + TC interpolatep(TC coef_0, TC coef_1, TINTERP lerp) { + return interpolate(coef_0, coef_1, lerp); + } + + template<typename TC, typename TINTERP> + static inline + TC interpolaten(TC coef_0, TC coef_1, TINTERP lerp) { + return interpolate(coef_0, coef_1, lerp); + } +}; + +struct InterpNull { + template<typename TC, typename TINTERP> + static inline + TC interpolatep(TC coef_0, TC coef_1 __unused, TINTERP lerp __unused) { + return coef_0; + } + + template<typename TC, typename TINTERP> + static inline + TC interpolaten(TC coef_0 __unused, TC coef_1, TINTERP lerp __unused) { + return coef_1; + } +}; + +/* + * Calculates a single output frame (two samples). + * + * The Process*() functions compute both the positive half FIR dot product and + * the negative half FIR dot product, accumulates, and then applies the volume. + * + * Use fir() to compute the proper coefficient pointers for a polyphase + * filter bank. + * + * ProcessBase() is the fundamental processing template function. + * + * ProcessL() calls ProcessBase() with TFUNC = InterpNull, for fixed/locked phase. + * Process() calls ProcessBase() with TFUNC = InterpCompute, for interpolated phase. + */ + +template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO, typename TINTERP> +static inline +void ProcessBase(TO* const out, + size_t count, + const TC* coefsP, + const TC* coefsN, + const TI* sP, + const TI* sN, + TINTERP lerpP, + const TO* const volumeLR) +{ + COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS > 0) + + if (CHANNELS > 2) { + // TO accum[CHANNELS]; + Accumulator<CHANNELS, TO> accum; + + // for (int j = 0; j < CHANNELS; ++j) accum[j] = 0; + accum.clear(); + for (size_t i = 0; i < count; ++i) { + TC c = TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP); + + // for (int j = 0; j < CHANNELS; ++j) mac(accum[j], c, sP + j); + const TI *tmp_data = sP; // tmp_ptr seems to work better + accum.acc(c, tmp_data); + + coefsP++; + sP -= CHANNELS; + c = TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP); + + // for (int j = 0; j < CHANNELS; ++j) mac(accum[j], c, sN + j); + tmp_data = sN; // tmp_ptr seems faster than directly using sN + accum.acc(c, tmp_data); + + coefsN++; + sN += CHANNELS; + } + // for (int j = 0; j < CHANNELS; ++j) out[j] += volumeAdjust(accum[j], volumeLR[0]); + TO *tmp_out = out; // may remove if const out definition changes. 
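For the interpolated path, the Q31 specialization of interpolate() defined earlier in this header reduces to a standard fixed-point lerp. A worked example with hypothetical values:

    // lerp is an unsigned Q0.31 fraction (see the fir() documentation below).
    const int32_t c0 = 1000, c1 = 2000;
    const uint32_t lerp = 1u << 30;   // 2^30 / 2^31 = halfway between the two coefs
    const int32_t c = static_cast<int32_t>(
            (static_cast<int64_t>(lerp) * (c1 - c0) >> 31) + c0);  // = 500 + 1000 = 1500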
+ accum.volume(tmp_out, volumeLR[0]); + } else if (CHANNELS == 2) { + TO l = 0; + TO r = 0; + for (size_t i = 0; i < count; ++i) { + mac(l, r, TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP), sP); + coefsP++; + sP -= CHANNELS; + mac(l, r, TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP), sN); + coefsN++; + sN += CHANNELS; + } + out[0] += volumeAdjust(l, volumeLR[0]); + out[1] += volumeAdjust(r, volumeLR[1]); + } else { /* CHANNELS == 1 */ + TO l = 0; + for (size_t i = 0; i < count; ++i) { + mac(l, TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP), sP); + coefsP++; + sP -= CHANNELS; + mac(l, TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP), sN); + coefsN++; + sN += CHANNELS; + } + out[0] += volumeAdjust(l, volumeLR[0]); + out[1] += volumeAdjust(l, volumeLR[1]); + } +} + +template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO> +static inline +void ProcessL(TO* const out, + int count, + const TC* coefsP, + const TC* coefsN, + const TI* sP, + const TI* sN, + const TO* const volumeLR) +{ + ProcessBase<CHANNELS, STRIDE, InterpNull>(out, count, coefsP, coefsN, sP, sN, 0, volumeLR); +} + +template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO, typename TINTERP> +static inline +void Process(TO* const out, + int count, + const TC* coefsP, + const TC* coefsN, + const TC* coefsP1 __unused, + const TC* coefsN1 __unused, + const TI* sP, + const TI* sN, + TINTERP lerpP, + const TO* const volumeLR) +{ + ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP, volumeLR); +} + +/* + * Calculates a single output frame (two samples) from input sample pointer. + * + * This sets up the params for the accelerated Process() and ProcessL() + * functions to do the appropriate dot products. + * + * @param out should point to the output buffer with space for at least one output frame. + * + * @param phase is the fractional distance between input frames for interpolation: + * phase >= 0 && phase < phaseWrapLimit. It can be thought of as a rational fraction + * of phase/phaseWrapLimit. + * + * @param phaseWrapLimit is #polyphases<<coefShift, where #polyphases is the number of polyphases + * in the polyphase filter. Likewise, #polyphases can be obtained as (phaseWrapLimit>>coefShift). + * + * @param coefShift gives the bit alignment of the polyphase index in the phase parameter. + * + * @param halfNumCoefs is the half the number of coefficients per polyphase filter. Since the + * overall filterbank is odd-length symmetric, only halfNumCoefs need be stored. + * + * @param coefs is the polyphase filter bank, starting at from polyphase index 0, and ranging to + * and including the #polyphases. Each polyphase of the filter has half-length halfNumCoefs + * (due to symmetry). The total size of the filter bank in coefficients is + * (#polyphases+1)*halfNumCoefs. + * + * The filter bank coefs should be aligned to a minimum of 16 bytes (preferrably to cache line). + * + * The coefs should be attenuated (to compensate for passband ripple) + * if storing back into the native format. + * + * @param samples are unaligned input samples. The position is in the "middle" of the + * sample array with respect to the FIR filter: + * the negative half of the filter is dot product from samples+1 to samples+halfNumCoefs; + * the positive half of the filter is dot product from samples to samples-halfNumCoefs+1. + * + * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel, + * expressed as a S32 integer. 
A negative value inverts the channel 180 degrees. + * The pointer volumeLR should be aligned to a minimum of 8 bytes. + * A typical value for volume is 0x1000 to align to a unity gain output of 20.12. + * + * In between calls to filterCoefficient, the phase is incremented by phaseIncrement, where + * phaseIncrement is calculated as inputSampling * phaseWrapLimit / outputSampling. + * + * The filter polyphase index is given by indexP = phase >> coefShift. Due to + * odd length symmetric filter, the polyphase index of the negative half depends on + * whether interpolation is used. + * + * The fractional siting between the polyphase indices is given by the bits below coefShift: + * + * lerpP = phase << 32 - coefShift >> 1; // for 32 bit unsigned phase multiply + * lerpP = phase << 32 - coefShift >> 17; // for 16 bit unsigned phase multiply + * + * For integer types, this is expressed as: + * + * lerpP = phase << sizeof(phase)*8 - coefShift + * >> (sizeof(phase)-sizeof(*coefs))*8 + 1; + * + * For floating point, lerpP is the fractional phase scaled to [0.0, 1.0): + * + * lerpP = (phase << 32 - coefShift) / (1 << 32); // floating point equivalent + */ + +template<int CHANNELS, bool LOCKED, int STRIDE, typename TC, typename TI, typename TO> +static inline +void fir(TO* const out, + const uint32_t phase, const uint32_t phaseWrapLimit, + const int coefShift, const int halfNumCoefs, const TC* const coefs, + const TI* const samples, const TO* const volumeLR) +{ + // NOTE: be very careful when modifying the code here. register + // pressure is very high and a small change might cause the compiler + // to generate far less efficient code. + // Always sanity check the result with objdump or test-resample. + + if (LOCKED) { + // locked polyphase (no interpolation) + // Compute the polyphase filter index on the positive and negative side. + uint32_t indexP = phase >> coefShift; + uint32_t indexN = (phaseWrapLimit - phase) >> coefShift; + const TC* coefsP = coefs + indexP*halfNumCoefs; + const TC* coefsN = coefs + indexN*halfNumCoefs; + const TI* sP = samples; + const TI* sN = samples + CHANNELS; + + // dot product filter. + ProcessL<CHANNELS, STRIDE>(out, + halfNumCoefs, coefsP, coefsN, sP, sN, volumeLR); + } else { + // interpolated polyphase + // Compute the polyphase filter index on the positive and negative side. + uint32_t indexP = phase >> coefShift; + uint32_t indexN = (phaseWrapLimit - phase - 1) >> coefShift; // one's complement. + const TC* coefsP = coefs + indexP*halfNumCoefs; + const TC* coefsN = coefs + indexN*halfNumCoefs; + const TC* coefsP1 = coefsP + halfNumCoefs; + const TC* coefsN1 = coefsN + halfNumCoefs; + const TI* sP = samples; + const TI* sN = samples + CHANNELS; + + // Interpolation fraction lerpP derived by shifting all the way up and down + // to clear the appropriate bits and align to the appropriate level + // for the integer multiply. The constants should resolve in compile time. + // + // The interpolated filter coefficient is derived as follows for the pos/neg half: + // + // interpolated[P] = index[P]*lerpP + index[P+1]*(1-lerpP) + // interpolated[N] = index[N+1]*lerpP + index[N]*(1-lerpP) + + // on-the-fly interpolated dot product filter + if (is_same<TC, float>::value || is_same<TC, double>::value) { + static const TC scale = 1. / (65536. 
* 65536.); // scale phase bits to [0.0, 1.0) + TC lerpP = TC(phase << (sizeof(phase)*8 - coefShift)) * scale; + + Process<CHANNELS, STRIDE>(out, + halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR); + } else { + uint32_t lerpP = phase << (sizeof(phase)*8 - coefShift) + >> ((sizeof(phase)-sizeof(*coefs))*8 + 1); + + Process<CHANNELS, STRIDE>(out, + halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR); + } + } +} + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H*/ diff --git a/services/audioflinger/AudioResamplerFirProcessNeon.h b/services/audioflinger/AudioResamplerFirProcessNeon.h new file mode 100644 index 0000000..f311cef --- /dev/null +++ b/services/audioflinger/AudioResamplerFirProcessNeon.h @@ -0,0 +1,1149 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H +#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H + +namespace android { + +// depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h + +#if USE_NEON +// +// NEON specializations are enabled for Process() and ProcessL() +// +// TODO: Stride 16 and Stride 8 can be combined with one pass stride 8 (if necessary) +// and looping stride 16 (or vice versa). This has some polyphase coef data alignment +// issues with S16 coefs. Consider this later. + +// Macros to save a mono/stereo accumulator sample in q0 (and q4) as stereo out. +#define ASSEMBLY_ACCUMULATE_MONO \ + "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes */\ + "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output */\ + "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums */\ + "vpadd.s32 d0, d0, d0 \n"/* (1+4d) and replicate L/R */\ + "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume */\ + "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating) */\ + "vst1.s32 {d3}, %[out] \n"/* (2+2d) store result */ + +#define ASSEMBLY_ACCUMULATE_STEREO \ + "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes*/\ + "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output*/\ + "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums from q0*/\ + "vpadd.s32 d8, d8, d9 \n"/* (1) add all 4 partial sums from q4*/\ + "vpadd.s32 d0, d0, d8 \n"/* (1+4d) combine into L/R*/\ + "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume*/\ + "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating)*/\ + "vst1.s32 {d3}, %[out] \n"/* (2+2d)store result*/ + +template <> +inline void ProcessL<1, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! 
\n"// (2) load 8 16-bits mono samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply (reversed)samples by coef + "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed)samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q10" + ); +} + +template <> +inline void ProcessL<2, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q10}, [%[coefsN0]:128]! 
\n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + "vrev64.16 q3, q3 \n"// (0 combines+) reverse right positive + + "vmlal.s16 q0, d4, d17 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d6, d17 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q4, d7, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left + "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left + "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right + "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q10" + ); +} + +template <> +inline void Process<1, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase S32 Q15 + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + "vadd.s16 q8, q8, q9 \n"// (1+2d) interpolate (step3) 1st set + "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) 
+ "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply reversed samples by coef + "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void Process<2, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q11}, [%[coefsN0]:128]! 
\n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + "vrev64.16 q3, q3 \n"// (1) reverse 8 frames of the right positive + + "vadd.s16 q8, q8, q9 \n"// (1+1d) interpolate (step3) 1st set + "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set + + "vmlal.s16 q0, d4, d17 \n"// (1) multiply reversed samples left + "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples left + "vmlal.s16 q4, d6, d17 \n"// (1) multiply reversed samples right + "vmlal.s16 q4, d7, d16 \n"// (1) multiply reversed samples right + "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left + "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left + "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right + "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void ProcessL<1, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN0]:128]! 
\n"// load 8 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// accumulate result + "vadd.s32 q0, q0, q13 \n"// accumulate result + + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + "subs %[count], %[count], #8 \n"// update loop counter + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void ProcessL<2, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN0]:128]! 
\n"// load 4 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + "vrev64.16 q3, q3 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// (+1) accumulate result + "vadd.s32 q0, q0, q13 \n"// (+1) accumulate result + + "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q4, q4, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// (+1) accumulate result + "vadd.s32 q4, q4, q13 \n"// (+1) accumulate result + + "subs %[count], %[count], #8 \n"// update loop counter + "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void Process<1, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q14, q15}, [%[coefsN0]:128]! 
\n"// load 8 32-bits coefs + + "vsub.s32 q12, q12, q8 \n"// interpolate (step1) + "vsub.s32 q13, q13, q9 \n"// interpolate (step1) + "vsub.s32 q14, q14, q10 \n"// interpolate (step1) + "vsub.s32 q15, q15, q11 \n"// interpolate (step1) + + "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2) + + "vadd.s32 q8, q8, q12 \n"// interpolate (step3) + "vadd.s32 q9, q9, q13 \n"// interpolate (step3) + "vadd.s32 q10, q10, q14 \n"// interpolate (step3) + "vadd.s32 q11, q11, q15 \n"// interpolate (step3) + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// accumulate result + "vadd.s32 q0, q0, q13 \n"// accumulate result + + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + "subs %[count], %[count], #8 \n"// update loop counter + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void Process<2, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q14, q15}, [%[coefsN0]:128]! 
\n"// load 8 32-bits coefs + + "vsub.s32 q12, q12, q8 \n"// interpolate (step1) + "vsub.s32 q13, q13, q9 \n"// interpolate (step1) + "vsub.s32 q14, q14, q10 \n"// interpolate (step1) + "vsub.s32 q15, q15, q11 \n"// interpolate (step1) + + "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2) + + "vadd.s32 q8, q8, q12 \n"// interpolate (step3) + "vadd.s32 q9, q9, q13 \n"// interpolate (step3) + "vadd.s32 q10, q10, q14 \n"// interpolate (step3) + "vadd.s32 q11, q11, q15 \n"// interpolate (step3) + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + "vrev64.16 q3, q3 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// (+1) accumulate result + "vadd.s32 q0, q0, q13 \n"// (+1) accumulate result + + "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q4, q4, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// (+1) accumulate result + "vadd.s32 q4, q4, q13 \n"// (+1) accumulate result + + "subs %[count], %[count], #8 \n"// update loop counter + "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void ProcessL<1, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// (2+0d) load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// (2) load 4 16-bits mono samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d20}, [%[coefsN0]:64]! 
\n"// (1) load 4 16-bits coefs + + "vrev64.16 d4, d4 \n"// (1) reversed s3, s2, s1, s0, s7, s6, s5, s4 + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed)samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #8 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q10" + ); +} + +template <> +inline void ProcessL<2, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// (2+0d) load 8 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// (2) load 8 16-bits stereo samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d20}, [%[coefsN0]:64]! \n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d5, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d6, d20 \n"// (1) multiply samples left + "vmlal.s16 q4, d7, d20 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q10" + ); +} + +template <> +inline void Process<1, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase S32 Q15 + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// (2+0d) load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// (2) load 4 16-bits mono samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d17}, [%[coefsP1]:64]! \n"// (1) load 4 16-bits coefs for interpolation + "vld1.16 {d20}, [%[coefsN1]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d21}, [%[coefsN0]:64]! 
\n"// (1) load 4 16-bits coefs for interpolation + + "vsub.s16 d17, d17, d16 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 d21, d21, d20 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 d17, d17, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 d21, d21, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 d4, d4 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + "vadd.s16 d16, d16, d17 \n"// (1+2d) interpolate (step3) 1st set + "vadd.s16 d20, d20, d21 \n"// (1+1d) interpolate (step3) 2nd set + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d16 \n"// (1+0d) multiply (reversed)by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void Process<2, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d17}, [%[coefsP1]:64]! \n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {d20}, [%[coefsN1]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d21}, [%[coefsN0]:64]! 
\n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 d17, d17, d16 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 d21, d21, d20 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 d17, d17, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 d21, d21, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + + "vadd.s16 d16, d16, d17 \n"// (1+1d) interpolate (step3) 1st set + "vadd.s16 d20, d20, d21 \n"// (1+1d) interpolate (step3) 2nd set + + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d5, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d6, d20 \n"// (1) multiply samples left + "vmlal.s16 q4, d7, d20 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void ProcessL<1, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// load 4 16-bits mono samples + "vld1.32 {q8}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs + + "vrev64.16 d4, d4 \n"// reverse 2 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// (stall) extend samples to 31 bits + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// (stall) accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q14" + ); +} + +template <> +inline void ProcessL<2, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! 
\n"// load 4 16-bits stereo samples + "vld1.32 {q8}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 2 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef + "vqrdmulh.s32 q15, q15, q10 \n"// multiply samples by coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q4, q4, q13 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", "q4", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void Process<1, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// load 4 16-bits mono samples + "vld1.32 {q8}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q9}, [%[coefsP1]:128]! \n"// load 4 32-bits coefs for interpolation + "vld1.32 {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q11}, [%[coefsN0]:128]! 
\n"// load 4 32-bits coefs for interpolation + + "vrev64.16 d4, d4 \n"// reverse 2 frames of the positive side + + "vsub.s32 q9, q9, q8 \n"// interpolate (step1) 1st set of coefs + "vsub.s32 q11, q11, q10 \n"// interpolate (step1) 2nd set of coets + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q9, q9, d2[0] \n"// interpolate (step2) 1st set of coefs + "vqrdmulh.s32 q11, q11, d2[0] \n"// interpolate (step2) 2nd set of coefs + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + + "vadd.s32 q8, q8, q9 \n"// interpolate (step3) 1st set + "vadd.s32 q10, q10, q11 \n"// interpolate (step4) 2nd set + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsP1] "+r" (coefsP1), + [coefsN0] "+r" (coefsN), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q14" + ); +} + +template <> +inline +void Process<2, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + "vld2.16 {d4, d5}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q9}, [%[coefsP1]:128]! \n"// load 4 32-bits coefs for interpolation + "vld1.32 {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q11}, [%[coefsN0]:128]! 
\n"// load 4 32-bits coefs for interpolation + + "vrev64.16 q2, q2 \n"// (reversed) 2 frames of the positive side + + "vsub.s32 q9, q9, q8 \n"// interpolate (step1) 1st set of coefs + "vsub.s32 q11, q11, q10 \n"// interpolate (step1) 2nd set of coets + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q9, q9, d2[0] \n"// interpolate (step2) 1st set of coefs + "vqrdmulh.s32 q11, q11, d2[1] \n"// interpolate (step3) 2nd set of coefs + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vadd.s32 q8, q8, q9 \n"// interpolate (step3) 1st set + "vadd.s32 q10, q10, q11 \n"// interpolate (step4) 2nd set + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q4, q4, q13 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsP1] "+r" (coefsP1), + [coefsN0] "+r" (coefsN), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", "q4", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +#endif //USE_NEON + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H*/ diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp index 938ec11..e6fb76c 100644 --- a/services/audioflinger/AudioResamplerSinc.cpp +++ b/services/audioflinger/AudioResamplerSinc.cpp @@ -27,6 +27,7 @@ #include <cutils/properties.h> #include <utils/Log.h> +#include <audio_utils/primitives.h> #include "AudioResamplerSinc.h" @@ -455,9 +456,9 @@ int32_t mulAddRL(int left, uint32_t inRL, int32_t v, int32_t a) // ---------------------------------------------------------------------------- -AudioResamplerSinc::AudioResamplerSinc(int bitDepth, +AudioResamplerSinc::AudioResamplerSinc( int inChannelCount, int32_t sampleRate, src_quality quality) - : AudioResampler(bitDepth, inChannelCount, sampleRate, quality), + : AudioResampler(inChannelCount, sampleRate, quality), mState(0), mImpulse(0), mRingFull(0), mFirCoefs(0) { /* @@ -503,10 +504,12 @@ void AudioResamplerSinc::init() { mRingFull = mImpulse + (numCoefs+1)*mChannelCount; } -void AudioResamplerSinc::setVolume(int16_t left, int16_t right) { +void AudioResamplerSinc::setVolume(float left, float right) { AudioResampler::setVolume(left, right); - mVolumeSIMD[0] = int32_t(left)<<16; - mVolumeSIMD[1] = int32_t(right)<<16; + // convert to U4_28 (rounding down). + // integer volume values are clamped to 0 to UNITY_GAIN. 
+ mVolumeSIMD[0] = u4_28_from_float(clampFloatVol(left)); + mVolumeSIMD[1] = u4_28_from_float(clampFloatVol(right)); } void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, @@ -546,7 +549,7 @@ void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); while (outputIndex < outputSampleCount) { // buffer is empty, fetch a new one diff --git a/services/audioflinger/AudioResamplerSinc.h b/services/audioflinger/AudioResamplerSinc.h index 1ea4474..4691d0a 100644 --- a/services/audioflinger/AudioResamplerSinc.h +++ b/services/audioflinger/AudioResamplerSinc.h @@ -34,7 +34,7 @@ typedef int32_t (*readResampleFirLerpIntBitsFn)(); class AudioResamplerSinc : public AudioResampler { public: - AudioResamplerSinc(int bitDepth, int inChannelCount, int32_t sampleRate, + AudioResamplerSinc(int inChannelCount, int32_t sampleRate, src_quality quality = HIGH_QUALITY); virtual ~AudioResamplerSinc(); @@ -44,7 +44,7 @@ public: private: void init(); - virtual void setVolume(int16_t left, int16_t right); + virtual void setVolume(float left, float right); template<int CHANNELS> void resample(int32_t* out, size_t outFrameCount, diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h index 0754d9d..6a8aeb1 100644 --- a/services/audioflinger/Configuration.h +++ b/services/audioflinger/Configuration.h @@ -31,6 +31,7 @@ // uncomment to enable fast mixer to take performance samples for later statistical analysis #define FAST_MIXER_STATISTICS +// FIXME rename to FAST_THREAD_STATISTICS // uncomment for debugging timing problems related to StateQueue::push() //#define STATE_QUEUE_DUMP diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp index 010e233..bcaf8ae 100644 --- a/services/audioflinger/Effects.cpp +++ b/services/audioflinger/Effects.cpp @@ -44,6 +44,8 @@ #define ALOGVV(a...) do { } while(0) #endif +#define min(a, b) ((a) < (b) ? 
(a) : (b)) + namespace android { // ---------------------------------------------------------------------------- @@ -66,7 +68,8 @@ AudioFlinger::EffectModule::EffectModule(ThreadBase *thread, mStatus(NO_INIT), mState(IDLE), // mMaxDisableWaitCnt is set by configure() and not used before then // mDisableWaitCnt is set by process() and updateState() and not used before then - mSuspended(false) + mSuspended(false), + mAudioFlinger(thread->mAudioFlinger) { ALOGV("Constructor %p", this); int lStatus; @@ -116,8 +119,9 @@ status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle) continue; } // first non destroyed handle is considered in control - if (controlHandle == NULL) + if (controlHandle == NULL) { controlHandle = h; + } if (h->priority() <= priority) { break; } @@ -194,9 +198,19 @@ size_t AudioFlinger::EffectModule::disconnect(EffectHandle *handle, bool unpinIf // destructor before we exit sp<EffectModule> keep(this); { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - thread->disconnectEffect(keep, handle, unpinIfLast); + if (removeHandle(handle) == 0) { + if (!isPinned() || unpinIfLast) { + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + Mutex::Autolock _l(thread->mLock); + thread->removeEffect_l(this); + } + sp<AudioFlinger> af = mAudioFlinger.promote(); + if (af != 0) { + af->updateOrphanEffectChains(this); + } + AudioSystem::unregisterEffect(mId); + } } } return mHandles.size(); @@ -426,6 +440,20 @@ status_t AudioFlinger::EffectModule::init() return status; } +void AudioFlinger::EffectModule::addEffectToHal_l() +{ + if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC || + (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) { + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { + audio_stream_t *stream = thread->stream(); + if (stream != NULL) { + stream->add_audio_effect(stream, mEffectInterface); + } + } + } +} + status_t AudioFlinger::EffectModule::start() { Mutex::Autolock _l(mLock); @@ -451,15 +479,11 @@ status_t AudioFlinger::EffectModule::start_l() if (status == 0) { status = cmdStatus; } - if (status == 0 && - ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC || - (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { - audio_stream_t *stream = thread->stream(); - if (stream != NULL) { - stream->add_audio_effect(stream, mEffectInterface); - } + if (status == 0) { + addEffectToHal_l(); + sp<EffectChain> chain = mChain.promote(); + if (chain != 0) { + chain->forceVolume(); } } return status; @@ -804,7 +828,112 @@ bool AudioFlinger::EffectModule::isOffloaded() const return mOffloaded; } -void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args) +String8 effectFlagsToString(uint32_t flags) { + String8 s; + + s.append("conn. 
mode: "); + switch (flags & EFFECT_FLAG_TYPE_MASK) { + case EFFECT_FLAG_TYPE_INSERT: s.append("insert"); break; + case EFFECT_FLAG_TYPE_AUXILIARY: s.append("auxiliary"); break; + case EFFECT_FLAG_TYPE_REPLACE: s.append("replace"); break; + case EFFECT_FLAG_TYPE_PRE_PROC: s.append("preproc"); break; + case EFFECT_FLAG_TYPE_POST_PROC: s.append("postproc"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + + s.append("insert pref: "); + switch (flags & EFFECT_FLAG_INSERT_MASK) { + case EFFECT_FLAG_INSERT_ANY: s.append("any"); break; + case EFFECT_FLAG_INSERT_FIRST: s.append("first"); break; + case EFFECT_FLAG_INSERT_LAST: s.append("last"); break; + case EFFECT_FLAG_INSERT_EXCLUSIVE: s.append("exclusive"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + + s.append("volume mgmt: "); + switch (flags & EFFECT_FLAG_VOLUME_MASK) { + case EFFECT_FLAG_VOLUME_NONE: s.append("none"); break; + case EFFECT_FLAG_VOLUME_CTRL: s.append("implements control"); break; + case EFFECT_FLAG_VOLUME_IND: s.append("requires indication"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + + uint32_t devind = flags & EFFECT_FLAG_DEVICE_MASK; + if (devind) { + s.append("device indication: "); + switch (devind) { + case EFFECT_FLAG_DEVICE_IND: s.append("requires updates"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + } + + s.append("input mode: "); + switch (flags & EFFECT_FLAG_INPUT_MASK) { + case EFFECT_FLAG_INPUT_DIRECT: s.append("direct"); break; + case EFFECT_FLAG_INPUT_PROVIDER: s.append("provider"); break; + case EFFECT_FLAG_INPUT_BOTH: s.append("direct+provider"); break; + default: s.append("not set"); break; + } + s.append(", "); + + s.append("output mode: "); + switch (flags & EFFECT_FLAG_OUTPUT_MASK) { + case EFFECT_FLAG_OUTPUT_DIRECT: s.append("direct"); break; + case EFFECT_FLAG_OUTPUT_PROVIDER: s.append("provider"); break; + case EFFECT_FLAG_OUTPUT_BOTH: s.append("direct+provider"); break; + default: s.append("not set"); break; + } + s.append(", "); + + uint32_t accel = flags & EFFECT_FLAG_HW_ACC_MASK; + if (accel) { + s.append("hardware acceleration: "); + switch (accel) { + case EFFECT_FLAG_HW_ACC_SIMPLE: s.append("non-tunneled"); break; + case EFFECT_FLAG_HW_ACC_TUNNEL: s.append("tunneled"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + } + + uint32_t modeind = flags & EFFECT_FLAG_AUDIO_MODE_MASK; + if (modeind) { + s.append("mode indication: "); + switch (modeind) { + case EFFECT_FLAG_AUDIO_MODE_IND: s.append("required"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + } + + uint32_t srcind = flags & EFFECT_FLAG_AUDIO_SOURCE_MASK; + if (srcind) { + s.append("source indication: "); + switch (srcind) { + case EFFECT_FLAG_AUDIO_SOURCE_IND: s.append("required"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + } + + if (flags & EFFECT_FLAG_OFFLOAD_MASK) { + s.append("offloadable, "); + } + + int len = s.length(); + if (s.length() > 2) { + char *str = s.lockBuffer(len); + s.unlockBuffer(len - 2); + } + return s; +} + + +void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -838,9 +967,10 @@ void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args) mDescriptor.type.node[2], mDescriptor.type.node[3],mDescriptor.type.node[4],mDescriptor.type.node[5]); result.append(buffer); - snprintf(buffer, SIZE, 
"\t\t- apiVersion: %08X\n\t\t- flags: %08X\n", + snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X (%s)\n", mDescriptor.apiVersion, - mDescriptor.flags); + mDescriptor.flags, + effectFlagsToString(mDescriptor.flags).string()); result.append(buffer); snprintf(buffer, SIZE, "\t\t- name: %s\n", mDescriptor.name); @@ -851,37 +981,37 @@ void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args) result.append("\t\t- Input configuration:\n"); result.append("\t\t\tFrames Smp rate Channels Format Buffer\n"); - snprintf(buffer, SIZE, "\t\t\t%05zu %05d %08x %6d %p\n", + snprintf(buffer, SIZE, "\t\t\t%05zu %05d %08x %6d (%s) %p\n", mConfig.inputCfg.buffer.frameCount, mConfig.inputCfg.samplingRate, mConfig.inputCfg.channels, mConfig.inputCfg.format, + formatToString((audio_format_t)mConfig.inputCfg.format), mConfig.inputCfg.buffer.raw); result.append(buffer); result.append("\t\t- Output configuration:\n"); result.append("\t\t\tBuffer Frames Smp rate Channels Format\n"); - snprintf(buffer, SIZE, "\t\t\t%p %05zu %05d %08x %d\n", + snprintf(buffer, SIZE, "\t\t\t%p %05zu %05d %08x %d (%s)\n", mConfig.outputCfg.buffer.raw, mConfig.outputCfg.buffer.frameCount, mConfig.outputCfg.samplingRate, mConfig.outputCfg.channels, - mConfig.outputCfg.format); + mConfig.outputCfg.format, + formatToString((audio_format_t)mConfig.outputCfg.format)); result.append(buffer); snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size()); result.append(buffer); - result.append("\t\t\tPid Priority Ctrl Locked client server\n"); + result.append("\t\t\t Pid Priority Ctrl Locked client server\n"); for (size_t i = 0; i < mHandles.size(); ++i) { EffectHandle *handle = mHandles[i]; if (handle != NULL && !handle->destroyed_l()) { - handle->dump(buffer, SIZE); + handle->dumpToBuffer(buffer, SIZE); result.append(buffer); } } - result.append("\n"); - write(fd, result.string(), result.length()); if (locked) { @@ -911,18 +1041,15 @@ AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect, } int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int); mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset); - if (mCblkMemory != 0) { - mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer()); - - if (mCblk != NULL) { - new(mCblk) effect_param_cblk_t(); - mBuffer = (uint8_t *)mCblk + bufOffset; - } - } else { + if (mCblkMemory == 0 || + (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) { ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + sizeof(effect_param_cblk_t)); + mCblkMemory.clear(); return; } + new(mCblk) effect_param_cblk_t(); + mBuffer = (uint8_t *)mCblk + bufOffset; } AudioFlinger::EffectHandle::~EffectHandle() @@ -939,6 +1066,11 @@ AudioFlinger::EffectHandle::~EffectHandle() disconnect(false); } +status_t AudioFlinger::EffectHandle::initCheck() +{ + return mClient == 0 || mCblkMemory != 0 ? OK : NO_MEMORY; +} + status_t AudioFlinger::EffectHandle::enable() { ALOGV("enable %p", this); @@ -1053,8 +1185,8 @@ void AudioFlinger::EffectHandle::disconnect(bool unpinIfLast) mCblk->~effect_param_cblk_t(); // destroy our shared-structure. 
} mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to - // Client destructor must run with AudioFlinger mutex locked - Mutex::Autolock _l(mClient->audioFlinger()->mLock); + // Client destructor must run with AudioFlinger client mutex locked + Mutex::Autolock _l(mClient->audioFlinger()->mClientLock); mClient.clear(); } } @@ -1179,15 +1311,15 @@ status_t AudioFlinger::EffectHandle::onTransact( } -void AudioFlinger::EffectHandle::dump(char* buffer, size_t size) +void AudioFlinger::EffectHandle::dumpToBuffer(char* buffer, size_t size) { bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock); - snprintf(buffer, size, "\t\t\t%05d %05d %01u %01u %05u %05u\n", + snprintf(buffer, size, "\t\t\t%5d %5d %3s %3s %5u %5u\n", (mClient == 0) ? getpid_cached : mClient->pid(), mPriority, - mHasControl, - !locked, + mHasControl ? "yes" : "no", + locked ? "yes" : "no", mCblk ? mCblk->clientIndex : 0, mCblk ? mCblk->serverIndex : 0 ); @@ -1204,7 +1336,7 @@ AudioFlinger::EffectChain::EffectChain(ThreadBase *thread, int sessionId) : mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0), mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX), - mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX) + mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX), mForceVolume(false) { mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC); if (thread == NULL) { @@ -1278,7 +1410,13 @@ void AudioFlinger::EffectChain::clearInputBuffer() // Must be called with EffectChain::mLock locked void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread) { - memset(mInBuffer, 0, thread->frameCount() * thread->frameSize()); + // TODO: This will change in the future, depending on multichannel + // and sample format changes for effects. 
+ // Currently effects processing is only available for stereo, AUDIO_FORMAT_PCM_16_BIT + // (4 bytes frame size) + const size_t frameSize = + audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) * min(FCC_2, thread->channelCount()); + memset(mInBuffer, 0, thread->frameCount() * frameSize); } // Must be called with EffectChain::mLock locked @@ -1521,7 +1659,8 @@ bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right) } } - if (ctrlIdx == mVolumeCtrlIdx && *left == mLeftVolume && *right == mRightVolume) { + if (!isVolumeForced() && ctrlIdx == mVolumeCtrlIdx && + *left == mLeftVolume && *right == mRightVolume) { if (hasControl) { *left = mNewLeftVolume; *right = mNewRightVolume; @@ -1562,39 +1701,52 @@ bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right) return hasControl; } +void AudioFlinger::EffectChain::syncHalEffectsState() +{ + Mutex::Autolock _l(mLock); + for (size_t i = 0; i < mEffects.size(); i++) { + if (mEffects[i]->state() == EffectModule::ACTIVE || + mEffects[i]->state() == EffectModule::STOPPING) { + mEffects[i]->addEffectToHal_l(); + } + } +} + void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args) { const size_t SIZE = 256; char buffer[SIZE]; String8 result; - snprintf(buffer, SIZE, "Effects for session %d:\n", mSessionId); + size_t numEffects = mEffects.size(); + snprintf(buffer, SIZE, " %d effects for session %d\n", numEffects, mSessionId); result.append(buffer); - bool locked = AudioFlinger::dumpTryLock(mLock); - // failed to lock - AudioFlinger is probably deadlocked - if (!locked) { - result.append("\tCould not lock mutex:\n"); - } + if (numEffects) { + bool locked = AudioFlinger::dumpTryLock(mLock); + // failed to lock - AudioFlinger is probably deadlocked + if (!locked) { + result.append("\tCould not lock mutex:\n"); + } - result.append("\tNum fx In buffer Out buffer Active tracks:\n"); - snprintf(buffer, SIZE, "\t%02zu %p %p %d\n", - mEffects.size(), - mInBuffer, - mOutBuffer, - mActiveTrackCnt); - result.append(buffer); - write(fd, result.string(), result.size()); + result.append("\tIn buffer Out buffer Active tracks:\n"); + snprintf(buffer, SIZE, "\t%p %p %d\n", + mInBuffer, + mOutBuffer, + mActiveTrackCnt); + result.append(buffer); + write(fd, result.string(), result.size()); - for (size_t i = 0; i < mEffects.size(); ++i) { - sp<EffectModule> effect = mEffects[i]; - if (effect != 0) { - effect->dump(fd, args); + for (size_t i = 0; i < numEffects; ++i) { + sp<EffectModule> effect = mEffects[i]; + if (effect != 0) { + effect->dump(fd, args); + } } - } - if (locked) { - mLock.unlock(); + if (locked) { + mLock.unlock(); + } } } @@ -1792,4 +1944,13 @@ bool AudioFlinger::EffectChain::isNonOffloadableEnabled() return false; } +void AudioFlinger::EffectChain::setThread(const sp<ThreadBase>& thread) +{ + Mutex::Autolock _l(mLock); + mThread = thread; + for (size_t i = 0; i < mEffects.size(); i++) { + mEffects[i]->setThread(thread); + } +} + }; // namespace android diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h index b717857..6f93f81 100644 --- a/services/audioflinger/Effects.h +++ b/services/audioflinger/Effects.h @@ -119,6 +119,7 @@ public: { return (mDescriptor.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) != 0; } status_t setOffloaded(bool offloaded, audio_io_handle_t io); bool isOffloaded() const; + void addEffectToHal_l(); void dump(int fd, const Vector<String16>& args); @@ -153,6 +154,7 @@ mutable Mutex mLock; // mutex for process, commands and handl uint32_t mDisableWaitCnt; // 
current process() calls count during disable period. bool mSuspended; // effect is suspended: temporarily disabled by framework bool mOffloaded; // effect is currently offloaded to the audio DSP + wp<AudioFlinger> mAudioFlinger; }; // The EffectHandle class implements the IEffect interface. It provides resources @@ -169,6 +171,7 @@ public: const sp<IEffectClient>& effectClient, int32_t priority); virtual ~EffectHandle(); + virtual status_t initCheck(); // IEffect virtual status_t enable(); @@ -208,7 +211,7 @@ public: // destroyed_l() must be called with the associated EffectModule mLock held bool destroyed_l() const { return mDestroyed; } - void dump(char* buffer, size_t size); + void dumpToBuffer(char* buffer, size_t size); protected: friend class AudioFlinger; // for mEffect, mHasControl, mEnabled @@ -269,6 +272,7 @@ public: sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor); sp<EffectModule> getEffectFromId_l(int id); sp<EffectModule> getEffectFromType_l(const effect_uuid_t *type); + // FIXME use float to improve the dynamic range bool setVolume_l(uint32_t *left, uint32_t *right); void setDevice_l(audio_devices_t device); void setMode_l(audio_mode_t mode); @@ -315,6 +319,14 @@ public: // At least one non offloadable effect in the chain is enabled bool isNonOffloadableEnabled(); + // use release_cas because we don't care about the observed value, just want to make sure the + // new value is observable. + void forceVolume() { android_atomic_release_cas(false, true, &mForceVolume); } + // use acquire_cas because we are interested in the observed value and + // we are the only observers. + bool isVolumeForced() { return (android_atomic_acquire_cas(true, false, &mForceVolume) == 0); } + + void syncHalEffectsState(); void dump(int fd, const Vector<String16>& args); @@ -345,6 +357,8 @@ protected: void clearInputBuffer_l(sp<ThreadBase> thread); + void setThread(const sp<ThreadBase>& thread); + wp<ThreadBase> mThread; // parent mixer thread Mutex mLock; // mutex protecting effect list Vector< sp<EffectModule> > mEffects; // list of effect modules @@ -370,4 +384,5 @@ protected: // timeLow fields among effect type UUIDs. // Updated by updateSuspendedSessions_l() only. KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects; + volatile int32_t mForceVolume; // force next volume command because a new effect was enabled }; diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp new file mode 100644 index 0000000..0c9b976 --- /dev/null +++ b/services/audioflinger/FastCapture.cpp @@ -0,0 +1,222 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "FastCapture" +//#define LOG_NDEBUG 0 + +#define ATRACE_TAG ATRACE_TAG_AUDIO + +#include "Configuration.h" +#include <linux/futex.h> +#include <sys/syscall.h> +#include <media/AudioBufferProvider.h> +#include <utils/Log.h> +#include <utils/Trace.h> +#include "FastCapture.h" + +namespace android { + +/*static*/ const FastCaptureState FastCapture::initial; + +FastCapture::FastCapture() : FastThread(), + inputSource(NULL), inputSourceGen(0), pipeSink(NULL), pipeSinkGen(0), + readBuffer(NULL), readBufferState(-1), format(Format_Invalid), sampleRate(0), + // dummyDumpState + totalNativeFramesRead(0) +{ + previous = &initial; + current = &initial; + + mDummyDumpState = &dummyDumpState; +} + +FastCapture::~FastCapture() +{ +} + +FastCaptureStateQueue* FastCapture::sq() +{ + return &mSQ; +} + +const FastThreadState *FastCapture::poll() +{ + return mSQ.poll(); +} + +void FastCapture::setLog(NBLog::Writer *logWriter __unused) +{ +} + +void FastCapture::onIdle() +{ + preIdle = *(const FastCaptureState *)current; + current = &preIdle; +} + +void FastCapture::onExit() +{ + delete[] readBuffer; +} + +bool FastCapture::isSubClassCommand(FastThreadState::Command command) +{ + switch ((FastCaptureState::Command) command) { + case FastCaptureState::READ: + case FastCaptureState::WRITE: + case FastCaptureState::READ_WRITE: + return true; + default: + return false; + } +} + +void FastCapture::onStateChange() +{ + const FastCaptureState * const current = (const FastCaptureState *) this->current; + const FastCaptureState * const previous = (const FastCaptureState *) this->previous; + FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) this->dumpState; + const size_t frameCount = current->mFrameCount; + + bool eitherChanged = false; + + // check for change in input HAL configuration + NBAIO_Format previousFormat = format; + if (current->mInputSourceGen != inputSourceGen) { + inputSource = current->mInputSource; + inputSourceGen = current->mInputSourceGen; + if (inputSource == NULL) { + format = Format_Invalid; + sampleRate = 0; + } else { + format = inputSource->format(); + sampleRate = Format_sampleRate(format); + unsigned channelCount = Format_channelCount(format); + ALOG_ASSERT(channelCount == 1 || channelCount == 2); + } + dumpState->mSampleRate = sampleRate; + eitherChanged = true; + } + + // check for change in pipe + if (current->mPipeSinkGen != pipeSinkGen) { + pipeSink = current->mPipeSink; + pipeSinkGen = current->mPipeSinkGen; + eitherChanged = true; + } + + // input source and pipe sink must be compatible + if (eitherChanged && inputSource != NULL && pipeSink != NULL) { + ALOG_ASSERT(Format_isEqual(format, pipeSink->format())); + } + + if ((!Format_isEqual(format, previousFormat)) || (frameCount != previous->mFrameCount)) { + // FIXME to avoid priority inversion, don't delete here + delete[] readBuffer; + readBuffer = NULL; + if (frameCount > 0 && sampleRate > 0) { + // FIXME new may block for unbounded time at internal mutex of the heap + // implementation; it would be better to have normal capture thread allocate for + // us to avoid blocking here and to prevent possible priority inversion + unsigned channelCount = Format_channelCount(format); + // FIXME frameSize + readBuffer = new short[frameCount * channelCount]; + periodNs = (frameCount * 1000000000LL) / sampleRate; // 1.00 + underrunNs = (frameCount * 1750000000LL) / sampleRate; // 1.75 + overrunNs = (frameCount * 500000000LL) / sampleRate; // 0.50 + forceNs = (frameCount * 950000000LL) / sampleRate; // 
0.95 + warmupNs = (frameCount * 500000000LL) / sampleRate; // 0.50 + } else { + periodNs = 0; + underrunNs = 0; + overrunNs = 0; + forceNs = 0; + warmupNs = 0; + } + readBufferState = -1; + dumpState->mFrameCount = frameCount; + } + +} + +void FastCapture::onWork() +{ + const FastCaptureState * const current = (const FastCaptureState *) this->current; + FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) this->dumpState; + const FastCaptureState::Command command = this->command; + const size_t frameCount = current->mFrameCount; + + if ((command & FastCaptureState::READ) /*&& isWarm*/) { + ALOG_ASSERT(inputSource != NULL); + ALOG_ASSERT(readBuffer != NULL); + dumpState->mReadSequence++; + ATRACE_BEGIN("read"); + ssize_t framesRead = inputSource->read(readBuffer, frameCount, + AudioBufferProvider::kInvalidPTS); + ATRACE_END(); + dumpState->mReadSequence++; + if (framesRead >= 0) { + LOG_ALWAYS_FATAL_IF((size_t) framesRead > frameCount); + totalNativeFramesRead += framesRead; + dumpState->mFramesRead = totalNativeFramesRead; + readBufferState = framesRead; + } else { + dumpState->mReadErrors++; + readBufferState = 0; + } + // FIXME rename to attemptedIO + attemptedWrite = true; + } + + if (command & FastCaptureState::WRITE) { + ALOG_ASSERT(pipeSink != NULL); + ALOG_ASSERT(readBuffer != NULL); + if (readBufferState < 0) { + unsigned channelCount = Format_channelCount(format); + // FIXME frameSize + memset(readBuffer, 0, frameCount * channelCount * sizeof(short)); + readBufferState = frameCount; + } + if (readBufferState > 0) { + ssize_t framesWritten = pipeSink->write(readBuffer, readBufferState); + // FIXME This supports at most one fast capture client. + // To handle multiple clients this could be converted to an array, + // or with a lot more work the control block could be shared by all clients. + audio_track_cblk_t* cblk = current->mCblk; + if (cblk != NULL && framesWritten > 0) { + int32_t rear = cblk->u.mStreaming.mRear; + android_atomic_release_store(framesWritten + rear, &cblk->u.mStreaming.mRear); + cblk->mServer += framesWritten; + int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex); + if (!(old & CBLK_FUTEX_WAKE)) { + // client is never in server process, so don't use FUTEX_WAKE_PRIVATE + (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, 1); + } + } + } + } +} + +FastCaptureDumpState::FastCaptureDumpState() : FastThreadDumpState(), + mReadSequence(0), mFramesRead(0), mReadErrors(0), mSampleRate(0), mFrameCount(0) +{ +} + +FastCaptureDumpState::~FastCaptureDumpState() +{ +} + +} // namespace android diff --git a/services/audioflinger/FastCapture.h b/services/audioflinger/FastCapture.h new file mode 100644 index 0000000..e535b9d --- /dev/null +++ b/services/audioflinger/FastCapture.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_FAST_CAPTURE_H +#define ANDROID_AUDIO_FAST_CAPTURE_H + +#include "FastThread.h" +#include "StateQueue.h" +#include "FastCaptureState.h" + +namespace android { + +typedef StateQueue<FastCaptureState> FastCaptureStateQueue; + +struct FastCaptureDumpState : FastThreadDumpState { + FastCaptureDumpState(); + /*virtual*/ ~FastCaptureDumpState(); + + // FIXME by renaming, could pull up many of these to FastThreadDumpState + uint32_t mReadSequence; // incremented before and after each read() + uint32_t mFramesRead; // total number of frames read successfully + uint32_t mReadErrors; // total number of read() errors + uint32_t mSampleRate; + size_t mFrameCount; +}; + +class FastCapture : public FastThread { + +public: + FastCapture(); + virtual ~FastCapture(); + + FastCaptureStateQueue* sq(); + +private: + FastCaptureStateQueue mSQ; + + // callouts + virtual const FastThreadState *poll(); + virtual void setLog(NBLog::Writer *logWriter); + virtual void onIdle(); + virtual void onExit(); + virtual bool isSubClassCommand(FastThreadState::Command command); + virtual void onStateChange(); + virtual void onWork(); + + static const FastCaptureState initial; + FastCaptureState preIdle; // copy of state before we went into idle + // FIXME by renaming, could pull up many of these to FastThread + NBAIO_Source *inputSource; + int inputSourceGen; + NBAIO_Sink *pipeSink; + int pipeSinkGen; + short *readBuffer; + ssize_t readBufferState; // number of initialized frames in readBuffer, or -1 to clear + NBAIO_Format format; + unsigned sampleRate; + FastCaptureDumpState dummyDumpState; + uint32_t totalNativeFramesRead; // copied to dumpState->mFramesRead + +}; // class FastCapture + +} // namespace android + +#endif // ANDROID_AUDIO_FAST_CAPTURE_H diff --git a/services/audioflinger/FastCaptureState.cpp b/services/audioflinger/FastCaptureState.cpp new file mode 100644 index 0000000..1d029b7 --- /dev/null +++ b/services/audioflinger/FastCaptureState.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "FastCaptureState.h" + +namespace android { + +FastCaptureState::FastCaptureState() : FastThreadState(), + mInputSource(NULL), mInputSourceGen(0), mPipeSink(NULL), mPipeSinkGen(0), mFrameCount(0) +{ +} + +FastCaptureState::~FastCaptureState() +{ +} + +} // android diff --git a/services/audioflinger/FastCaptureState.h b/services/audioflinger/FastCaptureState.h new file mode 100644 index 0000000..29c865a --- /dev/null +++ b/services/audioflinger/FastCaptureState.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_FAST_CAPTURE_STATE_H +#define ANDROID_AUDIO_FAST_CAPTURE_STATE_H + +#include <media/nbaio/NBAIO.h> +#include "FastThreadState.h" +#include <private/media/AudioTrackShared.h> + +namespace android { + +// Represent a single state of the fast capture +struct FastCaptureState : FastThreadState { + FastCaptureState(); + /*virtual*/ ~FastCaptureState(); + + // all pointer fields use raw pointers; objects are owned and ref-counted by RecordThread + NBAIO_Source *mInputSource; // HAL input device, must already be negotiated + // FIXME by renaming, could pull up these fields to FastThreadState + int mInputSourceGen; // increment when mInputSource is assigned + NBAIO_Sink *mPipeSink; // after reading from input source, write to this pipe sink + int mPipeSinkGen; // increment when mPipeSink is assigned + size_t mFrameCount; // number of frames per fast capture buffer + audio_track_cblk_t *mCblk; // control block for the single fast client, or NULL + + // Extends FastThreadState::Command + static const Command + // The following commands also process configuration changes, and can be "or"ed: + READ = 0x8, // read from input source + WRITE = 0x10, // write to pipe sink + READ_WRITE = 0x18; // read from input source and write to pipe sink + +}; // struct FastCaptureState + +} // namespace android + +#endif // ANDROID_AUDIO_FAST_CAPTURE_STATE_H diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp index 6d87838..2678cbf 100644 --- a/services/audioflinger/FastMixer.cpp +++ b/services/audioflinger/FastMixer.cpp @@ -36,620 +36,436 @@ #include <cpustats/ThreadCpuUsage.h> #endif #endif +#include <audio_utils/format.h> #include "AudioMixer.h" #include "FastMixer.h" -#define FAST_HOT_IDLE_NS 1000000L // 1 ms: time to sleep while hot idling -#define FAST_DEFAULT_NS 999999999L // ~1 sec: default time to sleep -#define MIN_WARMUP_CYCLES 2 // minimum number of loop cycles to wait for warmup -#define MAX_WARMUP_CYCLES 10 // maximum number of loop cycles to wait for warmup - #define FCC_2 2 // fixed channel count assumption namespace android { -// Fast mixer thread -bool FastMixer::threadLoop() +/*static*/ const FastMixerState FastMixer::initial; + +FastMixer::FastMixer() : FastThread(), + slopNs(0), + // fastTrackNames + // generations + outputSink(NULL), + outputSinkGen(0), + mixer(NULL), + mSinkBuffer(NULL), + mSinkBufferSize(0), + mSinkChannelCount(FCC_2), + mMixerBuffer(NULL), + mMixerBufferSize(0), + mMixerBufferFormat(AUDIO_FORMAT_PCM_16_BIT), + mMixerBufferState(UNDEFINED), + format(Format_Invalid), + sampleRate(0), + fastTracksGen(0), + totalNativeFramesWritten(0), + // timestamp + nativeFramesWrittenButNotPresented(0) // the = 0 is to silence the compiler { - static const FastMixerState initial; - const FastMixerState *previous = &initial, *current = &initial; - FastMixerState preIdle; // copy of state before we went into idle - struct timespec oldTs = {0, 0}; - bool oldTsValid = false; - long slopNs = 0; // accumulated time we've woken up too early (> 0) or too late (< 0) - long sleepNs = -1; // -1: busy wait, 0: sched_yield, > 
0: nanosleep - int fastTrackNames[FastMixerState::kMaxFastTracks]; // handles used by mixer to identify tracks - int generations[FastMixerState::kMaxFastTracks]; // last observed mFastTracks[i].mGeneration + // FIXME pass initial as parameter to base class constructor, and make it static local + previous = &initial; + current = &initial; + + mDummyDumpState = &dummyDumpState; + // TODO: Add channel mask to NBAIO_Format. + // We assume that the channel mask must be a valid positional channel mask. + mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount); + unsigned i; for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) { fastTrackNames[i] = -1; generations[i] = 0; } - NBAIO_Sink *outputSink = NULL; - int outputSinkGen = 0; - AudioMixer* mixer = NULL; - short *mixBuffer = NULL; - enum {UNDEFINED, MIXED, ZEROED} mixBufferState = UNDEFINED; - NBAIO_Format format = Format_Invalid; - unsigned sampleRate = 0; - int fastTracksGen = 0; - long periodNs = 0; // expected period; the time required to render one mix buffer - long underrunNs = 0; // underrun likely when write cycle is greater than this value - long overrunNs = 0; // overrun likely when write cycle is less than this value - long forceNs = 0; // if overrun detected, force the write cycle to take this much time - long warmupNs = 0; // warmup complete when write cycle is greater than to this value - FastMixerDumpState dummyDumpState, *dumpState = &dummyDumpState; - bool ignoreNextOverrun = true; // used to ignore initial overrun and first after an underrun #ifdef FAST_MIXER_STATISTICS - struct timespec oldLoad = {0, 0}; // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID) - bool oldLoadValid = false; // whether oldLoad is valid - uint32_t bounds = 0; - bool full = false; // whether we have collected at least mSamplingN samples -#ifdef CPU_FREQUENCY_STATISTICS - ThreadCpuUsage tcu; // for reading the current CPU clock frequency in kHz -#endif + oldLoad.tv_sec = 0; + oldLoad.tv_nsec = 0; #endif - unsigned coldGen = 0; // last observed mColdGen - bool isWarm = false; // true means ready to mix, false means wait for warmup before mixing - struct timespec measuredWarmupTs = {0, 0}; // how long did it take for warmup to complete - uint32_t warmupCycles = 0; // counter of number of loop cycles required to warmup - NBAIO_Sink* teeSink = NULL; // if non-NULL, then duplicate write() to this non-blocking sink - NBLog::Writer dummyLogWriter, *logWriter = &dummyLogWriter; - uint32_t totalNativeFramesWritten = 0; // copied to dumpState->mFramesWritten - - // next 2 fields are valid only when timestampStatus == NO_ERROR - AudioTimestamp timestamp; - uint32_t nativeFramesWrittenButNotPresented = 0; // the = 0 is to silence the compiler - status_t timestampStatus = INVALID_OPERATION; - - for (;;) { - - // either nanosleep, sched_yield, or busy wait - if (sleepNs >= 0) { - if (sleepNs > 0) { - ALOG_ASSERT(sleepNs < 1000000000); - const struct timespec req = {0, sleepNs}; - nanosleep(&req, NULL); - } else { - sched_yield(); - } - } - // default to long sleep for next cycle - sleepNs = FAST_DEFAULT_NS; - - // poll for state change - const FastMixerState *next = mSQ.poll(); - if (next == NULL) { - // continue to use the default initial state until a real state is available - ALOG_ASSERT(current == &initial && previous == &initial); - next = current; - } +} - FastMixerState::Command command = next->mCommand; - if (next != current) { +FastMixer::~FastMixer() +{ +} - // As soon as possible of learning of a new dump area, start using it - 
dumpState = next->mDumpState != NULL ? next->mDumpState : &dummyDumpState; - teeSink = next->mTeeSink; - logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter; - if (mixer != NULL) { - mixer->setLog(logWriter); - } +FastMixerStateQueue* FastMixer::sq() +{ + return &mSQ; +} - // We want to always have a valid reference to the previous (non-idle) state. - // However, the state queue only guarantees access to current and previous states. - // So when there is a transition from a non-idle state into an idle state, we make a - // copy of the last known non-idle state so it is still available on return from idle. - // The possible transitions are: - // non-idle -> non-idle update previous from current in-place - // non-idle -> idle update previous from copy of current - // idle -> idle don't update previous - // idle -> non-idle don't update previous - if (!(current->mCommand & FastMixerState::IDLE)) { - if (command & FastMixerState::IDLE) { - preIdle = *current; - current = &preIdle; - oldTsValid = false; -#ifdef FAST_MIXER_STATISTICS - oldLoadValid = false; -#endif - ignoreNextOverrun = true; - } - previous = current; - } - current = next; - } -#if !LOG_NDEBUG - next = NULL; // not referenced again -#endif +const FastThreadState *FastMixer::poll() +{ + return mSQ.poll(); +} - dumpState->mCommand = command; - - switch (command) { - case FastMixerState::INITIAL: - case FastMixerState::HOT_IDLE: - sleepNs = FAST_HOT_IDLE_NS; - continue; - case FastMixerState::COLD_IDLE: - // only perform a cold idle command once - // FIXME consider checking previous state and only perform if previous != COLD_IDLE - if (current->mColdGen != coldGen) { - int32_t *coldFutexAddr = current->mColdFutexAddr; - ALOG_ASSERT(coldFutexAddr != NULL); - int32_t old = android_atomic_dec(coldFutexAddr); - if (old <= 0) { - (void) syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL); - } - int policy = sched_getscheduler(0); - if (!(policy == SCHED_FIFO || policy == SCHED_RR)) { - ALOGE("did not receive expected priority boost"); - } - // This may be overly conservative; there could be times that the normal mixer - // requests such a brief cold idle that it doesn't require resetting this flag. 
- isWarm = false; - measuredWarmupTs.tv_sec = 0; - measuredWarmupTs.tv_nsec = 0; - warmupCycles = 0; - sleepNs = -1; - coldGen = current->mColdGen; -#ifdef FAST_MIXER_STATISTICS - bounds = 0; - full = false; -#endif - oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs); - timestampStatus = INVALID_OPERATION; - } else { - sleepNs = FAST_HOT_IDLE_NS; - } - continue; - case FastMixerState::EXIT: - delete mixer; - delete[] mixBuffer; - return false; - case FastMixerState::MIX: - case FastMixerState::WRITE: - case FastMixerState::MIX_WRITE: - break; - default: - LOG_FATAL("bad command %d", command); +void FastMixer::setLog(NBLog::Writer *logWriter) +{ + if (mixer != NULL) { + mixer->setLog(logWriter); + } +} + +void FastMixer::onIdle() +{ + preIdle = *(const FastMixerState *)current; + current = &preIdle; +} + +void FastMixer::onExit() +{ + delete mixer; + free(mMixerBuffer); + free(mSinkBuffer); +} + +bool FastMixer::isSubClassCommand(FastThreadState::Command command) +{ + switch ((FastMixerState::Command) command) { + case FastMixerState::MIX: + case FastMixerState::WRITE: + case FastMixerState::MIX_WRITE: + return true; + default: + return false; + } +} + +void FastMixer::onStateChange() +{ + const FastMixerState * const current = (const FastMixerState *) this->current; + const FastMixerState * const previous = (const FastMixerState *) this->previous; + FastMixerDumpState * const dumpState = (FastMixerDumpState *) this->dumpState; + const size_t frameCount = current->mFrameCount; + + // handle state change here, but since we want to diff the state, + // we're prepared for previous == &initial the first time through + unsigned previousTrackMask; + + // check for change in output HAL configuration + NBAIO_Format previousFormat = format; + if (current->mOutputSinkGen != outputSinkGen) { + outputSink = current->mOutputSink; + outputSinkGen = current->mOutputSinkGen; + if (outputSink == NULL) { + format = Format_Invalid; + sampleRate = 0; + mSinkChannelCount = 0; + mSinkChannelMask = AUDIO_CHANNEL_NONE; + } else { + format = outputSink->format(); + sampleRate = Format_sampleRate(format); + mSinkChannelCount = Format_channelCount(format); + LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS); + + // TODO: Add channel mask to NBAIO_Format + // We assume that the channel mask must be a valid positional channel mask. + mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount); } + dumpState->mSampleRate = sampleRate; + } - // there is a non-idle state available to us; did the state change? 
- size_t frameCount = current->mFrameCount; - if (current != previous) { - - // handle state change here, but since we want to diff the state, - // we're prepared for previous == &initial the first time through - unsigned previousTrackMask; - - // check for change in output HAL configuration - NBAIO_Format previousFormat = format; - if (current->mOutputSinkGen != outputSinkGen) { - outputSink = current->mOutputSink; - outputSinkGen = current->mOutputSinkGen; - if (outputSink == NULL) { - format = Format_Invalid; - sampleRate = 0; - } else { - format = outputSink->format(); - sampleRate = Format_sampleRate(format); - ALOG_ASSERT(Format_channelCount(format) == FCC_2); - } + if ((!Format_isEqual(format, previousFormat)) || (frameCount != previous->mFrameCount)) { + // FIXME to avoid priority inversion, don't delete here + delete mixer; + mixer = NULL; + free(mMixerBuffer); + mMixerBuffer = NULL; + free(mSinkBuffer); + mSinkBuffer = NULL; + if (frameCount > 0 && sampleRate > 0) { + // FIXME new may block for unbounded time at internal mutex of the heap + // implementation; it would be better to have normal mixer allocate for us + // to avoid blocking here and to prevent possible priority inversion + mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks); + const size_t mixerFrameSize = mSinkChannelCount + * audio_bytes_per_sample(mMixerBufferFormat); + mMixerBufferSize = mixerFrameSize * frameCount; + (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize); + const size_t sinkFrameSize = mSinkChannelCount + * audio_bytes_per_sample(format.mFormat); + if (sinkFrameSize > mixerFrameSize) { // need a sink buffer + mSinkBufferSize = sinkFrameSize * frameCount; + (void)posix_memalign(&mSinkBuffer, 32, mSinkBufferSize); } - - if ((format != previousFormat) || (frameCount != previous->mFrameCount)) { - // FIXME to avoid priority inversion, don't delete here - delete mixer; - mixer = NULL; - delete[] mixBuffer; - mixBuffer = NULL; - if (frameCount > 0 && sampleRate > 0) { - // FIXME new may block for unbounded time at internal mutex of the heap - // implementation; it would be better to have normal mixer allocate for us - // to avoid blocking here and to prevent possible priority inversion - mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks); - mixBuffer = new short[frameCount * FCC_2]; - periodNs = (frameCount * 1000000000LL) / sampleRate; // 1.00 - underrunNs = (frameCount * 1750000000LL) / sampleRate; // 1.75 - overrunNs = (frameCount * 500000000LL) / sampleRate; // 0.50 - forceNs = (frameCount * 950000000LL) / sampleRate; // 0.95 - warmupNs = (frameCount * 500000000LL) / sampleRate; // 0.50 - } else { - periodNs = 0; - underrunNs = 0; - overrunNs = 0; - forceNs = 0; - warmupNs = 0; - } - mixBufferState = UNDEFINED; + periodNs = (frameCount * 1000000000LL) / sampleRate; // 1.00 + underrunNs = (frameCount * 1750000000LL) / sampleRate; // 1.75 + overrunNs = (frameCount * 500000000LL) / sampleRate; // 0.50 + forceNs = (frameCount * 950000000LL) / sampleRate; // 0.95 + warmupNs = (frameCount * 500000000LL) / sampleRate; // 0.50 + } else { + periodNs = 0; + underrunNs = 0; + overrunNs = 0; + forceNs = 0; + warmupNs = 0; + } + mMixerBufferState = UNDEFINED; #if !LOG_NDEBUG - for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) { - fastTrackNames[i] = -1; - } + for (unsigned i = 0; i < FastMixerState::kMaxFastTracks; ++i) { + fastTrackNames[i] = -1; + } #endif - // we need to reconfigure all active tracks - previousTrackMask = 0; - fastTracksGen = 
current->mFastTracksGen - 1; - dumpState->mFrameCount = frameCount; - } else { - previousTrackMask = previous->mTrackMask; - } + // we need to reconfigure all active tracks + previousTrackMask = 0; + fastTracksGen = current->mFastTracksGen - 1; + dumpState->mFrameCount = frameCount; + } else { + previousTrackMask = previous->mTrackMask; + } - // check for change in active track set - unsigned currentTrackMask = current->mTrackMask; - dumpState->mTrackMask = currentTrackMask; - if (current->mFastTracksGen != fastTracksGen) { - ALOG_ASSERT(mixBuffer != NULL); - int name; - - // process removed tracks first to avoid running out of track names - unsigned removedTracks = previousTrackMask & ~currentTrackMask; - while (removedTracks != 0) { - i = __builtin_ctz(removedTracks); - removedTracks &= ~(1 << i); - const FastTrack* fastTrack = &current->mFastTracks[i]; - ALOG_ASSERT(fastTrack->mBufferProvider == NULL); - if (mixer != NULL) { - name = fastTrackNames[i]; - ALOG_ASSERT(name >= 0); - mixer->deleteTrackName(name); - } + // check for change in active track set + const unsigned currentTrackMask = current->mTrackMask; + dumpState->mTrackMask = currentTrackMask; + if (current->mFastTracksGen != fastTracksGen) { + ALOG_ASSERT(mMixerBuffer != NULL); + int name; + + // process removed tracks first to avoid running out of track names + unsigned removedTracks = previousTrackMask & ~currentTrackMask; + while (removedTracks != 0) { + int i = __builtin_ctz(removedTracks); + removedTracks &= ~(1 << i); + const FastTrack* fastTrack = &current->mFastTracks[i]; + ALOG_ASSERT(fastTrack->mBufferProvider == NULL); + if (mixer != NULL) { + name = fastTrackNames[i]; + ALOG_ASSERT(name >= 0); + mixer->deleteTrackName(name); + } #if !LOG_NDEBUG - fastTrackNames[i] = -1; + fastTrackNames[i] = -1; #endif - // don't reset track dump state, since other side is ignoring it - generations[i] = fastTrack->mGeneration; - } + // don't reset track dump state, since other side is ignoring it + generations[i] = fastTrack->mGeneration; + } - // now process added tracks - unsigned addedTracks = currentTrackMask & ~previousTrackMask; - while (addedTracks != 0) { - i = __builtin_ctz(addedTracks); - addedTracks &= ~(1 << i); - const FastTrack* fastTrack = &current->mFastTracks[i]; - AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider; - ALOG_ASSERT(bufferProvider != NULL && fastTrackNames[i] == -1); - if (mixer != NULL) { - // calling getTrackName with default channel mask and a random invalid - // sessionId (no effects here) - name = mixer->getTrackName(AUDIO_CHANNEL_OUT_STEREO, -555); - ALOG_ASSERT(name >= 0); - fastTrackNames[i] = name; - mixer->setBufferProvider(name, bufferProvider); - mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, - (void *) mixBuffer); - // newly allocated track names default to full scale volume - mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK, - (void *)(uintptr_t)fastTrack->mChannelMask); - mixer->enable(name); - } - generations[i] = fastTrack->mGeneration; - } + // now process added tracks + unsigned addedTracks = currentTrackMask & ~previousTrackMask; + while (addedTracks != 0) { + int i = __builtin_ctz(addedTracks); + addedTracks &= ~(1 << i); + const FastTrack* fastTrack = &current->mFastTracks[i]; + AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider; + ALOG_ASSERT(bufferProvider != NULL && fastTrackNames[i] == -1); + if (mixer != NULL) { + name = mixer->getTrackName(fastTrack->mChannelMask, + fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX); + 
ALOG_ASSERT(name >= 0); + fastTrackNames[i] = name; + mixer->setBufferProvider(name, bufferProvider); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (void *)mMixerBuffer); + // newly allocated track names default to full scale volume + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT, + (void *)(uintptr_t)fastTrack->mFormat); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK, + (void *)(uintptr_t)fastTrack->mChannelMask); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK, + (void *)(uintptr_t)mSinkChannelMask); + mixer->enable(name); + } + generations[i] = fastTrack->mGeneration; + } - // finally process (potentially) modified tracks; these use the same slot - // but may have a different buffer provider or volume provider - unsigned modifiedTracks = currentTrackMask & previousTrackMask; - while (modifiedTracks != 0) { - i = __builtin_ctz(modifiedTracks); - modifiedTracks &= ~(1 << i); - const FastTrack* fastTrack = &current->mFastTracks[i]; - if (fastTrack->mGeneration != generations[i]) { - // this track was actually modified - AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider; - ALOG_ASSERT(bufferProvider != NULL); - if (mixer != NULL) { - name = fastTrackNames[i]; - ALOG_ASSERT(name >= 0); - mixer->setBufferProvider(name, bufferProvider); - if (fastTrack->mVolumeProvider == NULL) { - mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, - (void *)0x1000); - mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, - (void *)0x1000); - } - mixer->setParameter(name, AudioMixer::RESAMPLE, - AudioMixer::REMOVE, NULL); - mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK, - (void *)(uintptr_t) fastTrack->mChannelMask); - // already enabled - } - generations[i] = fastTrack->mGeneration; + // finally process (potentially) modified tracks; these use the same slot + // but may have a different buffer provider or volume provider + unsigned modifiedTracks = currentTrackMask & previousTrackMask; + while (modifiedTracks != 0) { + int i = __builtin_ctz(modifiedTracks); + modifiedTracks &= ~(1 << i); + const FastTrack* fastTrack = &current->mFastTracks[i]; + if (fastTrack->mGeneration != generations[i]) { + // this track was actually modified + AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider; + ALOG_ASSERT(bufferProvider != NULL); + if (mixer != NULL) { + name = fastTrackNames[i]; + ALOG_ASSERT(name >= 0); + mixer->setBufferProvider(name, bufferProvider); + if (fastTrack->mVolumeProvider == NULL) { + float f = AudioMixer::UNITY_GAIN_FLOAT; + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f); } + mixer->setParameter(name, AudioMixer::RESAMPLE, + AudioMixer::REMOVE, NULL); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT, + (void *)(uintptr_t)fastTrack->mFormat); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK, + (void *)(uintptr_t)fastTrack->mChannelMask); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK, + (void *)(uintptr_t)mSinkChannelMask); + // already enabled } - - fastTracksGen = current->mFastTracksGen; - - dumpState->mNumTracks = popcount(currentTrackMask); + 
generations[i] = fastTrack->mGeneration; } - -#if 1 // FIXME shouldn't need this - // only process state change once - previous = current; -#endif } - // do work using current state here - if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) { - ALOG_ASSERT(mixBuffer != NULL); - // for each track, update volume and check for underrun - unsigned currentTrackMask = current->mTrackMask; - while (currentTrackMask != 0) { - i = __builtin_ctz(currentTrackMask); - currentTrackMask &= ~(1 << i); - const FastTrack* fastTrack = ¤t->mFastTracks[i]; - - // Refresh the per-track timestamp - if (timestampStatus == NO_ERROR) { - uint32_t trackFramesWrittenButNotPresented = - nativeFramesWrittenButNotPresented; - uint32_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased(); - // Can't provide an AudioTimestamp before first frame presented, - // or during the brief 32-bit wraparound window - if (trackFramesWritten >= trackFramesWrittenButNotPresented) { - AudioTimestamp perTrackTimestamp; - perTrackTimestamp.mPosition = - trackFramesWritten - trackFramesWrittenButNotPresented; - perTrackTimestamp.mTime = timestamp.mTime; - fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp); - } - } + fastTracksGen = current->mFastTracksGen; - int name = fastTrackNames[i]; - ALOG_ASSERT(name >= 0); - if (fastTrack->mVolumeProvider != NULL) { - uint32_t vlr = fastTrack->mVolumeProvider->getVolumeLR(); - mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, - (void *)(uintptr_t)(vlr & 0xFFFF)); - mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, - (void *)(uintptr_t)(vlr >> 16)); - } - // FIXME The current implementation of framesReady() for fast tracks - // takes a tryLock, which can block - // up to 1 ms. If enough active tracks all blocked in sequence, this would result - // in the overall fast mix cycle being delayed. Should use a non-blocking FIFO. - size_t framesReady = fastTrack->mBufferProvider->framesReady(); - if (ATRACE_ENABLED()) { - // I wish we had formatted trace names - char traceName[16]; - strcpy(traceName, "fRdy"); - traceName[4] = i + (i < 10 ? 
'0' : 'A' - 10); - traceName[5] = '\0'; - ATRACE_INT(traceName, framesReady); - } - FastTrackDump *ftDump = &dumpState->mTracks[i]; - FastTrackUnderruns underruns = ftDump->mUnderruns; - if (framesReady < frameCount) { - if (framesReady == 0) { - underruns.mBitFields.mEmpty++; - underruns.mBitFields.mMostRecent = UNDERRUN_EMPTY; - mixer->disable(name); - } else { - // allow mixing partial buffer - underruns.mBitFields.mPartial++; - underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL; - mixer->enable(name); - } - } else { - underruns.mBitFields.mFull++; - underruns.mBitFields.mMostRecent = UNDERRUN_FULL; - mixer->enable(name); + dumpState->mNumTracks = popcount(currentTrackMask); + } +} + +void FastMixer::onWork() +{ + const FastMixerState * const current = (const FastMixerState *) this->current; + FastMixerDumpState * const dumpState = (FastMixerDumpState *) this->dumpState; + const FastMixerState::Command command = this->command; + const size_t frameCount = current->mFrameCount; + + if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) { + ALOG_ASSERT(mMixerBuffer != NULL); + // for each track, update volume and check for underrun + unsigned currentTrackMask = current->mTrackMask; + while (currentTrackMask != 0) { + int i = __builtin_ctz(currentTrackMask); + currentTrackMask &= ~(1 << i); + const FastTrack* fastTrack = ¤t->mFastTracks[i]; + + // Refresh the per-track timestamp + if (timestampStatus == NO_ERROR) { + uint32_t trackFramesWrittenButNotPresented = + nativeFramesWrittenButNotPresented; + uint32_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased(); + // Can't provide an AudioTimestamp before first frame presented, + // or during the brief 32-bit wraparound window + if (trackFramesWritten >= trackFramesWrittenButNotPresented) { + AudioTimestamp perTrackTimestamp; + perTrackTimestamp.mPosition = + trackFramesWritten - trackFramesWrittenButNotPresented; + perTrackTimestamp.mTime = timestamp.mTime; + fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp); } - ftDump->mUnderruns = underruns; - ftDump->mFramesReady = framesReady; } - int64_t pts; - if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) - pts = AudioBufferProvider::kInvalidPTS; + int name = fastTrackNames[i]; + ALOG_ASSERT(name >= 0); + if (fastTrack->mVolumeProvider != NULL) { + gain_minifloat_packed_t vlr = fastTrack->mVolumeProvider->getVolumeLR(); + float vlf = float_from_gain(gain_minifloat_unpack_left(vlr)); + float vrf = float_from_gain(gain_minifloat_unpack_right(vlr)); - // process() is CPU-bound - mixer->process(pts); - mixBufferState = MIXED; - } else if (mixBufferState == MIXED) { - mixBufferState = UNDEFINED; - } - bool attemptedWrite = false; - //bool didFullWrite = false; // dumpsys could display a count of partial writes - if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) { - if (mixBufferState == UNDEFINED) { - memset(mixBuffer, 0, frameCount * FCC_2 * sizeof(short)); - mixBufferState = ZEROED; - } - if (teeSink != NULL) { - (void) teeSink->write(mixBuffer, frameCount); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &vlf); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &vrf); } - // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink, - // but this code should be modified to handle both non-blocking and blocking sinks - dumpState->mWriteSequence++; - ATRACE_BEGIN("write"); - ssize_t framesWritten = outputSink->write(mixBuffer, frameCount); - 
ATRACE_END(); - dumpState->mWriteSequence++; - if (framesWritten >= 0) { - ALOG_ASSERT((size_t) framesWritten <= frameCount); - totalNativeFramesWritten += framesWritten; - dumpState->mFramesWritten = totalNativeFramesWritten; - //if ((size_t) framesWritten == frameCount) { - // didFullWrite = true; - //} - } else { - dumpState->mWriteErrors++; + // FIXME The current implementation of framesReady() for fast tracks + // takes a tryLock, which can block + // up to 1 ms. If enough active tracks all blocked in sequence, this would result + // in the overall fast mix cycle being delayed. Should use a non-blocking FIFO. + size_t framesReady = fastTrack->mBufferProvider->framesReady(); + if (ATRACE_ENABLED()) { + // I wish we had formatted trace names + char traceName[16]; + strcpy(traceName, "fRdy"); + traceName[4] = i + (i < 10 ? '0' : 'A' - 10); + traceName[5] = '\0'; + ATRACE_INT(traceName, framesReady); } - attemptedWrite = true; - // FIXME count # of writes blocked excessively, CPU usage, etc. for dump - - timestampStatus = outputSink->getTimestamp(timestamp); - if (timestampStatus == NO_ERROR) { - uint32_t totalNativeFramesPresented = timestamp.mPosition; - if (totalNativeFramesPresented <= totalNativeFramesWritten) { - nativeFramesWrittenButNotPresented = - totalNativeFramesWritten - totalNativeFramesPresented; + FastTrackDump *ftDump = &dumpState->mTracks[i]; + FastTrackUnderruns underruns = ftDump->mUnderruns; + if (framesReady < frameCount) { + if (framesReady == 0) { + underruns.mBitFields.mEmpty++; + underruns.mBitFields.mMostRecent = UNDERRUN_EMPTY; + mixer->disable(name); } else { - // HAL reported that more frames were presented than were written - timestampStatus = INVALID_OPERATION; + // allow mixing partial buffer + underruns.mBitFields.mPartial++; + underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL; + mixer->enable(name); } + } else { + underruns.mBitFields.mFull++; + underruns.mBitFields.mMostRecent = UNDERRUN_FULL; + mixer->enable(name); } + ftDump->mUnderruns = underruns; + ftDump->mFramesReady = framesReady; } - // To be exactly periodic, compute the next sleep time based on current time. - // This code doesn't have long-term stability when the sink is non-blocking. - // FIXME To avoid drift, use the local audio clock or watch the sink's fill status. - struct timespec newTs; - int rc = clock_gettime(CLOCK_MONOTONIC, &newTs); - if (rc == 0) { - //logWriter->logTimestamp(newTs); - if (oldTsValid) { - time_t sec = newTs.tv_sec - oldTs.tv_sec; - long nsec = newTs.tv_nsec - oldTs.tv_nsec; - ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0), - "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld", - oldTs.tv_sec, oldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec); - if (nsec < 0) { - --sec; - nsec += 1000000000; - } - // To avoid an initial underrun on fast tracks after exiting standby, - // do not start pulling data from tracks and mixing until warmup is complete. - // Warmup is considered complete after the earlier of: - // MIN_WARMUP_CYCLES write() attempts and last one blocks for at least warmupNs - // MAX_WARMUP_CYCLES write() attempts. - // This is overly conservative, but to get better accuracy requires a new HAL API. 
- if (!isWarm && attemptedWrite) { - measuredWarmupTs.tv_sec += sec; - measuredWarmupTs.tv_nsec += nsec; - if (measuredWarmupTs.tv_nsec >= 1000000000) { - measuredWarmupTs.tv_sec++; - measuredWarmupTs.tv_nsec -= 1000000000; - } - ++warmupCycles; - if ((nsec > warmupNs && warmupCycles >= MIN_WARMUP_CYCLES) || - (warmupCycles >= MAX_WARMUP_CYCLES)) { - isWarm = true; - dumpState->mMeasuredWarmupTs = measuredWarmupTs; - dumpState->mWarmupCycles = warmupCycles; - } - } - sleepNs = -1; - if (isWarm) { - if (sec > 0 || nsec > underrunNs) { - ATRACE_NAME("underrun"); - // FIXME only log occasionally - ALOGV("underrun: time since last cycle %d.%03ld sec", - (int) sec, nsec / 1000000L); - dumpState->mUnderruns++; - ignoreNextOverrun = true; - } else if (nsec < overrunNs) { - if (ignoreNextOverrun) { - ignoreNextOverrun = false; - } else { - // FIXME only log occasionally - ALOGV("overrun: time since last cycle %d.%03ld sec", - (int) sec, nsec / 1000000L); - dumpState->mOverruns++; - } - // This forces a minimum cycle time. It: - // - compensates for an audio HAL with jitter due to sample rate conversion - // - works with a variable buffer depth audio HAL that never pulls at a - // rate < than overrunNs per buffer. - // - recovers from overrun immediately after underrun - // It doesn't work with a non-blocking audio HAL. - sleepNs = forceNs - nsec; - } else { - ignoreNextOverrun = false; - } - } -#ifdef FAST_MIXER_STATISTICS - if (isWarm) { - // advance the FIFO queue bounds - size_t i = bounds & (dumpState->mSamplingN - 1); - bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF); - if (full) { - bounds += 0x10000; - } else if (!(bounds & (dumpState->mSamplingN - 1))) { - full = true; - } - // compute the delta value of clock_gettime(CLOCK_MONOTONIC) - uint32_t monotonicNs = nsec; - if (sec > 0 && sec < 4) { - monotonicNs += sec * 1000000000; - } - // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID) - uint32_t loadNs = 0; - struct timespec newLoad; - rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad); - if (rc == 0) { - if (oldLoadValid) { - sec = newLoad.tv_sec - oldLoad.tv_sec; - nsec = newLoad.tv_nsec - oldLoad.tv_nsec; - if (nsec < 0) { - --sec; - nsec += 1000000000; - } - loadNs = nsec; - if (sec > 0 && sec < 4) { - loadNs += sec * 1000000000; - } - } else { - // first time through the loop - oldLoadValid = true; - } - oldLoad = newLoad; - } -#ifdef CPU_FREQUENCY_STATISTICS - // get the absolute value of CPU clock frequency in kHz - int cpuNum = sched_getcpu(); - uint32_t kHz = tcu.getCpukHz(cpuNum); - kHz = (kHz << 4) | (cpuNum & 0xF); -#endif - // save values in FIFO queues for dumpsys - // these stores #1, #2, #3 are not atomic with respect to each other, - // or with respect to store #4 below - dumpState->mMonotonicNs[i] = monotonicNs; - dumpState->mLoadNs[i] = loadNs; -#ifdef CPU_FREQUENCY_STATISTICS - dumpState->mCpukHz[i] = kHz; -#endif - // this store #4 is not atomic with respect to stores #1, #2, #3 above, but - // the newest open & oldest closed halves are atomic with respect to each other - dumpState->mBounds = bounds; - ATRACE_INT("cycle_ms", monotonicNs / 1000000); - ATRACE_INT("load_us", loadNs / 1000); - } -#endif + int64_t pts; + if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) { + pts = AudioBufferProvider::kInvalidPTS; + } + + // process() is CPU-bound + mixer->process(pts); + mMixerBufferState = MIXED; + } else if (mMixerBufferState == MIXED) { + mMixerBufferState = UNDEFINED; + } + //bool didFullWrite = false; // 
dumpsys could display a count of partial writes + if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mMixerBuffer != NULL)) { + if (mMixerBufferState == UNDEFINED) { + memset(mMixerBuffer, 0, mMixerBufferSize); + mMixerBufferState = ZEROED; + } + void *buffer = mSinkBuffer != NULL ? mSinkBuffer : mMixerBuffer; + if (format.mFormat != mMixerBufferFormat) { // sink format not the same as mixer format + memcpy_by_audio_format(buffer, format.mFormat, mMixerBuffer, mMixerBufferFormat, + frameCount * Format_channelCount(format)); + } + // if non-NULL, then duplicate write() to this non-blocking sink + NBAIO_Sink* teeSink; + if ((teeSink = current->mTeeSink) != NULL) { + (void) teeSink->write(buffer, frameCount); + } + // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink, + // but this code should be modified to handle both non-blocking and blocking sinks + dumpState->mWriteSequence++; + ATRACE_BEGIN("write"); + ssize_t framesWritten = outputSink->write(buffer, frameCount); + ATRACE_END(); + dumpState->mWriteSequence++; + if (framesWritten >= 0) { + ALOG_ASSERT((size_t) framesWritten <= frameCount); + totalNativeFramesWritten += framesWritten; + dumpState->mFramesWritten = totalNativeFramesWritten; + //if ((size_t) framesWritten == frameCount) { + // didFullWrite = true; + //} + } else { + dumpState->mWriteErrors++; + } + attemptedWrite = true; + // FIXME count # of writes blocked excessively, CPU usage, etc. for dump + + timestampStatus = outputSink->getTimestamp(timestamp); + if (timestampStatus == NO_ERROR) { + uint32_t totalNativeFramesPresented = timestamp.mPosition; + if (totalNativeFramesPresented <= totalNativeFramesWritten) { + nativeFramesWrittenButNotPresented = + totalNativeFramesWritten - totalNativeFramesPresented; } else { - // first time through the loop - oldTsValid = true; - sleepNs = periodNs; - ignoreNextOverrun = true; + // HAL reported that more frames were presented than were written + timestampStatus = INVALID_OPERATION; } - oldTs = newTs; - } else { - // monotonic clock is broken - oldTsValid = false; - sleepNs = periodNs; } - - - } // for (;;) - - // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion + } } FastMixerDumpState::FastMixerDumpState( #ifdef FAST_MIXER_STATISTICS uint32_t samplingN #endif - ) : - mCommand(FastMixerState::INITIAL), mWriteSequence(0), mFramesWritten(0), - mNumTracks(0), mWriteErrors(0), mUnderruns(0), mOverruns(0), - mSampleRate(0), mFrameCount(0), /* mMeasuredWarmupTs({0, 0}), */ mWarmupCycles(0), + ) : FastThreadDumpState(), + mWriteSequence(0), mFramesWritten(0), + mNumTracks(0), mWriteErrors(0), + mSampleRate(0), mFrameCount(0), mTrackMask(0) -#ifdef FAST_MIXER_STATISTICS - , mSamplingN(0), mBounds(0) -#endif { - mMeasuredWarmupTs.tv_sec = 0; - mMeasuredWarmupTs.tv_nsec = 0; #ifdef FAST_MIXER_STATISTICS increaseSamplingN(samplingN); #endif @@ -694,7 +510,7 @@ static int compare_uint32_t(const void *pa, const void *pb) void FastMixerDumpState::dump(int fd) const { if (mCommand == FastMixerState::INITIAL) { - dprintf(fd, "FastMixer not initialized\n"); + dprintf(fd, " FastMixer not initialized\n"); return; } #define COMMAND_MAX 32 @@ -728,10 +544,10 @@ void FastMixerDumpState::dump(int fd) const double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) + (mMeasuredWarmupTs.tv_nsec / 1000000.0); double mixPeriodSec = (double) mFrameCount / (double) mSampleRate; - dprintf(fd, "FastMixer command=%s writeSequence=%u framesWritten=%u\n" - " numTracks=%u 
writeErrors=%u underruns=%u overruns=%u\n" - " sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n" - " mixPeriod=%.2f ms\n", + dprintf(fd, " FastMixer command=%s writeSequence=%u framesWritten=%u\n" + " numTracks=%u writeErrors=%u underruns=%u overruns=%u\n" + " sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n" + " mixPeriod=%.2f ms\n", string, mWriteSequence, mFramesWritten, mNumTracks, mWriteErrors, mUnderruns, mOverruns, mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles, @@ -782,14 +598,20 @@ void FastMixerDumpState::dump(int fd) const previousCpukHz = sampleCpukHz; #endif } - dprintf(fd, "Simple moving statistics over last %.1f seconds:\n", wall.n() * mixPeriodSec); - dprintf(fd, " wall clock time in ms per mix cycle:\n" - " mean=%.2f min=%.2f max=%.2f stddev=%.2f\n", - wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6, wall.stddev()*1e-6); - dprintf(fd, " raw CPU load in us per mix cycle:\n" - " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n", - loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3, - loadNs.stddev()*1e-3); + if (n) { + dprintf(fd, " Simple moving statistics over last %.1f seconds:\n", + wall.n() * mixPeriodSec); + dprintf(fd, " wall clock time in ms per mix cycle:\n" + " mean=%.2f min=%.2f max=%.2f stddev=%.2f\n", + wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6, + wall.stddev()*1e-6); + dprintf(fd, " raw CPU load in us per mix cycle:\n" + " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n", + loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3, + loadNs.stddev()*1e-3); + } else { + dprintf(fd, " No FastMixer statistics available currently\n"); + } #ifdef CPU_FREQUENCY_STATISTICS dprintf(fd, " CPU clock frequency in MHz:\n" " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n", @@ -807,9 +629,9 @@ void FastMixerDumpState::dump(int fd) const left.sample(tail[i]); right.sample(tail[n - (i + 1)]); } - dprintf(fd, "Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n" - " left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n" - " right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n", + dprintf(fd, " Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n" + " left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n" + " right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n", left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6, right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6, right.stddev()*1e-6); @@ -822,9 +644,9 @@ void FastMixerDumpState::dump(int fd) const // Instead we always display all tracks, with an indication // of whether we think the track is active. uint32_t trackMask = mTrackMask; - dprintf(fd, "Fast tracks: kMaxFastTracks=%u activeMask=%#x\n", + dprintf(fd, " Fast tracks: kMaxFastTracks=%u activeMask=%#x\n", FastMixerState::kMaxFastTracks, trackMask); - dprintf(fd, "Index Active Full Partial Empty Recent Ready\n"); + dprintf(fd, " Index Active Full Partial Empty Recent Ready\n"); for (uint32_t i = 0; i < FastMixerState::kMaxFastTracks; ++i, trackMask >>= 1) { bool isActive = trackMask & 1; const FastTrackDump *ftDump = &mTracks[i]; @@ -844,7 +666,7 @@ void FastMixerDumpState::dump(int fd) const mostRecent = "?"; break; } - dprintf(fd, "%5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? "yes" : "no", + dprintf(fd, " %5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? 
"yes" : "no", (underruns.mBitFields.mFull) & UNDERRUN_MASK, (underruns.mBitFields.mPartial) & UNDERRUN_MASK, (underruns.mBitFields.mEmpty) & UNDERRUN_MASK, diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h index c356d31..fde8c2b 100644 --- a/services/audioflinger/FastMixer.h +++ b/services/audioflinger/FastMixer.h @@ -20,119 +20,70 @@ #include <linux/futex.h> #include <sys/syscall.h> #include <utils/Debug.h> +#include "FastThread.h" #include <utils/Thread.h> #include "StateQueue.h" #include "FastMixerState.h" +#include "FastMixerDumpState.h" namespace android { +class AudioMixer; + typedef StateQueue<FastMixerState> FastMixerStateQueue; -class FastMixer : public Thread { +class FastMixer : public FastThread { public: - FastMixer() : Thread(false /*canCallJava*/) { } - virtual ~FastMixer() { } + FastMixer(); + virtual ~FastMixer(); - FastMixerStateQueue* sq() { return &mSQ; } + FastMixerStateQueue* sq(); private: - virtual bool threadLoop(); FastMixerStateQueue mSQ; -}; // class FastMixer + // callouts + virtual const FastThreadState *poll(); + virtual void setLog(NBLog::Writer *logWriter); + virtual void onIdle(); + virtual void onExit(); + virtual bool isSubClassCommand(FastThreadState::Command command); + virtual void onStateChange(); + virtual void onWork(); + + // FIXME these former local variables need comments and to be renamed to have "m" prefix + static const FastMixerState initial; + FastMixerState preIdle; // copy of state before we went into idle + long slopNs; // accumulated time we've woken up too early (> 0) or too late (< 0) + int fastTrackNames[FastMixerState::kMaxFastTracks]; // handles used by mixer to identify tracks + int generations[FastMixerState::kMaxFastTracks]; // last observed mFastTracks[i].mGeneration + NBAIO_Sink *outputSink; + int outputSinkGen; + AudioMixer* mixer; + + // mSinkBuffer audio format is stored in format.mFormat. + void* mSinkBuffer; // used for mixer output format translation + // if sink format is different than mixer output. + size_t mSinkBufferSize; + uint32_t mSinkChannelCount; + audio_channel_mask_t mSinkChannelMask; + void* mMixerBuffer; // mixer output buffer. + size_t mMixerBufferSize; + audio_format_t mMixerBufferFormat; // mixer output format: AUDIO_FORMAT_PCM_(16_BIT|FLOAT). + + enum {UNDEFINED, MIXED, ZEROED} mMixerBufferState; + NBAIO_Format format; + unsigned sampleRate; + int fastTracksGen; + FastMixerDumpState dummyDumpState; + uint32_t totalNativeFramesWritten; // copied to dumpState->mFramesWritten + + // next 2 fields are valid only when timestampStatus == NO_ERROR + AudioTimestamp timestamp; + uint32_t nativeFramesWrittenButNotPresented; -// Describes the underrun status for a single "pull" attempt -enum FastTrackUnderrunStatus { - UNDERRUN_FULL, // framesReady() is full frame count, no underrun - UNDERRUN_PARTIAL, // framesReady() is non-zero but < full frame count, partial underrun - UNDERRUN_EMPTY, // framesReady() is zero, total underrun -}; - -// Underrun counters are not reset to zero for new tracks or if track generation changes. -// This packed representation is used to keep the information atomic. 
-union FastTrackUnderruns { - FastTrackUnderruns() { mAtomic = 0; - COMPILE_TIME_ASSERT_FUNCTION_SCOPE(sizeof(FastTrackUnderruns) == sizeof(uint32_t)); } - FastTrackUnderruns(const FastTrackUnderruns& copyFrom) : mAtomic(copyFrom.mAtomic) { } - FastTrackUnderruns& operator=(const FastTrackUnderruns& rhs) - { if (this != &rhs) mAtomic = rhs.mAtomic; return *this; } - struct { -#define UNDERRUN_BITS 10 -#define UNDERRUN_MASK ((1 << UNDERRUN_BITS) - 1) - uint32_t mFull : UNDERRUN_BITS; // framesReady() is full frame count - uint32_t mPartial : UNDERRUN_BITS; // framesReady() is non-zero but < full frame count - uint32_t mEmpty : UNDERRUN_BITS; // framesReady() is zero - FastTrackUnderrunStatus mMostRecent : 2; // status of most recent framesReady() - } mBitFields; -private: - uint32_t mAtomic; -}; - -// Represents the dump state of a fast track -struct FastTrackDump { - FastTrackDump() : mFramesReady(0) { } - /*virtual*/ ~FastTrackDump() { } - FastTrackUnderruns mUnderruns; - size_t mFramesReady; // most recent value only; no long-term statistics kept -}; - -// The FastMixerDumpState keeps a cache of FastMixer statistics that can be logged by dumpsys. -// Each individual native word-sized field is accessed atomically. But the -// overall structure is non-atomic, that is there may be an inconsistency between fields. -// No barriers or locks are used for either writing or reading. -// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks). -// It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer. -struct FastMixerDumpState { - FastMixerDumpState( -#ifdef FAST_MIXER_STATISTICS - uint32_t samplingN = kSamplingNforLowRamDevice -#endif - ); - /*virtual*/ ~FastMixerDumpState(); - - void dump(int fd) const; // should only be called on a stable copy, not the original - - FastMixerState::Command mCommand; // current command - uint32_t mWriteSequence; // incremented before and after each write() - uint32_t mFramesWritten; // total number of frames written successfully - uint32_t mNumTracks; // total number of active fast tracks - uint32_t mWriteErrors; // total number of write() errors - uint32_t mUnderruns; // total number of underruns - uint32_t mOverruns; // total number of overruns - uint32_t mSampleRate; - size_t mFrameCount; - struct timespec mMeasuredWarmupTs; // measured warmup time - uint32_t mWarmupCycles; // number of loop cycles required to warmup - uint32_t mTrackMask; // mask of active tracks - FastTrackDump mTracks[FastMixerState::kMaxFastTracks]; - -#ifdef FAST_MIXER_STATISTICS - // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency. - // kSamplingN is max size of sampling frame (statistics), and must be a power of 2 <= 0x8000. - // The sample arrays are virtually allocated based on this compile-time constant, - // but are only initialized and used based on the runtime parameter mSamplingN. - static const uint32_t kSamplingN = 0x8000; - // Compile-time constant for a "low RAM device", must be a power of 2 <= kSamplingN. - // This value was chosen such that each array uses 1 small page (4 Kbytes). - static const uint32_t kSamplingNforLowRamDevice = 0x400; - // Corresponding runtime maximum size of sample arrays, must be a power of 2 <= kSamplingN. 
- uint32_t mSamplingN; - // The bounds define the interval of valid samples, and are represented as follows: - // newest open (excluded) endpoint = lower 16 bits of bounds, modulo N - // oldest closed (included) endpoint = upper 16 bits of bounds, modulo N - // Number of valid samples is newest - oldest. - uint32_t mBounds; // bounds for mMonotonicNs, mThreadCpuNs, and mCpukHz - // The elements in the *Ns arrays are in units of nanoseconds <= 3999999999. - uint32_t mMonotonicNs[kSamplingN]; // delta monotonic (wall clock) time - uint32_t mLoadNs[kSamplingN]; // delta CPU load in time -#ifdef CPU_FREQUENCY_STATISTICS - uint32_t mCpukHz[kSamplingN]; // absolute CPU clock frequency in kHz, bits 0-3 are CPU# -#endif - // Increase sampling window after construction, must be a power of 2 <= kSamplingN - void increaseSamplingN(uint32_t samplingN); -#endif -}; +}; // class FastMixer } // namespace android diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h new file mode 100644 index 0000000..6a1e464 --- /dev/null +++ b/services/audioflinger/FastMixerDumpState.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H +#define ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H + +#include "Configuration.h" + +namespace android { + +// Describes the underrun status for a single "pull" attempt +enum FastTrackUnderrunStatus { + UNDERRUN_FULL, // framesReady() is full frame count, no underrun + UNDERRUN_PARTIAL, // framesReady() is non-zero but < full frame count, partial underrun + UNDERRUN_EMPTY, // framesReady() is zero, total underrun +}; + +// Underrun counters are not reset to zero for new tracks or if track generation changes. +// This packed representation is used to keep the information atomic. 
+union FastTrackUnderruns { + FastTrackUnderruns() { mAtomic = 0; + COMPILE_TIME_ASSERT_FUNCTION_SCOPE(sizeof(FastTrackUnderruns) == sizeof(uint32_t)); } + FastTrackUnderruns(const FastTrackUnderruns& copyFrom) : mAtomic(copyFrom.mAtomic) { } + FastTrackUnderruns& operator=(const FastTrackUnderruns& rhs) + { if (this != &rhs) mAtomic = rhs.mAtomic; return *this; } + struct { +#define UNDERRUN_BITS 10 +#define UNDERRUN_MASK ((1 << UNDERRUN_BITS) - 1) + uint32_t mFull : UNDERRUN_BITS; // framesReady() is full frame count + uint32_t mPartial : UNDERRUN_BITS; // framesReady() is non-zero but < full frame count + uint32_t mEmpty : UNDERRUN_BITS; // framesReady() is zero + FastTrackUnderrunStatus mMostRecent : 2; // status of most recent framesReady() + } mBitFields; +private: + uint32_t mAtomic; +}; + +// Represents the dump state of a fast track +struct FastTrackDump { + FastTrackDump() : mFramesReady(0) { } + /*virtual*/ ~FastTrackDump() { } + FastTrackUnderruns mUnderruns; + size_t mFramesReady; // most recent value only; no long-term statistics kept +}; + +// The FastMixerDumpState keeps a cache of FastMixer statistics that can be logged by dumpsys. +// Each individual native word-sized field is accessed atomically. But the +// overall structure is non-atomic, that is there may be an inconsistency between fields. +// No barriers or locks are used for either writing or reading. +// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks). +// It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer. +struct FastMixerDumpState : FastThreadDumpState { + FastMixerDumpState( +#ifdef FAST_MIXER_STATISTICS + uint32_t samplingN = kSamplingNforLowRamDevice +#endif + ); + /*virtual*/ ~FastMixerDumpState(); + + void dump(int fd) const; // should only be called on a stable copy, not the original + + uint32_t mWriteSequence; // incremented before and after each write() + uint32_t mFramesWritten; // total number of frames written successfully + uint32_t mNumTracks; // total number of active fast tracks + uint32_t mWriteErrors; // total number of write() errors + uint32_t mSampleRate; + size_t mFrameCount; + uint32_t mTrackMask; // mask of active tracks + FastTrackDump mTracks[FastMixerState::kMaxFastTracks]; + +#ifdef FAST_MIXER_STATISTICS + // Compile-time constant for a "low RAM device", must be a power of 2 <= kSamplingN. + // This value was chosen such that each array uses 1 small page (4 Kbytes). + static const uint32_t kSamplingNforLowRamDevice = 0x400; + // Increase sampling window after construction, must be a power of 2 <= kSamplingN + void increaseSamplingN(uint32_t samplingN); +#endif +}; + +} // android + +#endif // ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp index 43ff233..3aa8dad 100644 --- a/services/audioflinger/FastMixerState.cpp +++ b/services/audioflinger/FastMixerState.cpp @@ -14,14 +14,13 @@ * limitations under the License. 
*/ -#include "Configuration.h" #include "FastMixerState.h" namespace android { FastTrack::FastTrack() : mBufferProvider(NULL), mVolumeProvider(NULL), - mChannelMask(AUDIO_CHANNEL_OUT_STEREO), mGeneration(0) + mChannelMask(AUDIO_CHANNEL_OUT_STEREO), mFormat(AUDIO_FORMAT_INVALID), mGeneration(0) { } @@ -29,10 +28,10 @@ FastTrack::~FastTrack() { } -FastMixerState::FastMixerState() : +FastMixerState::FastMixerState() : FastThreadState(), + // mFastTracks mFastTracksGen(0), mTrackMask(0), mOutputSink(NULL), mOutputSinkGen(0), - mFrameCount(0), mCommand(INITIAL), mColdFutexAddr(NULL), mColdGen(0), - mDumpState(NULL), mTeeSink(NULL), mNBLogWriter(NULL) + mFrameCount(0), mTeeSink(NULL) { } diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h index 9739fe9..661c9ca 100644 --- a/services/audioflinger/FastMixerState.h +++ b/services/audioflinger/FastMixerState.h @@ -17,10 +17,12 @@ #ifndef ANDROID_AUDIO_FAST_MIXER_STATE_H #define ANDROID_AUDIO_FAST_MIXER_STATE_H +#include <audio_utils/minifloat.h> #include <system/audio.h> #include <media/ExtendedAudioBufferProvider.h> #include <media/nbaio/NBAIO.h> #include <media/nbaio/NBLog.h> +#include "FastThreadState.h" namespace android { @@ -28,9 +30,8 @@ struct FastMixerDumpState; class VolumeProvider { public: - // Return the track volume in U4_12 format: left in lower half, right in upper half. The - // provider implementation is responsible for validating that the return value is in range. - virtual uint32_t getVolumeLR() = 0; + // The provider implementation is responsible for validating that the return value is in range. + virtual gain_minifloat_packed_t getVolumeLR() = 0; protected: VolumeProvider() { } virtual ~VolumeProvider() { } @@ -44,11 +45,12 @@ struct FastTrack { ExtendedAudioBufferProvider* mBufferProvider; // must be NULL if inactive, or non-NULL if active VolumeProvider* mVolumeProvider; // optional; if NULL then full-scale audio_channel_mask_t mChannelMask; // AUDIO_CHANNEL_OUT_MONO or AUDIO_CHANNEL_OUT_STEREO + audio_format_t mFormat; // track format int mGeneration; // increment when any field is assigned }; // Represents a single state of the fast mixer -struct FastMixerState { +struct FastMixerState : FastThreadState { FastMixerState(); /*virtual*/ ~FastMixerState(); @@ -61,23 +63,16 @@ struct FastMixerState { NBAIO_Sink* mOutputSink; // HAL output device, must already be negotiated int mOutputSinkGen; // increment when mOutputSink is assigned size_t mFrameCount; // number of frames per fast mix buffer - enum Command { - INITIAL = 0, // used only for the initial state - HOT_IDLE = 1, // do nothing - COLD_IDLE = 2, // wait for the futex - IDLE = 3, // either HOT_IDLE or COLD_IDLE - EXIT = 4, // exit from thread + + // Extends FastThreadState::Command + static const Command // The following commands also process configuration changes, and can be "or"ed: MIX = 0x8, // mix tracks WRITE = 0x10, // write to output sink - MIX_WRITE = 0x18, // mix tracks and write to output sink - } mCommand; - int32_t* mColdFutexAddr; // for COLD_IDLE only, pointer to the associated futex - unsigned mColdGen; // increment when COLD_IDLE is requested so it's only performed once + MIX_WRITE = 0x18; // mix tracks and write to output sink + // This might be a one-time configuration rather than per-state - FastMixerDumpState* mDumpState; // if non-NULL, then update dump state periodically NBAIO_Sink* mTeeSink; // if non-NULL, then duplicate write()s to this non-blocking sink - NBLog::Writer* mNBLogWriter; // non-blocking 
logger }; // struct FastMixerState } // namespace android diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp new file mode 100644 index 0000000..216dace --- /dev/null +++ b/services/audioflinger/FastThread.cpp @@ -0,0 +1,347 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "FastThread" +//#define LOG_NDEBUG 0 + +#define ATRACE_TAG ATRACE_TAG_AUDIO + +#include "Configuration.h" +#include <linux/futex.h> +#include <sys/syscall.h> +#include <utils/Log.h> +#include <utils/Trace.h> +#include "FastThread.h" + +#define FAST_DEFAULT_NS 999999999L // ~1 sec: default time to sleep +#define FAST_HOT_IDLE_NS 1000000L // 1 ms: time to sleep while hot idling +#define MIN_WARMUP_CYCLES 2 // minimum number of loop cycles to wait for warmup +#define MAX_WARMUP_CYCLES 10 // maximum number of loop cycles to wait for warmup + +namespace android { + +FastThread::FastThread() : Thread(false /*canCallJava*/), + // re-initialized to &initial by subclass constructor + previous(NULL), current(NULL), + /* oldTs({0, 0}), */ + oldTsValid(false), + sleepNs(-1), + periodNs(0), + underrunNs(0), + overrunNs(0), + forceNs(0), + warmupNs(0), + // re-initialized to &dummyDumpState by subclass constructor + mDummyDumpState(NULL), + dumpState(NULL), + ignoreNextOverrun(true), +#ifdef FAST_MIXER_STATISTICS + // oldLoad + oldLoadValid(false), + bounds(0), + full(false), + // tcu +#endif + coldGen(0), + isWarm(false), + /* measuredWarmupTs({0, 0}), */ + warmupCycles(0), + // dummyLogWriter + logWriter(&dummyLogWriter), + timestampStatus(INVALID_OPERATION), + + command(FastThreadState::INITIAL), +#if 0 + frameCount(0), +#endif + attemptedWrite(false) +{ + oldTs.tv_sec = 0; + oldTs.tv_nsec = 0; + measuredWarmupTs.tv_sec = 0; + measuredWarmupTs.tv_nsec = 0; +} + +FastThread::~FastThread() +{ +} + +bool FastThread::threadLoop() +{ + for (;;) { + + // either nanosleep, sched_yield, or busy wait + if (sleepNs >= 0) { + if (sleepNs > 0) { + ALOG_ASSERT(sleepNs < 1000000000); + const struct timespec req = {0, sleepNs}; + nanosleep(&req, NULL); + } else { + sched_yield(); + } + } + // default to long sleep for next cycle + sleepNs = FAST_DEFAULT_NS; + + // poll for state change + const FastThreadState *next = poll(); + if (next == NULL) { + // continue to use the default initial state until a real state is available + // FIXME &initial not available, should save address earlier + //ALOG_ASSERT(current == &initial && previous == &initial); + next = current; + } + + command = next->mCommand; + if (next != current) { + + // As soon as possible of learning of a new dump area, start using it + dumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState; + logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter; + setLog(logWriter); + + // We want to always have a valid reference to the previous (non-idle) state. 
+ // However, the state queue only guarantees access to current and previous states. + // So when there is a transition from a non-idle state into an idle state, we make a + // copy of the last known non-idle state so it is still available on return from idle. + // The possible transitions are: + // non-idle -> non-idle update previous from current in-place + // non-idle -> idle update previous from copy of current + // idle -> idle don't update previous + // idle -> non-idle don't update previous + if (!(current->mCommand & FastThreadState::IDLE)) { + if (command & FastThreadState::IDLE) { + onIdle(); + oldTsValid = false; +#ifdef FAST_MIXER_STATISTICS + oldLoadValid = false; +#endif + ignoreNextOverrun = true; + } + previous = current; + } + current = next; + } +#if !LOG_NDEBUG + next = NULL; // not referenced again +#endif + + dumpState->mCommand = command; + + // << current, previous, command, dumpState >> + + switch (command) { + case FastThreadState::INITIAL: + case FastThreadState::HOT_IDLE: + sleepNs = FAST_HOT_IDLE_NS; + continue; + case FastThreadState::COLD_IDLE: + // only perform a cold idle command once + // FIXME consider checking previous state and only perform if previous != COLD_IDLE + if (current->mColdGen != coldGen) { + int32_t *coldFutexAddr = current->mColdFutexAddr; + ALOG_ASSERT(coldFutexAddr != NULL); + int32_t old = android_atomic_dec(coldFutexAddr); + if (old <= 0) { + syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL); + } + int policy = sched_getscheduler(0); + if (!(policy == SCHED_FIFO || policy == SCHED_RR)) { + ALOGE("did not receive expected priority boost"); + } + // This may be overly conservative; there could be times that the normal mixer + // requests such a brief cold idle that it doesn't require resetting this flag. + isWarm = false; + measuredWarmupTs.tv_sec = 0; + measuredWarmupTs.tv_nsec = 0; + warmupCycles = 0; + sleepNs = -1; + coldGen = current->mColdGen; +#ifdef FAST_MIXER_STATISTICS + bounds = 0; + full = false; +#endif + oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs); + timestampStatus = INVALID_OPERATION; + } else { + sleepNs = FAST_HOT_IDLE_NS; + } + continue; + case FastThreadState::EXIT: + onExit(); + return false; + default: + LOG_ALWAYS_FATAL_IF(!isSubClassCommand(command)); + break; + } + + // there is a non-idle state available to us; did the state change? + if (current != previous) { + onStateChange(); +#if 1 // FIXME shouldn't need this + // only process state change once + previous = current; +#endif + } + + // do work using current state here + attemptedWrite = false; + onWork(); + + // To be exactly periodic, compute the next sleep time based on current time. + // This code doesn't have long-term stability when the sink is non-blocking. + // FIXME To avoid drift, use the local audio clock or watch the sink's fill status. + struct timespec newTs; + int rc = clock_gettime(CLOCK_MONOTONIC, &newTs); + if (rc == 0) { + //logWriter->logTimestamp(newTs); + if (oldTsValid) { + time_t sec = newTs.tv_sec - oldTs.tv_sec; + long nsec = newTs.tv_nsec - oldTs.tv_nsec; + ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0), + "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld", + oldTs.tv_sec, oldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec); + if (nsec < 0) { + --sec; + nsec += 1000000000; + } + // To avoid an initial underrun on fast tracks after exiting standby, + // do not start pulling data from tracks and mixing until warmup is complete. 
+                // Warmup is considered complete after the earlier of:
+                //      MIN_WARMUP_CYCLES write() attempts and last one blocks for at least warmupNs
+                //      MAX_WARMUP_CYCLES write() attempts.
+                // This is overly conservative, but to get better accuracy requires a new HAL API.
+                if (!isWarm && attemptedWrite) {
+                    measuredWarmupTs.tv_sec += sec;
+                    measuredWarmupTs.tv_nsec += nsec;
+                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
+                        measuredWarmupTs.tv_sec++;
+                        measuredWarmupTs.tv_nsec -= 1000000000;
+                    }
+                    ++warmupCycles;
+                    if ((nsec > warmupNs && warmupCycles >= MIN_WARMUP_CYCLES) ||
+                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
+                        isWarm = true;
+                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
+                        dumpState->mWarmupCycles = warmupCycles;
+                    }
+                }
+                sleepNs = -1;
+                if (isWarm) {
+                    if (sec > 0 || nsec > underrunNs) {
+                        ATRACE_NAME("underrun");
+                        // FIXME only log occasionally
+                        ALOGV("underrun: time since last cycle %d.%03ld sec",
+                                (int) sec, nsec / 1000000L);
+                        dumpState->mUnderruns++;
+                        ignoreNextOverrun = true;
+                    } else if (nsec < overrunNs) {
+                        if (ignoreNextOverrun) {
+                            ignoreNextOverrun = false;
+                        } else {
+                            // FIXME only log occasionally
+                            ALOGV("overrun: time since last cycle %d.%03ld sec",
+                                    (int) sec, nsec / 1000000L);
+                            dumpState->mOverruns++;
+                        }
+                        // This forces a minimum cycle time. It:
+                        //  - compensates for an audio HAL with jitter due to sample rate conversion
+                        //  - works with a variable buffer depth audio HAL that never pulls at a
+                        //    rate less than overrunNs per buffer.
+                        //  - recovers from overrun immediately after underrun
+                        // It doesn't work with a non-blocking audio HAL.
+                        sleepNs = forceNs - nsec;
+                    } else {
+                        ignoreNextOverrun = false;
+                    }
+                }
+#ifdef FAST_MIXER_STATISTICS
+                if (isWarm) {
+                    // advance the FIFO queue bounds
+                    size_t i = bounds & (dumpState->mSamplingN - 1);
+                    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
+                    if (full) {
+                        bounds += 0x10000;
+                    } else if (!(bounds & (dumpState->mSamplingN - 1))) {
+                        full = true;
+                    }
+                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
+                    uint32_t monotonicNs = nsec;
+                    if (sec > 0 && sec < 4) {
+                        monotonicNs += sec * 1000000000;
+                    }
+                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
+                    uint32_t loadNs = 0;
+                    struct timespec newLoad;
+                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
+                    if (rc == 0) {
+                        if (oldLoadValid) {
+                            sec = newLoad.tv_sec - oldLoad.tv_sec;
+                            nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
+                            if (nsec < 0) {
+                                --sec;
+                                nsec += 1000000000;
+                            }
+                            loadNs = nsec;
+                            if (sec > 0 && sec < 4) {
+                                loadNs += sec * 1000000000;
+                            }
+                        } else {
+                            // first time through the loop
+                            oldLoadValid = true;
+                        }
+                        oldLoad = newLoad;
+                    }
+#ifdef CPU_FREQUENCY_STATISTICS
+                    // get the absolute value of CPU clock frequency in kHz
+                    int cpuNum = sched_getcpu();
+                    uint32_t kHz = tcu.getCpukHz(cpuNum);
+                    kHz = (kHz << 4) | (cpuNum & 0xF);
+#endif
+                    // save values in FIFO queues for dumpsys
+                    // these stores #1, #2, #3 are not atomic with respect to each other,
+                    // or with respect to store #4 below
+                    dumpState->mMonotonicNs[i] = monotonicNs;
+                    dumpState->mLoadNs[i] = loadNs;
+#ifdef CPU_FREQUENCY_STATISTICS
+                    dumpState->mCpukHz[i] = kHz;
+#endif
+                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
+                    // the newest open & oldest closed halves are atomic with respect to each other
+                    dumpState->mBounds = bounds;
+                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
+                    ATRACE_INT("load_us", loadNs / 1000);
+                }
+#endif
+            } else {
+                // first time through the loop
+                oldTsValid = true;
+                sleepNs = periodNs;
+                ignoreNextOverrun = true;
+            }
+            oldTs = newTs;
+        } else {
+            // monotonic clock is broken
+            oldTsValid = false;
+            sleepNs = periodNs;
+        }
+
+    }   // for (;;)
+
+    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
+}
+
+}   // namespace android
diff --git a/services/audioflinger/FastThread.h b/services/audioflinger/FastThread.h
new file mode 100644
index 0000000..1330334
--- /dev/null
+++ b/services/audioflinger/FastThread.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_FAST_THREAD_H
+#define ANDROID_AUDIO_FAST_THREAD_H
+
+#include "Configuration.h"
+#ifdef CPU_FREQUENCY_STATISTICS
+#include <cpustats/ThreadCpuUsage.h>
+#endif
+#include <utils/Thread.h>
+#include "FastThreadState.h"
+
+namespace android {
+
+// FastThread is the common abstract base class of FastMixer and FastCapture
+class FastThread : public Thread {
+
+public:
+            FastThread();
+    virtual ~FastThread();
+
+private:
+    // implement Thread::threadLoop()
+    virtual bool threadLoop();
+
+protected:
+    // callouts to subclass in same lexical order as they were in original FastMixer.cpp
+    // FIXME need comments
+    virtual const FastThreadState *poll() = 0;
+    virtual void setLog(NBLog::Writer *logWriter __unused) { }
+    virtual void onIdle() = 0;
+    virtual void onExit() = 0;
+    virtual bool isSubClassCommand(FastThreadState::Command command) = 0;
+    virtual void onStateChange() = 0;
+    virtual void onWork() = 0;
+
+    // FIXME these former local variables need comments and to be renamed to have an "m" prefix
+    const FastThreadState *previous;
+    const FastThreadState *current;
+    struct timespec oldTs;
+    bool oldTsValid;
+    long sleepNs;       // -1: busy wait, 0: sched_yield, > 0: nanosleep
+    long periodNs;      // expected period; the time required to render one mix buffer
+    long underrunNs;    // underrun likely when write cycle is greater than this value
+    long overrunNs;     // overrun likely when write cycle is less than this value
+    long forceNs;       // if overrun detected, force the write cycle to take this much time
+    long warmupNs;      // warmup complete when write cycle is greater than this value
+    FastThreadDumpState *mDummyDumpState;
+    FastThreadDumpState *dumpState;
+    bool ignoreNextOverrun;     // used to ignore initial overrun and first after an underrun
+#ifdef FAST_MIXER_STATISTICS
+    struct timespec oldLoad;    // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
+    bool oldLoadValid;  // whether oldLoad is valid
+    uint32_t bounds;
+    bool full;          // whether we have collected at least mSamplingN samples
+#ifdef CPU_FREQUENCY_STATISTICS
+    ThreadCpuUsage tcu;     // for reading the current CPU clock frequency in kHz
+#endif
+#endif
+    unsigned coldGen;   // last observed mColdGen
+    bool isWarm;        // true means ready to mix, false means wait for warmup before mixing
+    struct timespec measuredWarmupTs;   // how long did it take for warmup to complete
+    uint32_t warmupCycles;  // counter of
number of loop cycles required to warmup + NBLog::Writer dummyLogWriter; + NBLog::Writer *logWriter; + status_t timestampStatus; + + FastThreadState::Command command; +#if 0 + size_t frameCount; +#endif + bool attemptedWrite; + +}; // class FastThread + +} // android + +#endif // ANDROID_AUDIO_FAST_THREAD_H diff --git a/services/audioflinger/FastThreadState.cpp b/services/audioflinger/FastThreadState.cpp new file mode 100644 index 0000000..6994872 --- /dev/null +++ b/services/audioflinger/FastThreadState.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Configuration.h" +#include "FastThreadState.h" + +namespace android { + +FastThreadState::FastThreadState() : + mCommand(INITIAL), mColdFutexAddr(NULL), mColdGen(0), mDumpState(NULL), mNBLogWriter(NULL) + +{ +} + +FastThreadState::~FastThreadState() +{ +} + + +FastThreadDumpState::FastThreadDumpState() : + mCommand(FastThreadState::INITIAL), mUnderruns(0), mOverruns(0), + /* mMeasuredWarmupTs({0, 0}), */ + mWarmupCycles(0) +#ifdef FAST_MIXER_STATISTICS + , mSamplingN(1), mBounds(0) +#endif +{ + mMeasuredWarmupTs.tv_sec = 0; + mMeasuredWarmupTs.tv_nsec = 0; +} + +FastThreadDumpState::~FastThreadDumpState() +{ +} + +} // namespace android diff --git a/services/audioflinger/FastThreadState.h b/services/audioflinger/FastThreadState.h new file mode 100644 index 0000000..1ab8a0a --- /dev/null +++ b/services/audioflinger/FastThreadState.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_FAST_THREAD_STATE_H +#define ANDROID_AUDIO_FAST_THREAD_STATE_H + +#include "Configuration.h" +#include <stdint.h> +#include <media/nbaio/NBLog.h> + +namespace android { + +struct FastThreadDumpState; + +// Represents a single state of a FastThread +struct FastThreadState { + FastThreadState(); + /*virtual*/ ~FastThreadState(); + + typedef uint32_t Command; + static const Command + INITIAL = 0, // used only for the initial state + HOT_IDLE = 1, // do nothing + COLD_IDLE = 2, // wait for the futex + IDLE = 3, // either HOT_IDLE or COLD_IDLE + EXIT = 4; // exit from thread + // additional values defined per subclass + Command mCommand; // current command + int32_t* mColdFutexAddr; // for COLD_IDLE only, pointer to the associated futex + unsigned mColdGen; // increment when COLD_IDLE is requested so it's only performed once + + // This might be a one-time configuration rather than per-state + FastThreadDumpState* mDumpState; // if non-NULL, then update dump state periodically + NBLog::Writer* mNBLogWriter; // non-blocking logger + +}; // struct FastThreadState + + +// FIXME extract common part of comment at FastMixerDumpState +struct FastThreadDumpState { + FastThreadDumpState(); + /*virtual*/ ~FastThreadDumpState(); + + FastThreadState::Command mCommand; // current command + uint32_t mUnderruns; // total number of underruns + uint32_t mOverruns; // total number of overruns + struct timespec mMeasuredWarmupTs; // measured warmup time + uint32_t mWarmupCycles; // number of loop cycles required to warmup + +#ifdef FAST_MIXER_STATISTICS + // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency. + // kSamplingN is max size of sampling frame (statistics), and must be a power of 2 <= 0x8000. + // The sample arrays are virtually allocated based on this compile-time constant, + // but are only initialized and used based on the runtime parameter mSamplingN. + static const uint32_t kSamplingN = 0x8000; + // Corresponding runtime maximum size of sample arrays, must be a power of 2 <= kSamplingN. + uint32_t mSamplingN; + // The bounds define the interval of valid samples, and are represented as follows: + // newest open (excluded) endpoint = lower 16 bits of bounds, modulo N + // oldest closed (included) endpoint = upper 16 bits of bounds, modulo N + // Number of valid samples is newest - oldest. + uint32_t mBounds; // bounds for mMonotonicNs, mThreadCpuNs, and mCpukHz + // The elements in the *Ns arrays are in units of nanoseconds <= 3999999999. + uint32_t mMonotonicNs[kSamplingN]; // delta monotonic (wall clock) time + uint32_t mLoadNs[kSamplingN]; // delta CPU load in time +#ifdef CPU_FREQUENCY_STATISTICS + uint32_t mCpukHz[kSamplingN]; // absolute CPU clock frequency in kHz, bits 0-3 are CPU# +#endif +#endif + +}; // struct FastThreadDumpState + +} // android + +#endif // ANDROID_AUDIO_FAST_THREAD_STATE_H diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp new file mode 100644 index 0000000..7544052 --- /dev/null +++ b/services/audioflinger/PatchPanel.cpp @@ -0,0 +1,695 @@ +/* +** +** Copyright 2014, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#define LOG_TAG "AudioFlinger::PatchPanel"
+//#define LOG_NDEBUG 0
+
+#include "Configuration.h"
+#include <utils/Log.h>
+#include <audio_utils/primitives.h>
+
+#include "AudioFlinger.h"
+#include "ServiceUtilities.h"
+#include <media/AudioParameter.h>
+
+// ----------------------------------------------------------------------------
+
+// Note: the following macro is used for extremely verbose logging messages.  In
+// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
+// 0; but one side effect of this is to turn on all LOGVs as well.  Some messages
+// are so verbose that we want to suppress them even when we have ALOG_ASSERT
+// turned on.  Do not uncomment the #def below unless you really know what you
+// are doing and want to see all of the extremely verbose messages.
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+namespace android {
+
+/* List connected audio ports and their attributes */
+status_t AudioFlinger::listAudioPorts(unsigned int *num_ports,
+                                struct audio_port *ports)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPatchPanel != 0) {
+        return mPatchPanel->listAudioPorts(num_ports, ports);
+    }
+    return NO_INIT;
+}
+
+/* Get supported attributes for a given audio port */
+status_t AudioFlinger::getAudioPort(struct audio_port *port)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPatchPanel != 0) {
+        return mPatchPanel->getAudioPort(port);
+    }
+    return NO_INIT;
+}
+
+
+/* Connect a patch between several source and sink ports */
+status_t AudioFlinger::createAudioPatch(const struct audio_patch *patch,
+                                   audio_patch_handle_t *handle)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPatchPanel != 0) {
+        return mPatchPanel->createAudioPatch(patch, handle);
+    }
+    return NO_INIT;
+}
+
+/* Disconnect a patch */
+status_t AudioFlinger::releaseAudioPatch(audio_patch_handle_t handle)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPatchPanel != 0) {
+        return mPatchPanel->releaseAudioPatch(handle);
+    }
+    return NO_INIT;
+}
+
+
+/* List connected audio patches and their attributes */
+status_t AudioFlinger::listAudioPatches(unsigned int *num_patches,
+                                  struct audio_patch *patches)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPatchPanel != 0) {
+        return mPatchPanel->listAudioPatches(num_patches, patches);
+    }
+    return NO_INIT;
+}
+
+/* Set audio port configuration */
+status_t AudioFlinger::setAudioPortConfig(const struct audio_port_config *config)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPatchPanel != 0) {
+        return mPatchPanel->setAudioPortConfig(config);
+    }
+    return NO_INIT;
+}
+
+
+AudioFlinger::PatchPanel::PatchPanel(const sp<AudioFlinger>& audioFlinger)
+                                   : mAudioFlinger(audioFlinger)
+{
+}
+
+AudioFlinger::PatchPanel::~PatchPanel()
+{
+}
+
+/* List connected audio ports and their attributes */
+status_t AudioFlinger::PatchPanel::listAudioPorts(unsigned int *num_ports __unused,
+                                struct audio_port *ports __unused)
+{
+    ALOGV("listAudioPorts");
+    return NO_ERROR;
+}
+
+/* Get supported attributes for a given audio port */
+status_t AudioFlinger::PatchPanel::getAudioPort(struct audio_port *port __unused)
+{
+    ALOGV("getAudioPort");
+    return NO_ERROR;
+}
+
+
+/* Connect a patch between several source and sink ports */
+status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
+                                   audio_patch_handle_t *handle)
+{
+    if (handle == NULL || patch == NULL) {
+        return BAD_VALUE;
+    }
+    ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
+          patch->num_sources, patch->num_sinks, *handle);
+    status_t status = NO_ERROR;
+    audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
+    sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
+    if (audioflinger == 0) {
+        return NO_INIT;
+    }
+
+    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
+            patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+        return BAD_VALUE;
+    }
+    // limit number of sources to 1 for now or 2 sources for special cross hw module case.
+    // only the audio policy manager can request a patch creation with 2 sources.
+    if (patch->num_sources > 2) {
+        return INVALID_OPERATION;
+    }
+
+    if (*handle != AUDIO_PATCH_HANDLE_NONE) {
+        for (size_t index = 0; *handle != 0 && index < mPatches.size(); index++) {
+            if (*handle == mPatches[index]->mHandle) {
+                ALOGV("createAudioPatch() removing patch handle %d", *handle);
+                halHandle = mPatches[index]->mHalHandle;
+                mPatches.removeAt(index);
+                break;
+            }
+        }
+    }
+
+    Patch *newPatch = new Patch(patch);
+
+    switch (patch->sources[0].type) {
+        case AUDIO_PORT_TYPE_DEVICE: {
+            audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
+            ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
+            if (index < 0) {
+                ALOGW("createAudioPatch() bad src hw module %d", srcModule);
+                status = BAD_VALUE;
+                goto exit;
+            }
+            AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
+            for (unsigned int i = 0; i < patch->num_sinks; i++) {
+                // support only one sink if connection to a mix or across HW modules
+                if ((patch->sinks[i].type == AUDIO_PORT_TYPE_MIX ||
+                        patch->sinks[i].ext.mix.hw_module != srcModule) &&
+                        patch->num_sinks > 1) {
+                    status = INVALID_OPERATION;
+                    goto exit;
+                }
+                // reject connection to different sink types
+                if (patch->sinks[i].type != patch->sinks[0].type) {
+                    ALOGW("createAudioPatch() different sink types in same patch not supported");
+                    status = BAD_VALUE;
+                    goto exit;
+                }
+                // limit to connections between devices and input streams for HAL before 3.0
+                if (patch->sinks[i].ext.mix.hw_module == srcModule &&
+                        (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) &&
+                        (patch->sinks[i].type != AUDIO_PORT_TYPE_MIX)) {
+                    ALOGW("createAudioPatch() invalid sink type %d for device source",
+                          patch->sinks[i].type);
+                    status = BAD_VALUE;
+                    goto exit;
+                }
+            }
+
+            if (patch->sinks[0].ext.device.hw_module != srcModule) {
+                // limit to device to device connection if not on same hw module
+                if (patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) {
+                    ALOGW("createAudioPatch() invalid sink type for cross hw module");
+                    status = INVALID_OPERATION;
+                    goto exit;
+                }
+                // special case num sources == 2 => reuse an existing output mix to connect to the
+                // sink
+                if (patch->num_sources == 2) {
+                    if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
+                            patch->sinks[0].ext.device.hw_module !=
+                                    patch->sources[1].ext.mix.hw_module) {
+                        ALOGW("createAudioPatch() invalid source combination");
+                        status = INVALID_OPERATION;
+                        goto exit;
+                    }
+
+                    sp<ThreadBase> thread =
+                            audioflinger->checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
+                    newPatch->mPlaybackThread =
(MixerThread *)thread.get(); + if (thread == 0) { + ALOGW("createAudioPatch() cannot get playback thread"); + status = INVALID_OPERATION; + goto exit; + } + } else { + audio_config_t config = AUDIO_CONFIG_INITIALIZER; + audio_devices_t device = patch->sinks[0].ext.device.type; + String8 address = String8(patch->sinks[0].ext.device.address); + audio_io_handle_t output = AUDIO_IO_HANDLE_NONE; + newPatch->mPlaybackThread = audioflinger->openOutput_l( + patch->sinks[0].ext.device.hw_module, + &output, + &config, + device, + address, + AUDIO_OUTPUT_FLAG_NONE); + ALOGV("audioflinger->openOutput_l() returned %p", + newPatch->mPlaybackThread.get()); + if (newPatch->mPlaybackThread == 0) { + status = NO_MEMORY; + goto exit; + } + } + uint32_t channelCount = newPatch->mPlaybackThread->channelCount(); + audio_devices_t device = patch->sources[0].ext.device.type; + String8 address = String8(patch->sources[0].ext.device.address); + audio_config_t config = AUDIO_CONFIG_INITIALIZER; + audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount); + config.sample_rate = newPatch->mPlaybackThread->sampleRate(); + config.channel_mask = inChannelMask; + config.format = newPatch->mPlaybackThread->format(); + audio_io_handle_t input = AUDIO_IO_HANDLE_NONE; + newPatch->mRecordThread = audioflinger->openInput_l(srcModule, + &input, + &config, + device, + address, + AUDIO_SOURCE_MIC, + AUDIO_INPUT_FLAG_NONE); + ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x", + newPatch->mRecordThread.get(), inChannelMask); + if (newPatch->mRecordThread == 0) { + status = NO_MEMORY; + goto exit; + } + status = createPatchConnections(newPatch, patch); + if (status != NO_ERROR) { + goto exit; + } + } else { + if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) { + sp<ThreadBase> thread = audioflinger->checkRecordThread_l( + patch->sinks[0].ext.mix.handle); + if (thread == 0) { + ALOGW("createAudioPatch() bad capture I/O handle %d", + patch->sinks[0].ext.mix.handle); + status = BAD_VALUE; + goto exit; + } + status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle); + } else { + audio_hw_device_t *hwDevice = audioHwDevice->hwDevice(); + status = hwDevice->create_audio_patch(hwDevice, + patch->num_sources, + patch->sources, + patch->num_sinks, + patch->sinks, + &halHandle); + } + } else { + sp<ThreadBase> thread = audioflinger->checkRecordThread_l( + patch->sinks[0].ext.mix.handle); + if (thread == 0) { + ALOGW("createAudioPatch() bad capture I/O handle %d", + patch->sinks[0].ext.mix.handle); + status = BAD_VALUE; + goto exit; + } + char *address; + if (strcmp(patch->sources[0].ext.device.address, "") != 0) { + address = audio_device_address_to_parameter( + patch->sources[0].ext.device.type, + patch->sources[0].ext.device.address); + } else { + address = (char *)calloc(1, 1); + } + AudioParameter param = AudioParameter(String8(address)); + free(address); + param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), + (int)patch->sources[0].ext.device.type); + param.addInt(String8(AUDIO_PARAMETER_STREAM_INPUT_SOURCE), + (int)patch->sinks[0].ext.mix.usecase.source); + ALOGV("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s", + param.toString().string()); + status = thread->setParameters(param.toString()); + } + } + } break; + case AUDIO_PORT_TYPE_MIX: { + audio_module_handle_t srcModule = patch->sources[0].ext.mix.hw_module; + ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule); + if (index < 0) { + 
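+                // the mix source refers to a hw module that is not registered with AudioFlinger,
+                // so there is nothing to patch from; reject the request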
ALOGW("createAudioPatch() bad src hw module %d", srcModule); + status = BAD_VALUE; + goto exit; + } + // limit to connections between devices and output streams + for (unsigned int i = 0; i < patch->num_sinks; i++) { + if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) { + ALOGW("createAudioPatch() invalid sink type %d for mix source", + patch->sinks[i].type); + status = BAD_VALUE; + goto exit; + } + // limit to connections between sinks and sources on same HW module + if (patch->sinks[i].ext.device.hw_module != srcModule) { + status = BAD_VALUE; + goto exit; + } + } + AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index); + sp<ThreadBase> thread = + audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle); + if (thread == 0) { + ALOGW("createAudioPatch() bad playback I/O handle %d", + patch->sources[0].ext.mix.handle); + status = BAD_VALUE; + goto exit; + } + if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle); + } else { + audio_devices_t type = AUDIO_DEVICE_NONE; + for (unsigned int i = 0; i < patch->num_sinks; i++) { + type |= patch->sinks[i].ext.device.type; + } + char *address; + if (strcmp(patch->sinks[0].ext.device.address, "") != 0) { + //FIXME: we only support address on first sink with HAL version < 3.0 + address = audio_device_address_to_parameter( + patch->sinks[0].ext.device.type, + patch->sinks[0].ext.device.address); + } else { + address = (char *)calloc(1, 1); + } + AudioParameter param = AudioParameter(String8(address)); + free(address); + param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type); + status = thread->setParameters(param.toString()); + } + + } break; + default: + status = BAD_VALUE; + goto exit; + } +exit: + ALOGV("createAudioPatch() status %d", status); + if (status == NO_ERROR) { + *handle = audioflinger->nextUniqueId(); + newPatch->mHandle = *handle; + newPatch->mHalHandle = halHandle; + mPatches.add(newPatch); + ALOGV("createAudioPatch() added new patch handle %d halHandle %d", *handle, halHandle); + } else { + clearPatchConnections(newPatch); + delete newPatch; + } + return status; +} + +status_t AudioFlinger::PatchPanel::createPatchConnections(Patch *patch, + const struct audio_patch *audioPatch) +{ + // create patch from source device to record thread input + struct audio_patch subPatch; + subPatch.num_sources = 1; + subPatch.sources[0] = audioPatch->sources[0]; + subPatch.num_sinks = 1; + + patch->mRecordThread->getAudioPortConfig(&subPatch.sinks[0]); + subPatch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_MIC; + + status_t status = createAudioPatch(&subPatch, &patch->mRecordPatchHandle); + if (status != NO_ERROR) { + patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE; + return status; + } + + // create patch from playback thread output to sink device + patch->mPlaybackThread->getAudioPortConfig(&subPatch.sources[0]); + subPatch.sinks[0] = audioPatch->sinks[0]; + status = createAudioPatch(&subPatch, &patch->mPlaybackPatchHandle); + if (status != NO_ERROR) { + patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE; + return status; + } + + // use a pseudo LCM between input and output framecount + size_t playbackFrameCount = patch->mPlaybackThread->frameCount(); + int playbackShift = __builtin_ctz(playbackFrameCount); + size_t recordFramecount = patch->mRecordThread->frameCount(); + int shift = __builtin_ctz(recordFramecount); + if (playbackShift < shift) { + shift = playbackShift; + } + size_t frameCount = (playbackFrameCount * 
recordFramecount) >> shift; + ALOGV("createPatchConnections() playframeCount %d recordFramecount %d frameCount %d ", + playbackFrameCount, recordFramecount, frameCount); + + // create a special record track to capture from record thread + uint32_t channelCount = patch->mPlaybackThread->channelCount(); + audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount); + audio_channel_mask_t outChannelMask = patch->mPlaybackThread->channelMask(); + uint32_t sampleRate = patch->mPlaybackThread->sampleRate(); + audio_format_t format = patch->mPlaybackThread->format(); + + patch->mPatchRecord = new RecordThread::PatchRecord( + patch->mRecordThread.get(), + sampleRate, + inChannelMask, + format, + frameCount, + NULL, + IAudioFlinger::TRACK_DEFAULT); + if (patch->mPatchRecord == 0) { + return NO_MEMORY; + } + status = patch->mPatchRecord->initCheck(); + if (status != NO_ERROR) { + return status; + } + patch->mRecordThread->addPatchRecord(patch->mPatchRecord); + + // create a special playback track to render to playback thread. + // this track is given the same buffer as the PatchRecord buffer + patch->mPatchTrack = new PlaybackThread::PatchTrack( + patch->mPlaybackThread.get(), + sampleRate, + outChannelMask, + format, + frameCount, + patch->mPatchRecord->buffer(), + IAudioFlinger::TRACK_DEFAULT); + if (patch->mPatchTrack == 0) { + return NO_MEMORY; + } + status = patch->mPatchTrack->initCheck(); + if (status != NO_ERROR) { + return status; + } + patch->mPlaybackThread->addPatchTrack(patch->mPatchTrack); + + // tie playback and record tracks together + patch->mPatchRecord->setPeerProxy(patch->mPatchTrack.get()); + patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get()); + + // start capture and playback + patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, 0); + patch->mPatchTrack->start(); + + return status; +} + +void AudioFlinger::PatchPanel::clearPatchConnections(Patch *patch) +{ + sp<AudioFlinger> audioflinger = mAudioFlinger.promote(); + if (audioflinger == 0) { + return; + } + + ALOGV("clearPatchConnections() patch->mRecordPatchHandle %d patch->mPlaybackPatchHandle %d", + patch->mRecordPatchHandle, patch->mPlaybackPatchHandle); + + if (patch->mPatchRecord != 0) { + patch->mPatchRecord->stop(); + } + if (patch->mPatchTrack != 0) { + patch->mPatchTrack->stop(); + } + if (patch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE) { + releaseAudioPatch(patch->mRecordPatchHandle); + patch->mRecordPatchHandle = AUDIO_PATCH_HANDLE_NONE; + } + if (patch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) { + releaseAudioPatch(patch->mPlaybackPatchHandle); + patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE; + } + if (patch->mRecordThread != 0) { + if (patch->mPatchRecord != 0) { + patch->mRecordThread->deletePatchRecord(patch->mPatchRecord); + patch->mPatchRecord.clear(); + } + audioflinger->closeInputInternal_l(patch->mRecordThread); + patch->mRecordThread.clear(); + } + if (patch->mPlaybackThread != 0) { + if (patch->mPatchTrack != 0) { + patch->mPlaybackThread->deletePatchTrack(patch->mPatchTrack); + patch->mPatchTrack.clear(); + } + // if num sources == 2 we are reusing an existing playback thread so we do not close it + if (patch->mAudioPatch.num_sources != 2) { + audioflinger->closeOutputInternal_l(patch->mPlaybackThread); + } + patch->mPlaybackThread.clear(); + } +} + +/* Disconnect a patch */ +status_t AudioFlinger::PatchPanel::releaseAudioPatch(audio_patch_handle_t handle) +{ + ALOGV("releaseAudioPatch handle %d", handle); + status_t status = NO_ERROR; + size_t 
index; + + sp<AudioFlinger> audioflinger = mAudioFlinger.promote(); + if (audioflinger == 0) { + return NO_INIT; + } + + for (index = 0; index < mPatches.size(); index++) { + if (handle == mPatches[index]->mHandle) { + break; + } + } + if (index == mPatches.size()) { + return BAD_VALUE; + } + Patch *removedPatch = mPatches[index]; + mPatches.removeAt(index); + + struct audio_patch *patch = &removedPatch->mAudioPatch; + + switch (patch->sources[0].type) { + case AUDIO_PORT_TYPE_DEVICE: { + audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module; + ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule); + if (index < 0) { + ALOGW("releaseAudioPatch() bad src hw module %d", srcModule); + status = BAD_VALUE; + break; + } + + if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE && + patch->sinks[0].ext.device.hw_module != srcModule) { + clearPatchConnections(removedPatch); + break; + } + + AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index); + if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) { + sp<ThreadBase> thread = audioflinger->checkRecordThread_l( + patch->sinks[0].ext.mix.handle); + if (thread == 0) { + ALOGW("releaseAudioPatch() bad capture I/O handle %d", + patch->sinks[0].ext.mix.handle); + status = BAD_VALUE; + break; + } + status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle); + } else { + audio_hw_device_t *hwDevice = audioHwDevice->hwDevice(); + status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle); + } + } else { + sp<ThreadBase> thread = audioflinger->checkRecordThread_l( + patch->sinks[0].ext.mix.handle); + if (thread == 0) { + ALOGW("releaseAudioPatch() bad capture I/O handle %d", + patch->sinks[0].ext.mix.handle); + status = BAD_VALUE; + break; + } + AudioParameter param; + param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0); + ALOGV("releaseAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s", + param.toString().string()); + status = thread->setParameters(param.toString()); + } + } break; + case AUDIO_PORT_TYPE_MIX: { + audio_module_handle_t srcModule = patch->sources[0].ext.mix.hw_module; + ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule); + if (index < 0) { + ALOGW("releaseAudioPatch() bad src hw module %d", srcModule); + status = BAD_VALUE; + break; + } + sp<ThreadBase> thread = + audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle); + if (thread == 0) { + ALOGW("releaseAudioPatch() bad playback I/O handle %d", + patch->sources[0].ext.mix.handle); + status = BAD_VALUE; + break; + } + AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index); + if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle); + } else { + AudioParameter param; + param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0); + status = thread->setParameters(param.toString()); + } + } break; + default: + status = BAD_VALUE; + break; + } + + delete removedPatch; + return status; +} + + +/* List connected audio ports and they attributes */ +status_t AudioFlinger::PatchPanel::listAudioPatches(unsigned int *num_patches __unused, + struct audio_patch *patches __unused) +{ + ALOGV("listAudioPatches"); + return NO_ERROR; +} + +/* Set audio port configuration */ +status_t AudioFlinger::PatchPanel::setAudioPortConfig(const struct audio_port_config *config) +{ + ALOGV("setAudioPortConfig"); + status_t status = NO_ERROR; + + 
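+    // hold a strong reference on AudioFlinger for the duration of the call;
+    // if it has already been destroyed there is nothing left to configure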
sp<AudioFlinger> audioflinger = mAudioFlinger.promote(); + if (audioflinger == 0) { + return NO_INIT; + } + + audio_module_handle_t module; + if (config->type == AUDIO_PORT_TYPE_DEVICE) { + module = config->ext.device.hw_module; + } else { + module = config->ext.mix.hw_module; + } + + ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(module); + if (index < 0) { + ALOGW("setAudioPortConfig() bad hw module %d", module); + return BAD_VALUE; + } + + AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index); + if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + audio_hw_device_t *hwDevice = audioHwDevice->hwDevice(); + return hwDevice->set_audio_port_config(hwDevice, config); + } else { + return INVALID_OPERATION; + } + return NO_ERROR; +} + + +}; // namespace android diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h new file mode 100644 index 0000000..e31179c --- /dev/null +++ b/services/audioflinger/PatchPanel.h @@ -0,0 +1,78 @@ +/* +** +** Copyright 2014, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef INCLUDING_FROM_AUDIOFLINGER_H + #error This header file should only be included from AudioFlinger.h +#endif + +class PatchPanel : public RefBase { +public: + + class Patch; + + PatchPanel(const sp<AudioFlinger>& audioFlinger); + virtual ~PatchPanel(); + + /* List connected audio ports and their attributes */ + status_t listAudioPorts(unsigned int *num_ports, + struct audio_port *ports); + + /* Get supported attributes for a given audio port */ + status_t getAudioPort(struct audio_port *port); + + /* Create a patch between several source and sink ports */ + status_t createAudioPatch(const struct audio_patch *patch, + audio_patch_handle_t *handle); + + /* Release a patch */ + status_t releaseAudioPatch(audio_patch_handle_t handle); + + /* List connected audio devices and they attributes */ + status_t listAudioPatches(unsigned int *num_patches, + struct audio_patch *patches); + + /* Set audio port configuration */ + status_t setAudioPortConfig(const struct audio_port_config *config); + + status_t createPatchConnections(Patch *patch, + const struct audio_patch *audioPatch); + void clearPatchConnections(Patch *patch); + + class Patch { + public: + Patch(const struct audio_patch *patch) : + mAudioPatch(*patch), mHandle(AUDIO_PATCH_HANDLE_NONE), + mHalHandle(AUDIO_PATCH_HANDLE_NONE), mRecordPatchHandle(AUDIO_PATCH_HANDLE_NONE), + mPlaybackPatchHandle(AUDIO_PATCH_HANDLE_NONE) {} + ~Patch() {} + + struct audio_patch mAudioPatch; + audio_patch_handle_t mHandle; + audio_patch_handle_t mHalHandle; + sp<PlaybackThread> mPlaybackThread; + sp<PlaybackThread::PatchTrack> mPatchTrack; + sp<RecordThread> mRecordThread; + sp<RecordThread::PatchRecord> mPatchRecord; + audio_patch_handle_t mRecordPatchHandle; + audio_patch_handle_t mPlaybackPatchHandle; + + }; + +private: + const wp<AudioFlinger> mAudioFlinger; + SortedVector <Patch *> mPatches; +}; diff --git 
a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h index 43b77f3..ee48276 100644 --- a/services/audioflinger/PlaybackTracks.h +++ b/services/audioflinger/PlaybackTracks.h @@ -29,14 +29,17 @@ public: audio_format_t format, audio_channel_mask_t channelMask, size_t frameCount, + void *buffer, const sp<IMemory>& sharedBuffer, int sessionId, int uid, - IAudioFlinger::track_flags_t flags); + IAudioFlinger::track_flags_t flags, + track_type type); virtual ~Track(); + virtual status_t initCheck() const; static void appendDumpHeader(String8& result); - void dump(char* buffer, size_t size); + void dump(char* buffer, size_t size, bool active); virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE, int triggerSession = 0); @@ -53,6 +56,7 @@ public: return mStreamType; } bool isOffloaded() const { return (mFlags & IAudioFlinger::TRACK_OFFLOAD) != 0; } + bool isDirect() const { return (mFlags & IAudioFlinger::TRACK_DIRECT) != 0; } status_t setParameters(const String8& keyValuePairs); status_t attachAuxEffect(int EffectId); void setAuxBuffer(int EffectId, int32_t *buffer); @@ -64,7 +68,7 @@ public: void signal(); // implement FastMixerState::VolumeProvider interface - virtual uint32_t getVolumeLR(); + virtual gain_minifloat_packed_t getVolumeLR(); virtual status_t setSyncEvent(const sp<SyncEvent>& event); @@ -93,10 +97,10 @@ protected: bool isReady() const; void setPaused() { mState = PAUSED; } void reset(); - - bool isOutputTrack() const { - return (mStreamType == AUDIO_STREAM_CNT); - } + bool isFlushPending() const { return mFlushHwPending; } + void flushAck(); + bool isResumePending(); + void resumeAck(); sp<IMemory> sharedBuffer() const { return mSharedBuffer; } @@ -109,8 +113,6 @@ public: void triggerEvents(AudioSystem::sync_event_t type); void invalidate(); bool isInvalid() const { return mIsInvalid; } - virtual bool isTimedTrack() const { return false; } - bool isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; } int fastIndex() const { return mFastIndex; } protected: @@ -137,8 +139,6 @@ protected: // audio HAL when this track will be fully rendered // zero means not monitoring private: - IAudioFlinger::track_flags_t mFlags; - // The following fields are only for fast tracks, and should be in a subclass int mFastIndex; // index within FastMixerState::mFastTracks[]; // either mFastIndex == -1 if not isFastTrack() @@ -154,6 +154,12 @@ private: bool mIsInvalid; // non-resettable latch, set by invalidate() AudioTrackServerProxy* mAudioTrackServerProxy; bool mResumeToStopping; // track was paused in stopping state. + bool mFlushHwPending; // track requests for thread flush + + // for last call to getTimestamp + bool mPreviousValid; + uint32_t mPreviousFramesWritten; + AudioTimestamp mPreviousTimestamp; }; // end of Track class TimedTrack : public Track { @@ -185,7 +191,6 @@ class TimedTrack : public Track { }; // Mixer facing methods. 
- virtual bool isTimedTrack() const { return true; } virtual size_t framesReady() const; // AudioBufferProvider interface @@ -286,3 +291,34 @@ private: DuplicatingThread* const mSourceThread; // for waitTimeMs() in write() AudioTrackClientProxy* mClientProxy; }; // end of OutputTrack + +// playback track, used by PatchPanel +class PatchTrack : public Track, public PatchProxyBufferProvider { +public: + + PatchTrack(PlaybackThread *playbackThread, + uint32_t sampleRate, + audio_channel_mask_t channelMask, + audio_format_t format, + size_t frameCount, + void *buffer, + IAudioFlinger::track_flags_t flags); + virtual ~PatchTrack(); + + // AudioBufferProvider interface + virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, + int64_t pts); + virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); + + // PatchProxyBufferProvider interface + virtual status_t obtainBuffer(Proxy::Buffer* buffer, + const struct timespec *timeOut = NULL); + virtual void releaseBuffer(Proxy::Buffer* buffer); + + void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; } + +private: + sp<ClientProxy> mProxy; + PatchProxyBufferProvider* mPeerProxy; + struct timespec mPeerTimeout; +}; // end of PatchTrack diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h index 57de568..204a9d6 100644 --- a/services/audioflinger/RecordTracks.h +++ b/services/audioflinger/RecordTracks.h @@ -28,8 +28,11 @@ public: audio_format_t format, audio_channel_mask_t channelMask, size_t frameCount, + void *buffer, int sessionId, - int uid); + int uid, + IAudioFlinger::track_flags_t flags, + track_type type); virtual ~RecordTrack(); virtual status_t start(AudioSystem::sync_event_t event, int triggerSession); @@ -45,7 +48,10 @@ public: return tmp; } static void appendDumpHeader(String8& result); - void dump(char* buffer, size_t size); + void dump(char* buffer, size_t size, bool active); + + void handleSyncStartEvent(const sp<SyncEvent>& event); + void clearSyncStartEvent(); private: friend class AudioFlinger; // for mState @@ -59,5 +65,64 @@ private: // releaseBuffer() not overridden bool mOverflow; // overflow on most recent attempt to fill client buffer - AudioRecordServerProxy* mAudioRecordServerProxy; + + // updated by RecordThread::readInputParameters_l() + AudioResampler *mResampler; + + // interleaved stereo pairs of fixed-point Q4.27 + int32_t *mRsmpOutBuffer; + // current allocated frame count for the above, which may be larger than needed + size_t mRsmpOutFrameCount; + + size_t mRsmpInUnrel; // unreleased frames remaining from + // most recent getNextBuffer + // for debug only + + // rolling counter that is never cleared + int32_t mRsmpInFront; // next available frame + + AudioBufferProvider::Buffer mSink; // references client's buffer sink in shared memory + + // sync event triggering actual audio capture. Frames read before this event will + // be dropped and therefore not read by the application. + sp<SyncEvent> mSyncStartEvent; + + // number of captured frames to drop after the start sync event has been received. 
+                                // when < 0, maximum frames to drop before starting capture even if sync event is
+                                // not received
+            ssize_t                            mFramesToDrop;
+
+            // used by resampler to find source frames
+            ResamplerBufferProvider            *mResamplerBufferProvider;
 };
+
+// record track, used by PatchPanel
+class PatchRecord : virtual public RecordTrack, public PatchProxyBufferProvider {
+public:
+
+    PatchRecord(RecordThread *recordThread,
+                uint32_t sampleRate,
+                audio_channel_mask_t channelMask,
+                audio_format_t format,
+                size_t frameCount,
+                void *buffer,
+                IAudioFlinger::track_flags_t flags);
+    virtual             ~PatchRecord();
+
+    // AudioBufferProvider interface
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                   int64_t pts);
+    virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+    // PatchProxyBufferProvider interface
+    virtual status_t    obtainBuffer(Proxy::Buffer *buffer,
+                                     const struct timespec *timeOut = NULL);
+    virtual void        releaseBuffer(Proxy::Buffer *buffer);
+
+    void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
+
+private:
+    sp<ClientProxy>             mProxy;
+    PatchProxyBufferProvider*   mPeerProxy;
+    struct timespec             mPeerTimeout;
+};  // end of PatchRecord
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
index 152455d..8246fef 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -59,6 +59,13 @@ bool settingsAllowed() {
     return ok;
 }
 
+bool modifyAudioRoutingAllowed() {
+    static const String16 sModifyAudioRoutingAllowed("android.permission.MODIFY_AUDIO_ROUTING");
+    bool ok = checkCallingPermission(sModifyAudioRoutingAllowed);
+    if (!ok) ALOGE("android.permission.MODIFY_AUDIO_ROUTING");
+    return ok;
+}
+
 bool dumpAllowed() {
     // don't optimize for same pid, since mediaserver never dumps itself
     static const String16 sDump("android.permission.DUMP");
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
index 531bc56..df6f6f4 100644
--- a/services/audioflinger/ServiceUtilities.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -24,6 +24,7 @@ bool recordingAllowed();
 bool captureAudioOutputAllowed();
 bool captureHotwordAllowed();
 bool settingsAllowed();
+bool modifyAudioRoutingAllowed();
 bool dumpAllowed();
 
 }
diff --git a/services/audioflinger/StateQueue.h b/services/audioflinger/StateQueue.h
index ef01df7..27f6a28 100644
--- a/services/audioflinger/StateQueue.h
+++ b/services/audioflinger/StateQueue.h
@@ -91,6 +91,8 @@
 //  arithmetic on the state pointers.  However to the mutator, the state pointers
 //  are in a definite circular order.
+#include "Configuration.h" + namespace android { #ifdef STATE_QUEUE_DUMP diff --git a/services/audioflinger/StateQueueInstantiations.cpp b/services/audioflinger/StateQueueInstantiations.cpp index 0d5cd0c..6f4505e 100644 --- a/services/audioflinger/StateQueueInstantiations.cpp +++ b/services/audioflinger/StateQueueInstantiations.cpp @@ -16,12 +16,14 @@ #include "Configuration.h" #include "FastMixerState.h" +#include "FastCaptureState.h" #include "StateQueue.h" // FIXME hack for gcc namespace android { -template class StateQueue<FastMixerState>; // typedef FastMixerStateQueue +template class StateQueue<FastMixerState>; // typedef FastMixerStateQueue +template class StateQueue<FastCaptureState>; // typedef FastCaptureStateQueue } diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp index 23a2174..44e34b7 100644 --- a/services/audioflinger/Threads.cpp +++ b/services/audioflinger/Threads.cpp @@ -26,6 +26,7 @@ #include <sys/stat.h> #include <cutils/properties.h> #include <media/AudioParameter.h> +#include <media/AudioResamplerPublic.h> #include <utils/Log.h> #include <utils/Trace.h> @@ -34,8 +35,11 @@ #include <audio_effects/effect_ns.h> #include <audio_effects/effect_aec.h> #include <audio_utils/primitives.h> +#include <audio_utils/format.h> +#include <audio_utils/minifloat.h> // NBAIO implementations +#include <media/nbaio/AudioStreamInSource.h> #include <media/nbaio/AudioStreamOutSink.h> #include <media/nbaio/MonoPipe.h> #include <media/nbaio/MonoPipeReader.h> @@ -51,6 +55,7 @@ #include "AudioFlinger.h" #include "AudioMixer.h" #include "FastMixer.h" +#include "FastCapture.h" #include "ServiceUtilities.h" #include "SchedulingPolicyService.h" @@ -79,6 +84,8 @@ #define ALOGVV(a...) do { } while(0) #endif +#define max(a, b) ((a) > (b) ? (a) : (b)) + namespace android { // retry counts for buffer fill timeout @@ -96,18 +103,18 @@ static const nsecs_t kWarningThrottleNs = seconds(5); // RecordThread loop sleep time upon application overrun or audio HAL read error static const int kRecordThreadSleepUs = 5000; -// maximum time to wait for setParameters to complete -static const nsecs_t kSetParametersTimeoutNs = seconds(2); +// maximum time to wait in sendConfigEvent_l() for a status to be received +static const nsecs_t kConfigEventTimeoutNs = seconds(2); // minimum sleep time for the mixer thread loop when tracks are active but in underrun static const uint32_t kMinThreadSleepTimeUs = 5000; // maximum divider applied to the active sleep time in the mixer thread loop static const uint32_t kMaxThreadSleepTimeShift = 2; -// minimum normal mix buffer size, expressed in milliseconds rather than frames -static const uint32_t kMinNormalMixBufferSizeMs = 20; -// maximum normal mix buffer size -static const uint32_t kMaxNormalMixBufferSizeMs = 24; +// minimum normal sink buffer size, expressed in milliseconds rather than frames +static const uint32_t kMinNormalSinkBufferSizeMs = 20; +// maximum normal sink buffer size +static const uint32_t kMaxNormalSinkBufferSizeMs = 24; // Offloaded output thread standby delay: allows track transition without going to standby static const nsecs_t kOffloadStandbyDelayNs = seconds(1); @@ -129,9 +136,17 @@ static const enum { // up large writes into smaller ones, and the wrapper would need to deal with scheduler. 
} kUseFastMixer = FastMixer_Static; +// Whether to use fast capture +static const enum { + FastCapture_Never, // never initialize or use: for debugging only + FastCapture_Always, // always initialize and use, even if not needed: for debugging only + FastCapture_Static, // initialize if needed, then use all the time if initialized +} kUseFastCapture = FastCapture_Static; + // Priorities for requestPriority static const int kPriorityAudioApp = 2; static const int kPriorityFastMixer = 3; +static const int kPriorityFastCapture = 3; // IAudioFlinger::createTrack() reports back to client the total size of shared memory area // for the track. The client then sub-divides this into smaller buffers for its use. @@ -140,8 +155,39 @@ static const int kPriorityFastMixer = 3; // FIXME It would be better for client to tell AudioFlinger the value of N, // so AudioFlinger could allocate the right amount of memory. // See the client's minBufCount and mNotificationFramesAct calculations for details. + +// This is the default value, if not specified by property. static const int kFastTrackMultiplier = 2; +// The minimum and maximum allowed values +static const int kFastTrackMultiplierMin = 1; +static const int kFastTrackMultiplierMax = 2; + +// The actual value to use, which can be specified per-device via property af.fast_track_multiplier. +static int sFastTrackMultiplier = kFastTrackMultiplier; + +// See Thread::readOnlyHeap(). +// Initially this heap is used to allocate client buffers for "fast" AudioRecord. +// Eventually it will be the single buffer that FastCapture writes into via HAL read(), +// and that all "fast" AudioRecord clients read from. In either case, the size can be small. +static const size_t kRecordThreadReadOnlyHeapSize = 0x2000; + +// ---------------------------------------------------------------------------- + +static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT; + +static void sFastTrackMultiplierInit() +{ + char value[PROPERTY_VALUE_MAX]; + if (property_get("af.fast_track_multiplier", value, NULL) > 0) { + char *endptr; + unsigned long ul = strtoul(value, &endptr, 0); + if (*endptr == '\0' && kFastTrackMultiplierMin <= ul && ul <= kFastTrackMultiplierMax) { + sFastTrackMultiplier = (int) ul; + } + } +} + // ---------------------------------------------------------------------------- #ifdef ADD_BATTERY_DATA @@ -185,7 +231,11 @@ CpuStats::CpuStats() { } -void CpuStats::sample(const String8 &title) { +void CpuStats::sample(const String8 &title +#ifndef DEBUG_CPU_USAGE + __unused +#endif + ) { #ifdef DEBUG_CPU_USAGE // get current thread's delta CPU time in wall clock ns double wcNs; @@ -269,9 +319,9 @@ AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio : Thread(false /*canCallJava*/), mType(type), mAudioFlinger(audioFlinger), - // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are - // set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters() - mParamStatus(NO_ERROR), + // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize + // are set by PlaybackThread::readOutputParameters_l() or + // RecordThread::readInputParameters_l() //FIXME: mStandby should be true here. Is this some kind of hack? 
mStandby(false), mOutDevice(outDevice), mInDevice(inDevice), mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id), @@ -283,12 +333,8 @@ AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio AudioFlinger::ThreadBase::~ThreadBase() { // mConfigEvents should be empty, but just in case it isn't, free the memory it owns - for (size_t i = 0; i < mConfigEvents.size(); i++) { - delete mConfigEvents[i]; - } mConfigEvents.clear(); - mParamCond.broadcast(); // do not lock the mutex in destructor releaseWakeLock_l(); if (mPowerManager != 0) { @@ -297,6 +343,17 @@ AudioFlinger::ThreadBase::~ThreadBase() } } +status_t AudioFlinger::ThreadBase::readyToRun() +{ + status_t status = initCheck(); + if (status == NO_ERROR) { + ALOGI("AudioFlinger's thread %p ready to run", this); + } else { + ALOGE("No working audio driver found."); + } + return status; +} + void AudioFlinger::ThreadBase::exit() { ALOGV("ThreadBase::exit"); @@ -328,16 +385,30 @@ status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs) ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string()); Mutex::Autolock _l(mLock); - mNewParameters.add(keyValuePairs); + return sendSetParameterConfigEvent_l(keyValuePairs); +} + +// sendConfigEvent_l() must be called with ThreadBase::mLock held +// Can temporarily release the lock if waiting for a reply from processConfigEvents_l(). +status_t AudioFlinger::ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event) +{ + status_t status = NO_ERROR; + + mConfigEvents.add(event); + ALOGV("sendConfigEvent_l() num events %d event %d", mConfigEvents.size(), event->mType); mWaitWorkCV.signal(); - // wait condition with timeout in case the thread loop has exited - // before the request could be processed - if (mParamCond.waitRelative(mLock, kSetParametersTimeoutNs) == NO_ERROR) { - status = mParamStatus; - mWaitWorkCV.signal(); - } else { - status = TIMED_OUT; + mLock.unlock(); + { + Mutex::Autolock _l(event->mLock); + while (event->mWaitStatus) { + if (event->mCond.waitRelative(event->mLock, kConfigEventTimeoutNs) != NO_ERROR) { + event->mStatus = TIMED_OUT; + event->mWaitStatus = false; + } + } + status = event->mStatus; } + mLock.lock(); return status; } @@ -350,62 +421,155 @@ void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param) // sendIoConfigEvent_l() must be called with ThreadBase::mLock held void AudioFlinger::ThreadBase::sendIoConfigEvent_l(int event, int param) { - IoConfigEvent *ioEvent = new IoConfigEvent(event, param); - mConfigEvents.add(static_cast<ConfigEvent *>(ioEvent)); - ALOGV("sendIoConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event, - param); - mWaitWorkCV.signal(); + sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, param); + sendConfigEvent_l(configEvent); } // sendPrioConfigEvent_l() must be called with ThreadBase::mLock held void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio) { - PrioConfigEvent *prioEvent = new PrioConfigEvent(pid, tid, prio); - mConfigEvents.add(static_cast<ConfigEvent *>(prioEvent)); - ALOGV("sendPrioConfigEvent_l() num events %d pid %d, tid %d prio %d", - mConfigEvents.size(), pid, tid, prio); - mWaitWorkCV.signal(); + sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio); + sendConfigEvent_l(configEvent); } -void AudioFlinger::ThreadBase::processConfigEvents() +// sendSetParameterConfigEvent_l() must be called with ThreadBase::mLock held +status_t 
AudioFlinger::ThreadBase::sendSetParameterConfigEvent_l(const String8& keyValuePair) { - mLock.lock(); - while (!mConfigEvents.isEmpty()) { - ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size()); - ConfigEvent *event = mConfigEvents[0]; - mConfigEvents.removeAt(0); - // release mLock before locking AudioFlinger mLock: lock order is always - // AudioFlinger then ThreadBase to avoid cross deadlock - mLock.unlock(); - switch(event->type()) { - case CFG_EVENT_PRIO: { - PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event); - // FIXME Need to understand why this has be done asynchronously - int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(), - true /*asynchronous*/); - if (err != 0) { - ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; " - "error %d", - prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err); - } - } break; - case CFG_EVENT_IO: { - IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event); - mAudioFlinger->mLock.lock(); - audioConfigChanged_l(ioEvent->event(), ioEvent->param()); - mAudioFlinger->mLock.unlock(); - } break; - default: - ALOGE("processConfigEvents() unknown event type %d", event->type()); - break; - } - delete event; - mLock.lock(); + sp<ConfigEvent> configEvent = (ConfigEvent *)new SetParameterConfigEvent(keyValuePair); + return sendConfigEvent_l(configEvent); +} + +status_t AudioFlinger::ThreadBase::sendCreateAudioPatchConfigEvent( + const struct audio_patch *patch, + audio_patch_handle_t *handle) +{ + Mutex::Autolock _l(mLock); + sp<ConfigEvent> configEvent = (ConfigEvent *)new CreateAudioPatchConfigEvent(*patch, *handle); + status_t status = sendConfigEvent_l(configEvent); + if (status == NO_ERROR) { + CreateAudioPatchConfigEventData *data = + (CreateAudioPatchConfigEventData *)configEvent->mData.get(); + *handle = data->mHandle; } - mLock.unlock(); + return status; } -void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args) +status_t AudioFlinger::ThreadBase::sendReleaseAudioPatchConfigEvent( + const audio_patch_handle_t handle) +{ + Mutex::Autolock _l(mLock); + sp<ConfigEvent> configEvent = (ConfigEvent *)new ReleaseAudioPatchConfigEvent(handle); + return sendConfigEvent_l(configEvent); +} + + +// post condition: mConfigEvents.isEmpty() +void AudioFlinger::ThreadBase::processConfigEvents_l() +{ + bool configChanged = false; + + while (!mConfigEvents.isEmpty()) { + ALOGV("processConfigEvents_l() remaining events %d", mConfigEvents.size()); + sp<ConfigEvent> event = mConfigEvents[0]; + mConfigEvents.removeAt(0); + switch (event->mType) { + case CFG_EVENT_PRIO: { + PrioConfigEventData *data = (PrioConfigEventData *)event->mData.get(); + // FIXME Need to understand why this has to be done asynchronously + int err = requestPriority(data->mPid, data->mTid, data->mPrio, + true /*asynchronous*/); + if (err != 0) { + ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", + data->mPrio, data->mPid, data->mTid, err); + } + } break; + case CFG_EVENT_IO: { + IoConfigEventData *data = (IoConfigEventData *)event->mData.get(); + audioConfigChanged(data->mEvent, data->mParam); + } break; + case CFG_EVENT_SET_PARAMETER: { + SetParameterConfigEventData *data = (SetParameterConfigEventData *)event->mData.get(); + if (checkForNewParameter_l(data->mKeyValuePairs, event->mStatus)) { + configChanged = true; + } + } break; + case CFG_EVENT_CREATE_AUDIO_PATCH: { + CreateAudioPatchConfigEventData *data = + (CreateAudioPatchConfigEventData 
*)event->mData.get(); + event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle); + } break; + case CFG_EVENT_RELEASE_AUDIO_PATCH: { + ReleaseAudioPatchConfigEventData *data = + (ReleaseAudioPatchConfigEventData *)event->mData.get(); + event->mStatus = releaseAudioPatch_l(data->mHandle); + } break; + default: + ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType); + break; + } + { + Mutex::Autolock _l(event->mLock); + if (event->mWaitStatus) { + event->mWaitStatus = false; + event->mCond.signal(); + } + } + ALOGV_IF(mConfigEvents.isEmpty(), "processConfigEvents_l() DONE thread %p", this); + } + + if (configChanged) { + cacheParameters_l(); + } +} + +String8 channelMaskToString(audio_channel_mask_t mask, bool output) { + String8 s; + if (output) { + if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, "); + if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, "); + if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, "); + if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, "); + if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, "); + if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, "); + if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, "); + if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, "); + if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, "); + if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, "); + if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,"); + if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " ); + if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " ); + if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, "); + } else { + if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, "); + if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, "); + if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, "); + if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, "); + if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, "); + if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, "); + if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, "); + if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, "); + if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, "); + if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, "); + if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, "); + if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, "); + if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, "); + if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, "); + if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, "); + } + int len = s.length(); + if (s.length() > 2) { + char *str = s.lockBuffer(len); + s.unlockBuffer(len - 2); + } + return s; +} + +void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -413,48 +577,32 @@ void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args) bool 
locked = AudioFlinger::dumpTryLock(mLock); if (!locked) { - snprintf(buffer, SIZE, "thread %p maybe dead locked\n", this); - write(fd, buffer, strlen(buffer)); - } - - snprintf(buffer, SIZE, "io handle: %d\n", mId); - result.append(buffer); - snprintf(buffer, SIZE, "TID: %d\n", getTid()); - result.append(buffer); - snprintf(buffer, SIZE, "standby: %d\n", mStandby); - result.append(buffer); - snprintf(buffer, SIZE, "Sample rate: %u\n", mSampleRate); - result.append(buffer); - snprintf(buffer, SIZE, "HAL frame count: %zu\n", mFrameCount); - result.append(buffer); - snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount); - result.append(buffer); - snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask); - result.append(buffer); - snprintf(buffer, SIZE, "Format: %d\n", mFormat); - result.append(buffer); - snprintf(buffer, SIZE, "Frame size: %zu\n", mFrameSize); - result.append(buffer); - - snprintf(buffer, SIZE, "\nPending setParameters commands: \n"); - result.append(buffer); - result.append(" Index Command"); - for (size_t i = 0; i < mNewParameters.size(); ++i) { - snprintf(buffer, SIZE, "\n %02zu ", i); - result.append(buffer); - result.append(mNewParameters[i]); + dprintf(fd, "thread %p maybe dead locked\n", this); + } + + dprintf(fd, " I/O handle: %d\n", mId); + dprintf(fd, " TID: %d\n", getTid()); + dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no"); + dprintf(fd, " Sample rate: %u\n", mSampleRate); + dprintf(fd, " HAL frame count: %zu\n", mFrameCount); + dprintf(fd, " HAL buffer size: %u bytes\n", mBufferSize); + dprintf(fd, " Channel Count: %u\n", mChannelCount); + dprintf(fd, " Channel Mask: 0x%08x (%s)\n", mChannelMask, + channelMaskToString(mChannelMask, mType != RECORD).string()); + dprintf(fd, " Format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat)); + dprintf(fd, " Frame size: %zu\n", mFrameSize); + dprintf(fd, " Pending config events:"); + size_t numConfig = mConfigEvents.size(); + if (numConfig) { + for (size_t i = 0; i < numConfig; i++) { + mConfigEvents[i]->dump(buffer, SIZE); + dprintf(fd, "\n %s", buffer); + } + dprintf(fd, "\n"); + } else { + dprintf(fd, " none\n"); } - snprintf(buffer, SIZE, "\n\nPending config events: \n"); - result.append(buffer); - for (size_t i = 0; i < mConfigEvents.size(); i++) { - mConfigEvents[i]->dump(buffer, SIZE); - result.append(buffer); - } - result.append("\n"); - - write(fd, result.string(), result.size()); - if (locked) { mLock.unlock(); } @@ -466,10 +614,11 @@ void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& char buffer[SIZE]; String8 result; - snprintf(buffer, SIZE, "\n- %zu Effect Chains:\n", mEffectChains.size()); + size_t numEffectChains = mEffectChains.size(); + snprintf(buffer, SIZE, " %zu Effect Chains\n", numEffectChains); write(fd, buffer, strlen(buffer)); - for (size_t i = 0; i < mEffectChains.size(); ++i) { + for (size_t i = 0; i < numEffectChains; ++i) { sp<EffectChain> chain = mEffectChains[i]; if (chain != 0) { chain->dump(fd, args); @@ -513,12 +662,14 @@ void AudioFlinger::ThreadBase::acquireWakeLock_l(int uid) binder, getWakeLockTag(), String16("media"), - uid); + uid, + true /* FIXME force oneway contrary to .aidl */); } else { status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK, binder, getWakeLockTag(), - String16("media")); + String16("media"), + true /* FIXME force oneway contrary to .aidl */); } if (status == NO_ERROR) { mWakeLockToken = binder; @@ -538,7 +689,8 @@ void AudioFlinger::ThreadBase::releaseWakeLock_l() if (mWakeLockToken != 0) { 
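dumpBase() deliberately uses AudioFlinger::dumpTryLock() rather than a blocking lock so that dumpsys still produces output when a thread is wedged: if the lock cannot be taken it prints the "maybe dead locked" warning and dumps the (possibly inconsistent) state anyway, unlocking only if the try succeeded. A rough standalone analogue using std::timed_mutex (the real helper retries a non-blocking tryLock with short sleeps):

    // Sketch of the try-lock-then-dump idea; not the real dumpTryLock() helper.
    #include <chrono>
    #include <cstdio>
    #include <mutex>

    void dumpThreadState(int fd, std::timed_mutex& stateLock, unsigned sampleRate) {
        const bool locked = stateLock.try_lock_for(std::chrono::milliseconds(50));
        if (!locked) {
            dprintf(fd, "thread may be deadlocked; dumping unlocked state\n");
        }
        dprintf(fd, "  Sample rate: %u\n", sampleRate);  // ... more fields in the real dump
        if (locked) {
            stateLock.unlock();
        }
    }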
ALOGV("releaseWakeLock_l() %s", mName); if (mPowerManager != 0) { - mPowerManager->releaseWakeLock(mWakeLockToken, 0); + mPowerManager->releaseWakeLock(mWakeLockToken, 0, + true /* FIXME force oneway contrary to .aidl */); } mWakeLockToken.clear(); } @@ -574,7 +726,8 @@ void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uid if (mPowerManager != 0) { sp<IBinder> binder = new BBinder(); status_t status; - status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array()); + status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(), + true /* FIXME force oneway contrary to .aidl */); ALOGV("acquireWakeLock_l() %s status %d", mName, status); } } @@ -586,7 +739,7 @@ void AudioFlinger::ThreadBase::clearPowerManager() mPowerManager.clear(); } -void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who) +void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused) { sp<ThreadBase> thread = mThread.promote(); if (thread != 0) { @@ -739,8 +892,7 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( int sessionId, effect_descriptor_t *desc, int *enabled, - status_t *status - ) + status_t *status) { sp<EffectModule> effect; sp<EffectHandle> handle; @@ -756,6 +908,24 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( goto Exit; } + // Reject any effect on Direct output threads for now, since the format of + // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo). + if (mType == DIRECT) { + ALOGW("createEffect_l() Cannot add effect %s on Direct output type thread %s", + desc->name, mName); + lStatus = BAD_VALUE; + goto Exit; + } + + // Reject any effect on mixer or duplicating multichannel sinks. + // TODO: fix both format and multichannel issues with effects. + if ((mType == MIXER || mType == DUPLICATING) && mChannelCount != FCC_2) { + ALOGW("createEffect_l() Cannot add effect %s for multichannel(%d) %s threads", + desc->name, mChannelCount, mType == MIXER ? 
"MIXER" : "DUPLICATING"); + lStatus = BAD_VALUE; + goto Exit; + } + // Allow global effects only on offloaded and mixer threads if (sessionId == AUDIO_SESSION_OUTPUT_MIX) { switch (mType) { @@ -829,7 +999,10 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( } // create effect handle and connect it to effect module handle = new EffectHandle(effect, client, effectClient, priority); - lStatus = effect->addHandle(handle.get()); + lStatus = handle->initCheck(); + if (lStatus == OK) { + lStatus = effect->addHandle(handle.get()); + } if (enabled != NULL) { *enabled = (int)effect->isEnabled(); } @@ -850,9 +1023,7 @@ Exit: handle.clear(); } - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return handle; } @@ -976,21 +1147,18 @@ void AudioFlinger::ThreadBase::setMode(audio_mode_t mode) } } -void AudioFlinger::ThreadBase::disconnectEffect(const sp<EffectModule>& effect, - EffectHandle *handle, - bool unpinIfLast) { - - Mutex::Autolock _l(mLock); - ALOGV("disconnectEffect() %p effect %p", this, effect.get()); - // delete the effect module if removing last handle on it - if (effect->removeHandle(handle) == 0) { - if (!effect->isPinned() || unpinIfLast) { - removeEffect_l(effect); - AudioSystem::unregisterEffect(effect->id()); - } - } +void AudioFlinger::ThreadBase::getAudioPortConfig(struct audio_port_config *config) +{ + config->type = AUDIO_PORT_TYPE_MIX; + config->ext.mix.handle = mId; + config->sample_rate = mSampleRate; + config->format = mFormat; + config->channel_mask = mChannelMask; + config->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK| + AUDIO_PORT_CONFIG_FORMAT; } + // ---------------------------------------------------------------------------- // Playback // ---------------------------------------------------------------------------- @@ -1001,8 +1169,18 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge audio_devices_t device, type_t type) : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type), - mNormalFrameCount(0), mMixBuffer(NULL), - mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0), + mNormalFrameCount(0), mSinkBuffer(NULL), + mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision), + mMixerBuffer(NULL), + mMixerBufferSize(0), + mMixerBufferFormat(AUDIO_FORMAT_INVALID), + mMixerBufferValid(false), + mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision), + mEffectBuffer(NULL), + mEffectBufferSize(0), + mEffectBufferFormat(AUDIO_FORMAT_INVALID), + mEffectBufferValid(false), + mSuspended(0), mBytesWritten(0), mActiveTracksGeneration(0), // mStreamTypes[] initialized in constructor body mOutput(output), @@ -1044,11 +1222,11 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge } } - readOutputParameters(); + readOutputParameters_l(); // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor // There is no AUDIO_STREAM_MIN, and ++ operator does not compile - for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT; + for (audio_stream_type_t stream = AUDIO_STREAM_MIN; stream < AUDIO_STREAM_CNT; stream = (audio_stream_type_t) (stream + 1)) { mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream); mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream); @@ -1060,7 +1238,9 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge AudioFlinger::PlaybackThread::~PlaybackThread() { mAudioFlinger->unregisterWriter(mNBLogWriter); - 
delete [] mAllocMixBuffer; + free(mSinkBuffer); + free(mMixerBuffer); + free(mEffectBuffer); } void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args) @@ -1070,13 +1250,13 @@ void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args) dumpEffectChains(fd, args); } -void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args) +void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; String8 result; - result.appendFormat("Output thread %p stream volumes in dB:\n ", this); + result.appendFormat(" Stream volumes in dB: "); for (int i = 0; i < AUDIO_STREAM_CNT; ++i) { const stream_type_t *st = &mStreamTypes[i]; if (i > 0) { @@ -1091,75 +1271,68 @@ void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& ar write(fd, result.string(), result.length()); result.clear(); - snprintf(buffer, SIZE, "Output thread %p tracks\n", this); - result.append(buffer); - Track::appendDumpHeader(result); - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> track = mTracks[i]; - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); + // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way. + FastTrackUnderruns underruns = getFastTrackUnderruns(0); + dprintf(fd, " Normal mixer raw underrun counters: partial=%u empty=%u\n", + underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty); + + size_t numtracks = mTracks.size(); + size_t numactive = mActiveTracks.size(); + dprintf(fd, " %d Tracks", numtracks); + size_t numactiveseen = 0; + if (numtracks) { + dprintf(fd, " of which %d are active\n", numactive); + Track::appendDumpHeader(result); + for (size_t i = 0; i < numtracks; ++i) { + sp<Track> track = mTracks[i]; + if (track != 0) { + bool active = mActiveTracks.indexOf(track) >= 0; + if (active) { + numactiveseen++; + } + track->dump(buffer, SIZE, active); + result.append(buffer); + } } + } else { + result.append("\n"); } - - snprintf(buffer, SIZE, "Output thread %p active tracks\n", this); - result.append(buffer); - Track::appendDumpHeader(result); - for (size_t i = 0; i < mActiveTracks.size(); ++i) { - sp<Track> track = mActiveTracks[i].promote(); - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); + if (numactiveseen != numactive) { + // some tracks in the active list were not in the tracks list + snprintf(buffer, SIZE, " The following tracks are in the active list but" + " not in the track list\n"); + result.append(buffer); + Track::appendDumpHeader(result); + for (size_t i = 0; i < numactive; ++i) { + sp<Track> track = mActiveTracks[i].promote(); + if (track != 0 && mTracks.indexOf(track) < 0) { + track->dump(buffer, SIZE, true); + result.append(buffer); + } } } - write(fd, result.string(), result.size()); - // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way. 
- FastTrackUnderruns underruns = getFastTrackUnderruns(0); - dprintf(fd, "Normal mixer raw underrun counters: partial=%u empty=%u\n", - underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty); + write(fd, result.string(), result.size()); } void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args) { - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this); - result.append(buffer); - snprintf(buffer, SIZE, "Normal frame count: %zu\n", mNormalFrameCount); - result.append(buffer); - snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n", - ns2ms(systemTime() - mLastWriteTime)); - result.append(buffer); - snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites); - result.append(buffer); - snprintf(buffer, SIZE, "delayed writes: %d\n", mNumDelayedWrites); - result.append(buffer); - snprintf(buffer, SIZE, "blocked in write: %d\n", mInWrite); - result.append(buffer); - snprintf(buffer, SIZE, "suspend count: %d\n", mSuspended); - result.append(buffer); - snprintf(buffer, SIZE, "mix buffer : %p\n", mMixBuffer); - result.append(buffer); - write(fd, result.string(), result.size()); - dprintf(fd, "Fast track availMask=%#x\n", mFastTrackAvailMask); + dprintf(fd, "\nOutput thread %p:\n", this); + dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount); + dprintf(fd, " Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime)); + dprintf(fd, " Total writes: %d\n", mNumWrites); + dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites); + dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no"); + dprintf(fd, " Suspend count: %d\n", mSuspended); + dprintf(fd, " Sink buffer : %p\n", mSinkBuffer); + dprintf(fd, " Mixer buffer: %p\n", mMixerBuffer); + dprintf(fd, " Effect buffer: %p\n", mEffectBuffer); + dprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask); dumpBase(fd, args); } // Thread virtuals -status_t AudioFlinger::PlaybackThread::readyToRun() -{ - status_t status = initCheck(); - if (status == NO_ERROR) { - ALOGI("AudioFlinger's thread %p ready to run", this); - } else { - ALOGE("No working audio driver found."); - } - return status; -} void AudioFlinger::PlaybackThread::onFirstRef() { @@ -1182,7 +1355,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, const sp<IMemory>& sharedBuffer, int sessionId, IAudioFlinger::track_flags_t *flags, @@ -1190,6 +1363,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac int uid, status_t *status) { + size_t frameCount = *pFrameCount; sp<Track> track; status_t lStatus; @@ -1215,9 +1389,10 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac ) && // PCM data audio_is_linear_pcm(format) && - // mono or stereo - ( (channelMask == AUDIO_CHANNEL_OUT_MONO) || - (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) && + // identical channel mask to sink, or mono in and stereo sink + (channelMask == mChannelMask || + (channelMask == AUDIO_CHANNEL_OUT_MONO && + mChannelMask == AUDIO_CHANNEL_OUT_STEREO)) && // hardware sample rate (sampleRate == mSampleRate) && // normal mixer has an associated fast mixer @@ -1229,15 +1404,21 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac ) { // if frameCount not specified, then it defaults to fast mixer (HAL) frame count if (frameCount == 0) { - 
frameCount = mFrameCount * kFastTrackMultiplier; + // read the fast track multiplier property the first time it is needed + int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit); + if (ok != 0) { + ALOGE("%s pthread_once failed: %d", __func__, ok); + } + frameCount = mFrameCount * sFastTrackMultiplier; } ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d", frameCount, mFrameCount); } else { ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d " - "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u " + "mFrameCount=%d format=%#x mFormat=%#x isLinear=%d channelMask=%#x " + "sampleRate=%u mSampleRate=%u " "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x", - isTimed, sharedBuffer.get(), frameCount, mFrameCount, format, + isTimed, sharedBuffer.get(), frameCount, mFrameCount, format, mFormat, audio_is_linear_pcm(format), channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask); *flags &= ~IAudioFlinger::TRACK_FAST; @@ -1256,44 +1437,52 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac } } } + *pFrameCount = frameCount; - if (mType == DIRECT) { - if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) { + switch (mType) { + + case DIRECT: + if (audio_is_linear_pcm(format)) { if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) { - ALOGE("createTrack_l() Bad parameter: sampleRate %u format %d, channelMask 0x%08x " - "for output %p with format %d", + ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x " + "for output %p with format %#x", sampleRate, format, channelMask, mOutput, mFormat); lStatus = BAD_VALUE; goto Exit; } } - } else if (mType == OFFLOAD) { + break; + + case OFFLOAD: if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) { - ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \"" - "for output %p with format %d", + ALOGE("createTrack_l() Bad parameter: sampleRate %d format %#x, channelMask 0x%08x \"" + "for output %p with format %#x", sampleRate, format, channelMask, mOutput, mFormat); lStatus = BAD_VALUE; goto Exit; } - } else { - if ((format & AUDIO_FORMAT_MAIN_MASK) != AUDIO_FORMAT_PCM) { - ALOGE("createTrack_l() Bad parameter: format %d \"" - "for output %p with format %d", + break; + + default: + if (!audio_is_linear_pcm(format)) { + ALOGE("createTrack_l() Bad parameter: format %#x \"" + "for output %p with format %#x", format, mOutput, mFormat); lStatus = BAD_VALUE; goto Exit; } - // Resampler implementation limits input sampling rate to 2 x output sampling rate. 
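When a fast track is created without an explicit frame count, the default is now mFrameCount scaled by sFastTrackMultiplier, which is initialized exactly once via pthread_once() the first time it is needed (the real initializer reads a system property). A small sketch of that lazy, thread-safe one-time read, using getenv() as a stand-in for the property lookup and an assumed [1, 2] clamp:

    // Sketch of the one-time lazy init; the real code reads an Android system property.
    #include <cstdlib>
    #include <pthread.h>

    static int sMultiplier = 2;                        // default, like kFastTrackMultiplier
    static pthread_once_t sOnce = PTHREAD_ONCE_INIT;

    static void initMultiplier() {
        if (const char *v = getenv("FAST_TRACK_MULTIPLIER")) {  // stand-in for property_get()
            const int m = atoi(v);
            if (m >= 1 && m <= 2) {                    // assumed sane range
                sMultiplier = m;
            }
        }
    }

    size_t defaultFastTrackFrames(size_t halFrameCount) {
        (void) pthread_once(&sOnce, initMultiplier);   // runs initMultiplier exactly once
        return halFrameCount * sMultiplier;
    }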
- if (sampleRate > mSampleRate*2) { + if (sampleRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) { ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate); lStatus = BAD_VALUE; goto Exit; } + break; + } lStatus = initCheck(); if (lStatus != NO_ERROR) { - ALOGE("Audio driver not initialized."); + ALOGE("createTrack_l() audio driver not initialized"); goto Exit; } @@ -1306,7 +1495,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac uint32_t strategy = AudioSystem::getStrategyForStream(streamType); for (size_t i = 0; i < mTracks.size(); ++i) { sp<Track> t = mTracks[i]; - if (t != 0 && !t->isOutputTrack()) { + if (t != 0 && t->isExternalTrack()) { uint32_t actual = AudioSystem::getStrategyForStream(t->streamType()); if (sessionId == t->sessionId() && strategy != actual) { ALOGE("createTrack_l() mismatched strategy; expected %u but found %u", @@ -1319,18 +1508,21 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac if (!isTimed) { track = new Track(this, client, streamType, sampleRate, format, - channelMask, frameCount, sharedBuffer, sessionId, uid, *flags); + channelMask, frameCount, NULL, sharedBuffer, + sessionId, uid, *flags, TrackBase::TYPE_DEFAULT); } else { track = TimedTrack::create(this, client, streamType, sampleRate, format, channelMask, frameCount, sharedBuffer, sessionId, uid); } - if (track == 0 || track->getCblk() == NULL || track->name() < 0) { - lStatus = NO_MEMORY; + // new Track always returns non-NULL, + // but TimedTrack::create() is a factory that could fail by returning NULL + lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY; + if (lStatus != NO_ERROR) { + ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus); // track must be cleared from the caller as the caller has the AF lock goto Exit; } - mTracks.add(track); sp<EffectChain> chain = getEffectChain_l(sessionId); @@ -1352,9 +1544,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return track; } @@ -1432,7 +1622,7 @@ status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track) // the track is newly added, make sure it fills up all its // buffers before playing. This is to ensure the client will // effectively get the latency it requested. 
- if (!track->isOutputTrack()) { + if (track->isExternalTrack()) { TrackBase::track_state state = track->mState; mLock.unlock(); status = AudioSystem::startOutput(mId, track->streamType(), track->sessionId()); @@ -1473,9 +1663,7 @@ status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track) status = NO_ERROR; } - ALOGV("signal playback thread"); - broadcast_l(); - + onAddNewTrack_l(); return status; } @@ -1487,7 +1675,7 @@ bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track) track->mState = TrackBase::STOPPED; if (!trackActive) { removeTrack_l(track); - } else if (track->isFastTrack() || track->isOffloaded()) { + } else if (track->isFastTrack() || track->isOffloaded() || track->isDirect()) { track->mState = TrackBase::STOPPING_1; } @@ -1538,12 +1726,11 @@ String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys) return out_s8; } -// audioConfigChanged_l() must be called with AudioFlinger::mLock held -void AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) { +void AudioFlinger::PlaybackThread::audioConfigChanged(int event, int param) { AudioSystem::OutputDescriptor desc; void *param2 = NULL; - ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event, + ALOGV("PlaybackThread::audioConfigChanged, thread %p, event %d, param %d", this, event, param); switch (event) { @@ -1554,7 +1741,7 @@ void AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) { desc.format = mFormat; desc.frameCount = mNormalFrameCount; // FIXME see // AudioFlinger::frameCount(audio_io_handle_t) - desc.latency = latency(); + desc.latency = latency_l(); param2 = &desc; break; @@ -1564,7 +1751,7 @@ void AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) { default: break; } - mAudioFlinger->audioConfigChanged_l(event, mId, param2); + mAudioFlinger->audioConfigChanged(event, mId, param2); } void AudioFlinger::PlaybackThread::writeCallback() @@ -1601,7 +1788,7 @@ void AudioFlinger::PlaybackThread::resetDraining(uint32_t sequence) // static int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event, - void *param, + void *param __unused, void *cookie) { AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie; @@ -1620,29 +1807,33 @@ int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event, return 0; } -void AudioFlinger::PlaybackThread::readOutputParameters() +void AudioFlinger::PlaybackThread::readOutputParameters_l() { - // unfortunately we have no way of recovering from errors here, hence the LOG_FATAL + // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common); mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common); if (!audio_is_output_channel(mChannelMask)) { - LOG_FATAL("HAL channel mask %#x not valid for output", mChannelMask); + LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask); } - if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) { - LOG_FATAL("HAL channel mask %#x not supported for mixed output; " - "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask); + if ((mType == MIXER || mType == DUPLICATING) + && !isValidPcmSinkChannelMask(mChannelMask)) { + LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output", + mChannelMask); } - mChannelCount = popcount(mChannelMask); - mFormat = 
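readOutputParameters_l() (beginning here) treats a bad HAL-reported configuration as unrecoverable: it fetches the sample rate, channel mask and format from the stream and aborts with LOG_ALWAYS_FATAL if the mask is not an output mask or, for mixed (MIXER/DUPLICATING) outputs, not one the PCM sink code can handle. A simplified stand-in for those checks (popcount replaces audio_channel_count_from_out_mask(), and the stereo-only rule is an assumption standing in for isValidPcmSinkChannelMask()):

    // Sketch only; the stereo-only restriction below is a simplification.
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    void validateSinkChannelMask(uint32_t channelMask, bool isMixedOutput) {
        const unsigned count = (unsigned)__builtin_popcount(channelMask);
        if (count == 0) {
            fprintf(stderr, "HAL channel mask %#x not valid for output\n",
                    (unsigned)channelMask);
            abort();                        // the real code uses LOG_ALWAYS_FATAL
        }
        if (isMixedOutput && count != 2) {  // stand-in for isValidPcmSinkChannelMask()
            fprintf(stderr, "HAL channel mask %#x not supported for mixed output\n",
                    (unsigned)channelMask);
            abort();
        }
    }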
mOutput->stream->common.get_format(&mOutput->stream->common); + mChannelCount = audio_channel_count_from_out_mask(mChannelMask); + mHALFormat = mOutput->stream->common.get_format(&mOutput->stream->common); + mFormat = mHALFormat; if (!audio_is_valid_format(mFormat)) { - LOG_FATAL("HAL format %d not valid for output", mFormat); + LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat); } - if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) { - LOG_FATAL("HAL format %d not supported for mixed output; must be AUDIO_FORMAT_PCM_16_BIT", + if ((mType == MIXER || mType == DUPLICATING) + && !isValidPcmSinkFormat(mFormat)) { + LOG_FATAL("HAL format %#x not supported for mixed output", mFormat); } - mFrameSize = audio_stream_frame_size(&mOutput->stream->common); - mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize; + mFrameSize = audio_stream_out_frame_size(mOutput->stream); + mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common); + mFrameCount = mBufferSize / mFrameSize; if (mFrameCount & 15) { ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames", mFrameCount); @@ -1657,12 +1848,12 @@ void AudioFlinger::PlaybackThread::readOutputParameters() } } - // Calculate size of normal mix buffer relative to the HAL output buffer size + // Calculate size of normal sink buffer relative to the HAL output buffer size double multiplier = 1.0; if (mType == MIXER && (kUseFastMixer == FastMixer_Static || kUseFastMixer == FastMixer_Dynamic)) { - size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000; - size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000; + size_t minNormalFrameCount = (kMinNormalSinkBufferSizeMs * mSampleRate) / 1000; + size_t maxNormalFrameCount = (kMaxNormalSinkBufferSizeMs * mSampleRate) / 1000; // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer minNormalFrameCount = (minNormalFrameCount + 15) & ~15; maxNormalFrameCount = maxNormalFrameCount & ~15; @@ -1680,7 +1871,7 @@ void AudioFlinger::PlaybackThread::readOutputParameters() } } else { // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL - // SRC (it would be unusual for the normal mix buffer size to not be a multiple of fast + // SRC (it would be unusual for the normal sink buffer size to not be a multiple of fast // track, but we sometimes have to do this to satisfy the maximum frame count // constraint) // FIXME this rounding up should not be done if no HAL SRC @@ -1695,19 +1886,43 @@ void AudioFlinger::PlaybackThread::readOutputParameters() } mNormalFrameCount = multiplier * mFrameCount; // round up to nearest 16 frames to satisfy AudioMixer - mNormalFrameCount = (mNormalFrameCount + 15) & ~15; - ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount, + if (mType == MIXER || mType == DUPLICATING) { + mNormalFrameCount = (mNormalFrameCount + 15) & ~15; + } + ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount, mNormalFrameCount); - delete[] mAllocMixBuffer; - size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize; - mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1]; - mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align); - memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize); + // mSinkBuffer is the sink buffer. 
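For MIXER threads running a fast mixer, the normal sink buffer is sized as a multiple of the HAL buffer so that one normal-mixer period falls between kMinNormalSinkBufferSizeMs and kMaxNormalSinkBufferSizeMs, then rounded up to a multiple of 16 frames for AudioMixer. A simplified version of that sizing arithmetic (the 20/24 ms bounds are assumed here, and the real code additionally prefers even multiples to line up with fast-track doubling):

    // Sketch of the normal-sink sizing rule; bounds and rounding policy simplified.
    #include <cstddef>
    #include <cstdint>

    size_t normalSinkFrameCount(size_t halFrameCount, uint32_t sampleRate) {
        size_t minFrames = ((20 * sampleRate) / 1000 + 15) & ~(size_t)15;  // ~20 ms, rounded up
        size_t maxFrames = ((24 * sampleRate) / 1000) & ~(size_t)15;       // ~24 ms, rounded down
        size_t multiplier = (minFrames + halFrameCount - 1) / halFrameCount;  // ceil
        if (multiplier == 0) {
            multiplier = 1;
        }
        while (multiplier > 1 && multiplier * halFrameCount > maxFrames) {
            --multiplier;                               // stay at or below the upper bound
        }
        return (multiplier * halFrameCount + 15) & ~(size_t)15;  // AudioMixer: multiple of 16
    }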
Size is always multiple-of-16 frames. + // Originally this was int16_t[] array, need to remove legacy implications. + free(mSinkBuffer); + mSinkBuffer = NULL; + // For sink buffer size, we use the frame size from the downstream sink to avoid problems + // with non PCM formats for compressed music, e.g. AAC, and Offload threads. + const size_t sinkBufferSize = mNormalFrameCount * mFrameSize; + (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize); + + // We resize the mMixerBuffer according to the requirements of the sink buffer which + // drives the output. + free(mMixerBuffer); + mMixerBuffer = NULL; + if (mMixerBufferEnabled) { + mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // also valid: AUDIO_FORMAT_PCM_16_BIT. + mMixerBufferSize = mNormalFrameCount * mChannelCount + * audio_bytes_per_sample(mMixerBufferFormat); + (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize); + } + free(mEffectBuffer); + mEffectBuffer = NULL; + if (mEffectBufferEnabled) { + mEffectBufferFormat = AUDIO_FORMAT_PCM_16_BIT; // Note: Effects support 16b only + mEffectBufferSize = mNormalFrameCount * mChannelCount + * audio_bytes_per_sample(mEffectBufferFormat); + (void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize); + } // force reconfiguration of effect chains and engines to take new buffer size and audio // parameters into account - // Note that mLock is not held when readOutputParameters() is called from the constructor + // Note that mLock is not held when readOutputParameters_l() is called from the constructor // but in this case nothing is done below as no audio sessions have effect yet so it doesn't // matter. // create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains @@ -1841,10 +2056,10 @@ void AudioFlinger::PlaybackThread::threadLoop_removeTracks( const Vector< sp<Track> >& tracksToRemove) { size_t count = tracksToRemove.size(); - if (count) { + if (count > 0) { for (size_t i = 0 ; i < count ; i++) { const sp<Track>& track = tracksToRemove.itemAt(i); - if (!track->isOutputTrack()) { + if (track->isExternalTrack()) { AudioSystem::stopOutput(mId, track->streamType(), track->sessionId()); #ifdef ADD_BATTERY_DATA // to track the speaker usage @@ -1882,12 +2097,13 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() mLastWriteTime = systemTime(); mInWrite = true; ssize_t bytesWritten; + const size_t offset = mCurrentWriteLength - mBytesRemaining; // If an NBAIO sink is present, use it to write the normal mixer's submix if (mNormalSink != 0) { -#define mBitShift 2 // FIXME - size_t count = mBytesRemaining >> mBitShift; - size_t offset = (mCurrentWriteLength - mBytesRemaining) >> 1; + + const size_t count = mBytesRemaining / mFrameSize; + ATRACE_BEGIN("write"); // update the setpoint when AudioFlinger::mScreenState changes uint32_t screenState = AudioFlinger::mScreenState; @@ -1899,10 +2115,10 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2); } } - ssize_t framesWritten = mNormalSink->write(mMixBuffer + offset, count); + ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count); ATRACE_END(); if (framesWritten > 0) { - bytesWritten = framesWritten << mBitShift; + bytesWritten = framesWritten * mFrameSize; } else { bytesWritten = framesWritten; } @@ -1911,13 +2127,14 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() size_t totalFramesWritten = mNormalSink->framesWritten(); if (totalFramesWritten >= mLatchD.mTimestamp.mPosition) { mLatchD.mUnpresentedFrames = 
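The sink, mixer and effect buffers above are now allocated with posix_memalign() at 32-byte alignment and sized from frame count × channel count × bytes per sample of each buffer's format, paired with free() in the destructor. A minimal sketch of that allocation step (the real code derives the sample size with audio_bytes_per_sample()):

    // Sketch of the aligned-buffer allocation; caller releases the result with free().
    #include <cstdlib>
    #include <cstring>

    void *allocateMixBuffer(size_t frameCount, size_t channelCount, size_t bytesPerSample) {
        void *buf = nullptr;
        const size_t size = frameCount * channelCount * bytesPerSample;
        if (posix_memalign(&buf, 32, size) != 0) {   // 32-byte aligned, like mSinkBuffer
            return nullptr;
        }
        memset(buf, 0, size);                        // start from silence
        return buf;
    }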
totalFramesWritten - mLatchD.mTimestamp.mPosition; + // mLatchD.mFramesReleased is set immediately before D is clocked into Q mLatchDValid = true; } } // otherwise use the HAL / AudioStreamOut directly } else { // Direct output and offload threads - size_t offset = (mCurrentWriteLength - mBytesRemaining); + if (mUseAsyncWrite) { ALOGW_IF(mWriteAckSequence & 1, "threadLoop_write(): out of sequence write request"); mWriteAckSequence += 2; @@ -1928,7 +2145,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() // FIXME We should have an implementation of timestamps for direct output threads. // They are used e.g for multichannel PCM playback over HDMI. bytesWritten = mOutput->stream->write(mOutput->stream, - (char *)mMixBuffer + offset, mBytesRemaining); + (char *)mSinkBuffer + offset, mBytesRemaining); if (mUseAsyncWrite && ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) { // do not wait for async callback in case of error of full write @@ -1967,7 +2184,7 @@ void AudioFlinger::PlaybackThread::threadLoop_exit() /* The derived values that are cached: - - mixBufferSize from frame count * frame size + - mSinkBufferSize from frame count * frame size - activeSleepTime from activeSleepTimeUs() - idleSleepTime from idleSleepTimeUs() - standbyDelay from mActiveSleepTimeUs (DIRECT only) @@ -1986,7 +2203,7 @@ The parameters that affect these derived values are: void AudioFlinger::PlaybackThread::cacheParameters_l() { - mixBufferSize = mNormalFrameCount * mFrameSize; + mSinkBufferSize = mNormalFrameCount * mFrameSize; activeSleepTime = activeSleepTimeUs(); idleSleepTime = idleSleepTimeUs(); } @@ -2009,13 +2226,14 @@ void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamTy status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain) { int session = chain->sessionId(); - int16_t *buffer = mMixBuffer; + int16_t* buffer = reinterpret_cast<int16_t*>(mEffectBufferEnabled + ? mEffectBuffer : mSinkBuffer); bool ownsBuffer = false; ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session); if (session > 0) { // Only one effect chain can be present in direct output thread and it uses - // the mix buffer as input + // the sink buffer as input if (mType != DIRECT) { size_t numSamples = mNormalFrameCount * mChannelCount; buffer = new int16_t[numSamples]; @@ -2047,9 +2265,10 @@ status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& c } } } - + chain->setThread(this); chain->setInBuffer(buffer, ownsBuffer); - chain->setOutBuffer(mMixBuffer); + chain->setOutBuffer(reinterpret_cast<int16_t*>(mEffectBufferEnabled + ? 
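threadLoop_write() above replaces the hard-coded >> mBitShift conversions with divisions and multiplications by mFrameSize, so the same path works for any frame size and copes with partial writes by the sink. A sketch of that bytes-to-frames bookkeeping around a short-write-capable sink (writeFrames here is an assumed callback, not the NBAIO API):

    // Sketch only: convert bytes to frames for the sink, and frames back to bytes.
    #include <cstddef>
    #include <functional>
    #include <sys/types.h>

    ssize_t writeAll(const char *buffer, size_t bytes, size_t frameSize,
                     const std::function<ssize_t(const void *, size_t)> &writeFrames) {
        size_t remaining = bytes;
        while (remaining >= frameSize) {
            const size_t offset = bytes - remaining;
            const ssize_t written = writeFrames(buffer + offset, remaining / frameSize);
            if (written <= 0) {
                return written;                          // error or would-block
            }
            remaining -= (size_t)written * frameSize;    // frames back to bytes
        }
        return (ssize_t)(bytes - remaining);
    }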
mEffectBuffer : mSinkBuffer)); // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect // chains list in order to be processed last as it contains output stage effects // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before @@ -2099,7 +2318,7 @@ size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& for (size_t i = 0; i < mTracks.size(); ++i) { sp<Track> track = mTracks[i]; if (session == track->sessionId()) { - track->setMainBuffer(mMixBuffer); + track->setMainBuffer(reinterpret_cast<int16_t*>(mSinkBuffer)); chain->decTrackCnt(); } } @@ -2189,28 +2408,36 @@ bool AudioFlinger::PlaybackThread::threadLoop() Vector< sp<EffectChain> > effectChains; - processConfigEvents(); - { // scope for mLock Mutex::Autolock _l(mLock); + processConfigEvents_l(); + if (logString != NULL) { mNBLogWriter->logTimestamp(); mNBLogWriter->log(logString); logString = NULL; } + // Gather the framesReleased counters for all active tracks, + // and latch them atomically with the timestamp. + // FIXME We're using raw pointers as indices. A unique track ID would be a better index. + mLatchD.mFramesReleased.clear(); + size_t size = mActiveTracks.size(); + for (size_t i = 0; i < size; i++) { + sp<Track> t = mActiveTracks[i].promote(); + if (t != 0) { + mLatchD.mFramesReleased.add(t.get(), + t->mAudioTrackServerProxy->framesReleased()); + } + } if (mLatchDValid) { mLatchQ = mLatchD; mLatchDValid = false; mLatchQValid = true; } - if (checkForNewParameters_l()) { - cacheParameters_l(); - } - saveOutputTracks(); if (mSignalPending) { // A signal was raised while we were unlocked @@ -2302,14 +2529,32 @@ bool AudioFlinger::PlaybackThread::threadLoop() // must be written to HAL threadLoop_sleepTime(); if (sleepTime == 0) { - mCurrentWriteLength = mixBufferSize; + mCurrentWriteLength = mSinkBufferSize; } } + // Either threadLoop_mix() or threadLoop_sleepTime() should have set + // mMixerBuffer with data if mMixerBufferValid is true and sleepTime == 0. + // Merge mMixerBuffer data into mEffectBuffer (if any effects are valid) + // or mSinkBuffer (if there are no effects). + // + // This is done pre-effects computation; if effects change to + // support higher precision, this needs to move. + // + // mMixerBufferValid is only set true by MixerThread::prepareTracks_l(). + // TODO use sleepTime == 0 as an additional condition. + if (mMixerBufferValid) { + void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer; + audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat; + + memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat, + mNormalFrameCount * mChannelCount); + } + mBytesRemaining = mCurrentWriteLength; if (isSuspended()) { sleepTime = suspendSleepTimeUs(); // simulate write to HAL when suspended - mBytesWritten += mixBufferSize; + mBytesWritten += mSinkBufferSize; mBytesRemaining = 0; } @@ -2330,6 +2575,16 @@ bool AudioFlinger::PlaybackThread::threadLoop() } } + // Only if the Effects buffer is enabled and there is data in the + // Effects buffer (buffer valid), we need to + // copy into the sink buffer. + // TODO use sleepTime == 0 as an additional condition. 
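When mMixerBufferValid is set, the accumulation buffer is merged into the effect or sink buffer with memcpy_by_audio_format(), passing a sample count of mNormalFrameCount * mChannelCount. For the common float-accumulation, 16-bit-sink case that conversion amounts to clamping and scaling each sample, roughly as below (a simplified stand-in, not the audio_utils implementation):

    // Simplified stand-in for memcpy_by_audio_format(dst, PCM_16_BIT, src, PCM_FLOAT, n).
    #include <cstddef>
    #include <cstdint>

    void floatToInt16(int16_t *dst, const float *src, size_t count) {
        for (size_t i = 0; i < count; i++) {
            float v = src[i];
            if (v > 1.0f) v = 1.0f;          // clamp to full scale
            else if (v < -1.0f) v = -1.0f;
            dst[i] = (int16_t)(v * 32767.f);
        }
    }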
+ if (mEffectBufferValid) { + //ALOGV("writing effect buffer to sink buffer format %#x", mFormat); + memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat, + mNormalFrameCount * mChannelCount); + } + // enable changes in effect chain unlockEffectChains(effectChains); @@ -2348,20 +2603,20 @@ bool AudioFlinger::PlaybackThread::threadLoop() (mMixerStatus == MIXER_DRAIN_ALL)) { threadLoop_drain(); } -if (mType == MIXER) { - // write blocked detection - nsecs_t now = systemTime(); - nsecs_t delta = now - mLastWriteTime; - if (!mStandby && delta > maxPeriod) { - mNumDelayedWrites++; - if ((now - lastWarning) > kWarningThrottleNs) { - ATRACE_NAME("underrun"); - ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p", - ns2ms(delta), mNumDelayedWrites, this); - lastWarning = now; + if (mType == MIXER) { + // write blocked detection + nsecs_t now = systemTime(); + nsecs_t delta = now - mLastWriteTime; + if (!mStandby && delta > maxPeriod) { + mNumDelayedWrites++; + if ((now - lastWarning) > kWarningThrottleNs) { + ATRACE_NAME("underrun"); + ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p", + ns2ms(delta), mNumDelayedWrites, this); + lastWarning = now; + } } } -} } else { usleep(sleepTime); @@ -2389,12 +2644,9 @@ if (mType == MIXER) { threadLoop_exit(); - // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ... - if (mType == MIXER || mType == DIRECT || mType == OFFLOAD) { - // put output stream into standby mode - if (!mStandby) { - mOutput->stream->common.standby(&mOutput->stream->common); - } + if (!mStandby) { + threadLoop_standby(); + mStandby = true; } releaseWakeLock(); @@ -2409,7 +2661,7 @@ if (mType == MIXER) { void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove) { size_t count = tracksToRemove.size(); - if (count) { + if (count > 0) { for (size_t i=0 ; i<count ; i++) { const sp<Track>& track = tracksToRemove.itemAt(i); mActiveTracks.remove(track); @@ -2435,7 +2687,7 @@ status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp) if (mNormalSink != 0) { return mNormalSink->getTimestamp(timestamp); } - if (mType == OFFLOAD && mOutput->stream->get_presentation_position) { + if ((mType == OFFLOAD || mType == DIRECT) && mOutput->stream->get_presentation_position) { uint64_t position64; int ret = mOutput->stream->get_presentation_position( mOutput->stream, &position64, ×tamp.mTime); @@ -2446,6 +2698,67 @@ status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp) } return INVALID_OPERATION; } + +status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_patch *patch, + audio_patch_handle_t *handle) +{ + status_t status = NO_ERROR; + if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + // store new device and send to effects + audio_devices_t type = AUDIO_DEVICE_NONE; + for (unsigned int i = 0; i < patch->num_sinks; i++) { + type |= patch->sinks[i].ext.device.type; + } + mOutDevice = type; + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setDevice_l(mOutDevice); + } + + audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice(); + status = hwDevice->create_audio_patch(hwDevice, + patch->num_sources, + patch->sources, + patch->num_sinks, + patch->sinks, + handle); + } else { + ALOG_ASSERT(false, "createAudioPatch_l() called on a pre 3.0 HAL"); + } + return status; +} + +status_t AudioFlinger::PlaybackThread::releaseAudioPatch_l(const audio_patch_handle_t handle) +{ + 
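The re-indented block above is the MIXER write-blocked detection: if the gap since the previous write exceeds maxPeriod the thread counts a delayed write and logs a warning, but only once per kWarningThrottleNs to avoid log spam. A standalone sketch of that throttled monitor (thresholds are parameters here, not the real constants):

    // Sketch: count over-long write periods and rate-limit the warning.
    #include <cstdint>
    #include <cstdio>
    #include <ctime>

    static int64_t nowNs() {
        timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    struct WriteMonitor {
        int64_t lastWriteNs = 0;
        int64_t lastWarningNs = 0;
        unsigned delayedWrites = 0;

        void onWriteDone(int64_t maxPeriodNs, int64_t warningThrottleNs) {
            const int64_t now = nowNs();
            if (lastWriteNs != 0 && now - lastWriteNs > maxPeriodNs) {
                ++delayedWrites;
                if (now - lastWarningNs > warningThrottleNs) {   // like kWarningThrottleNs
                    fprintf(stderr, "write blocked for %lld ms, %u delayed writes\n",
                            (long long)((now - lastWriteNs) / 1000000), delayedWrites);
                    lastWarningNs = now;
                }
            }
            lastWriteNs = now;
        }
    };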
status_t status = NO_ERROR; + if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice(); + status = hwDevice->release_audio_patch(hwDevice, handle); + } else { + ALOG_ASSERT(false, "releaseAudioPatch_l() called on a pre 3.0 HAL"); + } + return status; +} + +void AudioFlinger::PlaybackThread::addPatchTrack(const sp<PatchTrack>& track) +{ + Mutex::Autolock _l(mLock); + mTracks.add(track); +} + +void AudioFlinger::PlaybackThread::deletePatchTrack(const sp<PatchTrack>& track) +{ + Mutex::Autolock _l(mLock); + destroyTrack_l(track); +} + +void AudioFlinger::PlaybackThread::getAudioPortConfig(struct audio_port_config *config) +{ + ThreadBase::getAudioPortConfig(config); + config->role = AUDIO_PORT_ROLE_SOURCE; + config->ext.mix.hw_module = mOutput->audioHwDev->handle(); + config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT; +} + // ---------------------------------------------------------------------------- AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, @@ -2465,15 +2778,10 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud mNormalFrameCount); mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate); - // FIXME - Current mixer implementation only supports stereo output - if (mChannelCount != FCC_2) { - ALOGE("Invalid audio hardware channel count %d", mChannelCount); - } - // create an NBAIO sink for the HAL output stream, and negotiate mOutputSink = new AudioStreamOutSink(output->stream); size_t numCounterOffers = 0; - const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount)}; + const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)}; ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers); ALOG_ASSERT(index == 0); @@ -2492,9 +2800,28 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud break; } if (initFastMixer) { + audio_format_t fastMixerFormat; + if (mMixerBufferEnabled && mEffectBufferEnabled) { + fastMixerFormat = AUDIO_FORMAT_PCM_FLOAT; + } else { + fastMixerFormat = AUDIO_FORMAT_PCM_16_BIT; + } + if (mFormat != fastMixerFormat) { + // change our Sink format to accept our intermediate precision + mFormat = fastMixerFormat; + free(mSinkBuffer); + mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat); + const size_t sinkBufferSize = mNormalFrameCount * mFrameSize; + (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize); + } // create a MonoPipe to connect our submix to FastMixer NBAIO_Format format = mOutputSink->format(); + NBAIO_Format origformat = format; + // adjust format to match that of the Fast Mixer + format.mFormat = fastMixerFormat; + format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount; + // This pipe depth compensates for scheduling latency of the normal mixer thread. // When it wakes up after a maximum latency, it runs a few cycles quickly before // finally blocking. Note the pipe implementation rounds up the request to a power of 2. 
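The comment above notes that the MonoPipe rounds the requested depth up to a power of two. The usual bit-twiddling round-up that behaviour implies looks like this (an illustrative helper, not the NBAIO code):

    // Round a 32-bit size up to the next power of two; 0 and 1 both map to 1 here.
    #include <cstdint>

    uint32_t roundUpToPowerOfTwo(uint32_t v) {
        if (v <= 1) return 1;
        v--;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return v + 1;
    }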
@@ -2510,14 +2837,15 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud #ifdef TEE_SINK if (mTeeSinkOutputEnabled) { // create a Pipe to archive a copy of FastMixer's output for dumpsys - Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, format); + Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, origformat); + const NBAIO_Format offers2[1] = {origformat}; numCounterOffers = 0; - index = teeSink->negotiate(offers, 1, NULL, numCounterOffers); + index = teeSink->negotiate(offers2, 1, NULL, numCounterOffers); ALOG_ASSERT(index == 0); mTeeSink = teeSink; PipeReader *teeSource = new PipeReader(*teeSink); numCounterOffers = 0; - index = teeSource->negotiate(offers, 1, NULL, numCounterOffers); + index = teeSource->negotiate(offers2, 1, NULL, numCounterOffers); ALOG_ASSERT(index == 0); mTeeSource = teeSource; } @@ -2535,6 +2863,8 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud // wrap the source side of the MonoPipe to make it an AudioBufferProvider fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe)); fastTrack->mVolumeProvider = NULL; + fastTrack->mChannelMask = mChannelMask; // mPipeSink channel mask for audio to FastMixer + fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer fastTrack->mGeneration++; state->mFastTracksGen++; state->mTrackMask = 1; @@ -2578,8 +2908,6 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud } #endif - } else { - mFastMixer = NULL; } switch (kUseFastMixer) { @@ -2598,7 +2926,7 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud AudioFlinger::MixerThread::~MixerThread() { - if (mFastMixer != NULL) { + if (mFastMixer != 0) { FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); if (state->mCommand == FastMixerState::COLD_IDLE) { @@ -2620,7 +2948,7 @@ AudioFlinger::MixerThread::~MixerThread() ALOG_ASSERT(fastTrack->mBufferProvider != NULL); delete fastTrack->mBufferProvider; sq->end(false /*didModify*/); - delete mFastMixer; + mFastMixer.clear(); #ifdef AUDIO_WATCHDOG if (mAudioWatchdog != 0) { mAudioWatchdog->requestExit(); @@ -2636,7 +2964,7 @@ AudioFlinger::MixerThread::~MixerThread() uint32_t AudioFlinger::MixerThread::correctLatency_l(uint32_t latency) const { - if (mFastMixer != NULL) { + if (mFastMixer != 0) { MonoPipe *pipe = (MonoPipe *)mPipeSink.get(); latency += (pipe->getAvgFrames() * 1000) / mSampleRate; } @@ -2653,7 +2981,7 @@ ssize_t AudioFlinger::MixerThread::threadLoop_write() { // FIXME we should only do one push per cycle; confirm this is true // Start the fast mixer if it's not already running - if (mFastMixer != NULL) { + if (mFastMixer != 0) { FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); if (state->mCommand != FastMixerState::MIX_WRITE && @@ -2687,7 +3015,7 @@ ssize_t AudioFlinger::MixerThread::threadLoop_write() void AudioFlinger::MixerThread::threadLoop_standby() { // Idle the fast mixer if it's currently running - if (mFastMixer != NULL) { + if (mFastMixer != 0) { FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); if (!(state->mCommand & FastMixerState::IDLE)) { @@ -2713,12 +3041,6 @@ void AudioFlinger::MixerThread::threadLoop_standby() PlaybackThread::threadLoop_standby(); } -// Empty implementation for standard mixer -// Overridden for offloaded playback -void AudioFlinger::PlaybackThread::flushOutput_l() -{ -} - bool AudioFlinger::PlaybackThread::waitingAsyncCallback_l() 
{ return false; @@ -2750,6 +3072,12 @@ void AudioFlinger::PlaybackThread::threadLoop_standby() } } +void AudioFlinger::PlaybackThread::onAddNewTrack_l() +{ + ALOGV("signal playback thread"); + broadcast_l(); +} + void AudioFlinger::MixerThread::threadLoop_mix() { // obtain the presentation timestamp of the next output buffer @@ -2768,7 +3096,7 @@ void AudioFlinger::MixerThread::threadLoop_mix() // mix buffers... mAudioMixer->process(pts); - mCurrentWriteLength = mixBufferSize; + mCurrentWriteLength = mSinkBufferSize; // increase sleep time progressively when application underrun condition clears. // Only increase sleep time if the mixer is ready for two consecutive times to avoid // that a steady state of alternating ready/not ready conditions keeps the sleep time @@ -2779,6 +3107,7 @@ void AudioFlinger::MixerThread::threadLoop_mix() sleepTime = 0; standbyTime = systemTime() + standbyDelay; //TODO: delay standby when effects have a tail + } void AudioFlinger::MixerThread::threadLoop_sleepTime() @@ -2802,7 +3131,13 @@ void AudioFlinger::MixerThread::threadLoop_sleepTime() sleepTime = idleSleepTime; } } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) { - memset (mMixBuffer, 0, mixBufferSize); + // clear out mMixerBuffer or mSinkBuffer, to ensure buffers are cleared + // before effects processing or output. + if (mMixerBufferValid) { + memset(mMixerBuffer, 0, mMixerBufferSize); + } else { + memset(mSinkBuffer, 0, mSinkBufferSize); + } sleepTime = 0; ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED), "anticipated start"); @@ -2844,11 +3179,14 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac FastMixerState *state = NULL; bool didModify = false; FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED; - if (mFastMixer != NULL) { + if (mFastMixer != 0) { sq = mFastMixer->sq(); state = sq->begin(); } + mMixerBufferValid = false; // mMixerBuffer has no valid data until appropriate tracks found. + mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found. + for (size_t i=0 ; i<count ; i++) { const sp<Track> t = mActiveTracks[i].promote(); if (t == 0) { @@ -2967,7 +3305,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac break; case TrackBase::IDLE: default: - LOG_FATAL("unexpected track state %d", track->mState); + LOG_ALWAYS_FATAL("unexpected track state %d", track->mState); } if (isActive) { @@ -2978,6 +3316,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac fastTrack->mBufferProvider = eabp; fastTrack->mVolumeProvider = vp; fastTrack->mChannelMask = track->mChannelMask; + fastTrack->mFormat = track->mFormat; fastTrack->mGeneration++; state->mTrackMask |= 1 << j; didModify = true; @@ -2998,7 +3337,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac // because we're about to decrement the last sp<> on those tracks. 
block = FastMixerStateQueue::BLOCK_UNTIL_ACKED; } else { - LOG_FATAL("fast track %d should have been active", j); + LOG_ALWAYS_FATAL("fast track %d should have been active", j); } tracksToRemove->add(track); // Avoids a misleading display in dumpsys @@ -3027,12 +3366,14 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac // +1 for rounding and +1 for additional sample needed for interpolation desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1; // add frames already consumed but not yet released by the resampler - // because cblk->framesReady() will include these frames + // because mAudioTrackServerProxy->framesReady() will include these frames desiredFrames += mAudioMixer->getUnreleasedFrames(track->name()); +#if 0 // the minimum track buffer size is normally twice the number of frames necessary // to fill one buffer and the resampler should not leave more than one buffer worth // of unreleased frames after each pass, but just in case... ALOG_ASSERT(desiredFrames <= cblk->frameCount_); +#endif } uint32_t minFrames = 1; if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() && @@ -3048,10 +3389,14 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac mixedTracks++; - // track->mainBuffer() != mMixBuffer means there is an effect chain - // connected to the track + // track->mainBuffer() != mSinkBuffer or mMixerBuffer means + // there is an effect chain connected to the track chain.clear(); - if (track->mainBuffer() != mMixBuffer) { + if (track->mainBuffer() != mSinkBuffer && + track->mainBuffer() != mMixerBuffer) { + if (mEffectBufferEnabled) { + mEffectBufferValid = true; // Later can set directly. + } chain = getEffectChain_l(track->sessionId()); // Delegate volume control to effect in track effect chain if needed if (chain != 0) { @@ -3081,9 +3426,11 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac } // compute volume for this track - uint32_t vl, vr, va; + uint32_t vl, vr; // in U8.24 integer format + float vlf, vrf, vaf; // in [0.0, 1.0] float format if (track->isPausing() || mStreamTypes[track->streamType()].mute) { - vl = vr = va = 0; + vl = vr = 0; + vlf = vrf = vaf = 0.; if (track->isPausing()) { track->setPaused(); } @@ -3093,37 +3440,44 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac float typeVolume = mStreamTypes[track->streamType()].volume; float v = masterVolume * typeVolume; AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy; - uint32_t vlr = proxy->getVolumeLR(); - vl = vlr & 0xFFFF; - vr = vlr >> 16; + gain_minifloat_packed_t vlr = proxy->getVolumeLR(); + vlf = float_from_gain(gain_minifloat_unpack_left(vlr)); + vrf = float_from_gain(gain_minifloat_unpack_right(vlr)); // track volumes come from shared memory, so can't be trusted and must be clamped - if (vl > MAX_GAIN_INT) { - ALOGV("Track left volume out of range: %04X", vl); - vl = MAX_GAIN_INT; + if (vlf > GAIN_FLOAT_UNITY) { + ALOGV("Track left volume out of range: %.3g", vlf); + vlf = GAIN_FLOAT_UNITY; } - if (vr > MAX_GAIN_INT) { - ALOGV("Track right volume out of range: %04X", vr); - vr = MAX_GAIN_INT; + if (vrf > GAIN_FLOAT_UNITY) { + ALOGV("Track right volume out of range: %.3g", vrf); + vrf = GAIN_FLOAT_UNITY; } // now apply the master volume and stream type volume - vl = (uint32_t)(v * vl) << 12; - vr = (uint32_t)(v * vr) << 12; + vlf *= v; + vrf *= v; // assuming master volume and stream type volume each go up to 1.0, - // vl and vr 
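For a resampled track, prepareTracks_l() estimates how many source frames one normal-mixer period will consume: the rate-scaled frame count plus one frame for rounding, one extra sample for interpolation, and whatever the resampler has consumed but not yet released. As a small helper, that estimate is roughly:

    // Sketch of the per-period source-frame estimate (applies when trackRate != sinkRate).
    #include <cstddef>
    #include <cstdint>

    size_t desiredSourceFrames(size_t sinkFramesPerPeriod, uint32_t trackRate,
                               uint32_t sinkRate, size_t unreleasedFrames) {
        // +1 for rounding and +1 for the additional sample interpolation needs
        const size_t frames = (sinkFramesPerPeriod * trackRate) / sinkRate + 1 + 1;
        return frames + unreleasedFrames;   // frames consumed but not yet released
    }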
are now in 8.24 format - + // then derive vl and vr as U8.24 versions for the effect chain + const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT; + vl = (uint32_t) (scaleto8_24 * vlf); + vr = (uint32_t) (scaleto8_24 * vrf); + // vl and vr are now in U8.24 format uint16_t sendLevel = proxy->getSendLevel_U4_12(); // send level comes from shared memory and so may be corrupt if (sendLevel > MAX_GAIN_INT) { ALOGV("Track send level out of range: %04X", sendLevel); sendLevel = MAX_GAIN_INT; } - va = (uint32_t)(v * sendLevel); + // vaf is represented as [0.0, 1.0] float by rescaling sendLevel + vaf = v * sendLevel * (1. / MAX_GAIN_INT); } // Delegate volume control to effect in track effect chain if needed if (chain != 0 && chain->setVolume_l(&vl, &vr)) { // Do not ramp volume if volume is controlled by effect param = AudioMixer::VOLUME; + // Update remaining floating point volume levels + vlf = (float)vl / (1 << 24); + vrf = (float)vr / (1 << 24); track->mHasVolumeController = true; } else { // force no volume ramp when volume controller was just disabled or removed @@ -3134,28 +3488,13 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac track->mHasVolumeController = false; } - // Convert volumes from 8.24 to 4.12 format - // This additional clamping is needed in case chain->setVolume_l() overshot - vl = (vl + (1 << 11)) >> 12; - if (vl > MAX_GAIN_INT) { - vl = MAX_GAIN_INT; - } - vr = (vr + (1 << 11)) >> 12; - if (vr > MAX_GAIN_INT) { - vr = MAX_GAIN_INT; - } - - if (va > MAX_GAIN_INT) { - va = MAX_GAIN_INT; // va is uint32_t, so no need to check for - - } - // XXX: these things DON'T need to be done each time mAudioMixer->setBufferProvider(name, track); mAudioMixer->enable(name); - mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)(uintptr_t)vl); - mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)(uintptr_t)vr); - mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)(uintptr_t)va); + mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, &vlf); + mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, &vrf); + mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, &vaf); mAudioMixer->setParameter( name, AudioMixer::TRACK, @@ -3164,8 +3503,12 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask()); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_CHANNEL_MASK, (void *)(uintptr_t)mChannelMask); // limit track sample rate to 2 x output sample rate, which changes at re-configuration - uint32_t maxSampleRate = mSampleRate * 2; + uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX; uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate(); if (reqSampleRate == 0) { reqSampleRate = mSampleRate; @@ -3177,10 +3520,41 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac AudioMixer::RESAMPLE, AudioMixer::SAMPLE_RATE, (void *)(uintptr_t)reqSampleRate); - mAudioMixer->setParameter( - name, - AudioMixer::TRACK, - AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer()); + /* + * Select the appropriate output buffer for the track. + * + * Tracks with effects go into their own effects chain buffer + * and from there into either mEffectBuffer or mSinkBuffer. + * + * Other tracks can use mMixerBuffer for higher precision + * channel accumulation. 
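Track gains now arrive as packed minifloats, are unpacked to floats, clamped to GAIN_FLOAT_UNITY, scaled by the master and stream-type volumes, and only then converted to U8.24 fixed point for the effect chain (scaleto8_24 is 4096 * 4096, i.e. 1 << 24). The clamp-and-convert step in isolation, as a sketch rather than the libaudioutils gain helpers:

    // Sketch: clamp a [0.0, 1.0] gain, then convert between float and U8.24 fixed point.
    #include <cstdint>

    static inline float clampGain(float g) {
        return g < 0.f ? 0.f : (g > 1.f ? 1.f : g);    // 1.0f plays the GAIN_FLOAT_UNITY role
    }

    static inline uint32_t gainFloatToU8_24(float g) {
        return (uint32_t)(clampGain(g) * (1 << 24));   // same scale as 4096 * 4096
    }

    static inline float gainU8_24ToFloat(uint32_t g) {
        return (float)g / (1 << 24);
    }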
If this buffer is enabled + * (mMixerBufferEnabled true), then selected tracks will accumulate + * into it. + * + */ + if (mMixerBufferEnabled + && (track->mainBuffer() == mSinkBuffer + || track->mainBuffer() == mMixerBuffer)) { + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer); + // TODO: override track->mainBuffer()? + mMixerBufferValid = true; + } else { + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, (void *)AUDIO_FORMAT_PCM_16_BIT); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer()); + } mAudioMixer->setParameter( name, AudioMixer::TRACK, @@ -3294,13 +3668,34 @@ track_is_ready: ; // remove all the tracks that need to be... removeTracks_l(*tracksToRemove); - // mix buffer must be cleared if all tracks are connected to an - // effect chain as in this case the mixer will not write to - // mix buffer and track effects will accumulate into it + if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0) { + mEffectBufferValid = true; + } + + // sink or mix buffer must be cleared if all tracks are connected to an + // effect chain as in this case the mixer will not write to the sink or mix buffer + // and track effects will accumulate into it if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) || (mixedTracks == 0 && fastTracks > 0))) { // FIXME as a performance optimization, should remember previous zero status - memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t)); + if (mMixerBufferValid) { + memset(mMixerBuffer, 0, mMixerBufferSize); + // TODO: In testing, mSinkBuffer below need not be cleared because + // the PlaybackThread::threadLoop() copies mMixerBuffer into mSinkBuffer + // after mixing. + // + // To enforce this guarantee: + // ((mixedTracks != 0 && mixedTracks == tracksWithEffect) || + // (mixedTracks == 0 && fastTracks > 0)) + // must imply MIXER_TRACKS_READY. + // Later, we may clear buffers regardless, and skip much of this logic. + } + // TODO - either mEffectBuffer or mSinkBuffer needs to be cleared. 
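// Worked example of the track gain scaling earlier in prepareTracks_l above:
// vlf/vrf are clamped to unity, scaled by master and stream-type volume, then
// converted to U8.24 for the effect chain via
// scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT (0x1000 * 0x1000 == 1 << 24),
// so vlf == 0.5f yields vl == 0x00800000. A minimal sketch, assuming only that
// MAX_GAIN_INT is the 4.12 unity value 0x1000 used elsewhere in AudioFlinger;
// the helper names are illustrative, not part of the patch.
#include <stdint.h>

static inline uint32_t gainFloatToU8_24(float g)
{
    if (g > 1.0f) g = 1.0f;            // same clamp applied to vlf/vrf above
    if (g < 0.0f) g = 0.0f;
    return (uint32_t)(g * (1 << 24));  // U8.24: unity == 0x1000000
}

static inline float gainU8_24ToFloat(uint32_t v)
{
    return (float)v / (1 << 24);       // inverse used after chain->setVolume_l()
}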
+ if (mEffectBufferValid) { + memset(mEffectBuffer, 0, mEffectBufferSize); + } + // FIXME as a performance optimization, should remember previous zero status + memset(mSinkBuffer, 0, mNormalFrameCount * mFrameSize); } // if any fast tracks, then status is ready @@ -3312,9 +3707,10 @@ track_is_ready: ; } // getTrackName_l() must be called with ThreadBase::mLock held -int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask, int sessionId) +int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask, + audio_format_t format, int sessionId) { - return mAudioMixer->getTrackName(channelMask, sessionId); + return mAudioMixer->getTrackName(channelMask, format, sessionId); } // deleteTrackName_l() must be called with ThreadBase::mLock held @@ -3324,130 +3720,122 @@ void AudioFlinger::MixerThread::deleteTrackName_l(int name) mAudioMixer->deleteTrackName(name); } -// checkForNewParameters_l() must be called with ThreadBase::mLock held -bool AudioFlinger::MixerThread::checkForNewParameters_l() +// checkForNewParameter_l() must be called with ThreadBase::mLock held +bool AudioFlinger::MixerThread::checkForNewParameter_l(const String8& keyValuePair, + status_t& status) { - // if !&IDLE, holds the FastMixer state to restore after new parameters processed - FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE; bool reconfig = false; - while (!mNewParameters.isEmpty()) { + status = NO_ERROR; - if (mFastMixer != NULL) { - FastMixerStateQueue *sq = mFastMixer->sq(); - FastMixerState *state = sq->begin(); - if (!(state->mCommand & FastMixerState::IDLE)) { - previousCommand = state->mCommand; - state->mCommand = FastMixerState::HOT_IDLE; - sq->end(); - sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED); - } else { - sq->end(false /*didModify*/); - } + // if !&IDLE, holds the FastMixer state to restore after new parameters processed + FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE; + if (mFastMixer != 0) { + FastMixerStateQueue *sq = mFastMixer->sq(); + FastMixerState *state = sq->begin(); + if (!(state->mCommand & FastMixerState::IDLE)) { + previousCommand = state->mCommand; + state->mCommand = FastMixerState::HOT_IDLE; + sq->end(); + sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED); + } else { + sq->end(false /*didModify*/); } + } - status_t status = NO_ERROR; - String8 keyValuePair = mNewParameters[0]; - AudioParameter param = AudioParameter(keyValuePair); - int value; - - if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { + AudioParameter param = AudioParameter(keyValuePair); + int value; + if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { + reconfig = true; + } + if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { + if (!isValidPcmSinkFormat((audio_format_t) value)) { + status = BAD_VALUE; + } else { + // no need to save value, since it's constant reconfig = true; } - if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { - if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) { - status = BAD_VALUE; - } else { - reconfig = true; - } - } - if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { - if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) { - status = BAD_VALUE; - } else { - reconfig = true; - } + } + if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { + if (!isValidPcmSinkChannelMask((audio_channel_mask_t) value)) { + status = BAD_VALUE; + } else { + // no need to 
save value, since it's constant + reconfig = true; } - if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { - // do not accept frame count changes if tracks are open as the track buffer - // size depends on frame count and correct behavior would not be guaranteed - // if frame count is changed after track creation - if (!mTracks.isEmpty()) { - status = INVALID_OPERATION; - } else { - reconfig = true; - } + } + if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { + // do not accept frame count changes if tracks are open as the track buffer + // size depends on frame count and correct behavior would not be guaranteed + // if frame count is changed after track creation + if (!mTracks.isEmpty()) { + status = INVALID_OPERATION; + } else { + reconfig = true; } - if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { + } + if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { #ifdef ADD_BATTERY_DATA - // when changing the audio output device, call addBatteryData to notify - // the change - if (mOutDevice != value) { - uint32_t params = 0; - // check whether speaker is on - if (value & AUDIO_DEVICE_OUT_SPEAKER) { - params |= IMediaPlayerService::kBatteryDataSpeakerOn; - } + // when changing the audio output device, call addBatteryData to notify + // the change + if (mOutDevice != value) { + uint32_t params = 0; + // check whether speaker is on + if (value & AUDIO_DEVICE_OUT_SPEAKER) { + params |= IMediaPlayerService::kBatteryDataSpeakerOn; + } - audio_devices_t deviceWithoutSpeaker - = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER; - // check if any other device (except speaker) is on - if (value & deviceWithoutSpeaker ) { - params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn; - } + audio_devices_t deviceWithoutSpeaker + = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER; + // check if any other device (except speaker) is on + if (value & deviceWithoutSpeaker ) { + params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn; + } - if (params != 0) { - addBatteryData(params); - } + if (params != 0) { + addBatteryData(params); } + } #endif - // forward device change to effects that have requested to be - // aware of attached audio device. - if (value != AUDIO_DEVICE_NONE) { - mOutDevice = value; - for (size_t i = 0; i < mEffectChains.size(); i++) { - mEffectChains[i]->setDevice_l(mOutDevice); - } + // forward device change to effects that have requested to be + // aware of attached audio device. 
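// Usage sketch of the AudioParameter parsing that drives checkForNewParameter_l()
// above: the keyValuePair argument is a ';'-separated list of key=value pairs.
// The concrete values and the function name below are illustrative only.
#include <media/AudioParameter.h>
#include <utils/Errors.h>
#include <utils/String8.h>

using namespace android;

static void exampleParse()
{
    AudioParameter param(String8("sampling_rate=48000;routing=2"));
    int value;
    if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
        // value == 48000 -> marks reconfig in the code above
    }
    if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
        // value is an audio_devices_t bit mask forwarded to the effect chains
    }
}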
+ if (value != AUDIO_DEVICE_NONE) { + mOutDevice = value; + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setDevice_l(mOutDevice); } } + } - if (status == NO_ERROR) { + if (status == NO_ERROR) { + status = mOutput->stream->common.set_parameters(&mOutput->stream->common, + keyValuePair.string()); + if (!mStandby && status == INVALID_OPERATION) { + mOutput->stream->common.standby(&mOutput->stream->common); + mStandby = true; + mBytesWritten = 0; status = mOutput->stream->common.set_parameters(&mOutput->stream->common, - keyValuePair.string()); - if (!mStandby && status == INVALID_OPERATION) { - mOutput->stream->common.standby(&mOutput->stream->common); - mStandby = true; - mBytesWritten = 0; - status = mOutput->stream->common.set_parameters(&mOutput->stream->common, - keyValuePair.string()); - } - if (status == NO_ERROR && reconfig) { - readOutputParameters(); - delete mAudioMixer; - mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate); - for (size_t i = 0; i < mTracks.size() ; i++) { - int name = getTrackName_l(mTracks[i]->mChannelMask, mTracks[i]->mSessionId); - if (name < 0) { - break; - } - mTracks[i]->mName = name; + keyValuePair.string()); + } + if (status == NO_ERROR && reconfig) { + readOutputParameters_l(); + delete mAudioMixer; + mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate); + for (size_t i = 0; i < mTracks.size() ; i++) { + int name = getTrackName_l(mTracks[i]->mChannelMask, + mTracks[i]->mFormat, mTracks[i]->mSessionId); + if (name < 0) { + break; } - sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); + mTracks[i]->mName = name; } + sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); } - - mNewParameters.removeAt(0); - - mParamStatus = status; - mParamCond.signal(); - // wait for condition with time out in case the thread calling ThreadBase::setParameters() - // already timed out waiting for the status and will never signal the condition. 
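// Sketch of the set_parameters() retry pattern used above: some HAL streams
// only accept a routing or format change while in standby, so on
// INVALID_OPERATION the stream is forced into standby and the call is retried
// once. The helper name is illustrative; the flag mirrors mStandby above.
#include <hardware/audio.h>
#include <utils/Errors.h>

static android::status_t setParametersWithStandbyRetry(audio_stream_t *stream,
                                                       const char *kvPairs,
                                                       bool *inStandby)
{
    android::status_t status = stream->set_parameters(stream, kvPairs);
    if (!*inStandby && status == android::INVALID_OPERATION) {
        stream->standby(stream);          // the caller also resets its byte counter
        *inStandby = true;
        status = stream->set_parameters(stream, kvPairs);
    }
    return status;
}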
- mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); } if (!(previousCommand & FastMixerState::IDLE)) { - ALOG_ASSERT(mFastMixer != NULL); + ALOG_ASSERT(mFastMixer != 0); FastMixerStateQueue *sq = mFastMixer->sq(); FastMixerState *state = sq->begin(); ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE); @@ -3468,9 +3856,7 @@ void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& ar PlaybackThread::dumpInternals(fd, args); - snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames()); - result.append(buffer); - write(fd, result.string(), result.size()); + dprintf(fd, " AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames()); // Make a non-atomic copy of fast mixer dump state so it won't change underneath us const FastMixerDumpState copy(mFastMixerDumpState); @@ -3551,13 +3937,17 @@ void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTr float typeVolume = mStreamTypes[track->streamType()].volume; float v = mMasterVolume * typeVolume; AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy; - uint32_t vlr = proxy->getVolumeLR(); - float v_clamped = v * (vlr & 0xFFFF); - if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN; - left = v_clamped/MAX_GAIN; - v_clamped = v * (vlr >> 16); - if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN; - right = v_clamped/MAX_GAIN; + gain_minifloat_packed_t vlr = proxy->getVolumeLR(); + left = float_from_gain(gain_minifloat_unpack_left(vlr)); + if (left > GAIN_FLOAT_UNITY) { + left = GAIN_FLOAT_UNITY; + } + left *= v; + right = float_from_gain(gain_minifloat_unpack_right(vlr)); + if (right > GAIN_FLOAT_UNITY) { + right = GAIN_FLOAT_UNITY; + } + right *= v; } if (lastTrack) { @@ -3612,14 +4002,14 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep // The first time a track is added we wait // for all its buffers to be filled before processing it uint32_t minFrames; - if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing()) { + if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()) { minFrames = mNormalFrameCount; } else { minFrames = 1; } - if ((track->framesReady() >= minFrames) && track->isReady() && - !track->isPaused() && !track->isTerminated()) + if ((track->framesReady() >= minFrames) && track->isReady() && !track->isPaused() && + !track->isStopping_2() && !track->isStopped()) { ALOGVV("track %d s=%08x [OK]", track->name(), cblk->mServer); @@ -3646,18 +4036,30 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep if (!mEffectChains.isEmpty() && last) { mEffectChains[0]->clearInputBuffer(); } - - ALOGVV("track %d s=%08x [NOT READY]", track->name(), cblk->mServer); - if ((track->sharedBuffer() != 0) || track->isTerminated() || - track->isStopped() || track->isPaused()) { + if (track->isStopping_1()) { + track->mState = TrackBase::STOPPING_2; + } + if ((track->sharedBuffer() != 0) || track->isStopped() || + track->isStopping_2() || track->isPaused()) { // We have consumed all the buffers of this track. // Remove it from the list of active tracks. 
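// Sketch of the direct-output stop sequence handled in this part of
// prepareTracks_l: once a track's buffers are drained it moves
// STOPPING_1 -> STOPPING_2, and once its last frames have been presented it
// moves STOPPING_2 -> STOPPED and is removed (flushing the HAL first if it had
// been flushed). The enum mirrors the TrackBase states referenced here;
// nextStopState() is an illustrative helper, not part of the patch.
enum StopState { STOPPING_1, STOPPING_2, STOPPED };

static StopState nextStopState(StopState s, bool buffersDrained, bool presented)
{
    switch (s) {
    case STOPPING_1: return buffersDrained ? STOPPING_2 : STOPPING_1;
    case STOPPING_2: return presented ? STOPPED : STOPPING_2;
    default:         return s;   // STOPPED is terminal here
    }
}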
- // TODO: implement behavior for compressed audio - size_t audioHALFrames = (latency_l() * mSampleRate) / 1000; + size_t audioHALFrames; + if (audio_is_linear_pcm(mFormat)) { + audioHALFrames = (latency_l() * mSampleRate) / 1000; + } else { + audioHALFrames = 0; + } + size_t framesWritten = mBytesWritten / mFrameSize; if (mStandby || !last || track->presentationComplete(framesWritten, audioHALFrames)) { + if (track->isStopping_2()) { + track->mState = TrackBase::STOPPED; + } if (track->isStopped()) { + if (track->mState == TrackBase::FLUSHED) { + flushHw_l(); + } track->reset(); } tracksToRemove->add(track); @@ -3688,7 +4090,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep void AudioFlinger::DirectOutputThread::threadLoop_mix() { size_t frameCount = mFrameCount; - int8_t *curBuf = (int8_t *)mMixBuffer; + int8_t *curBuf = (int8_t *)mSinkBuffer; // output audio to hardware while (frameCount) { AudioBufferProvider::Buffer buffer; @@ -3703,7 +4105,7 @@ void AudioFlinger::DirectOutputThread::threadLoop_mix() curBuf += buffer.frameCount * mFrameSize; mActiveTrack->releaseBuffer(&buffer); } - mCurrentWriteLength = curBuf - (int8_t *)mMixBuffer; + mCurrentWriteLength = curBuf - (int8_t *)mSinkBuffer; sleepTime = 0; standbyTime = systemTime() + standbyDelay; mActiveTrack.clear(); @@ -3718,68 +4120,69 @@ void AudioFlinger::DirectOutputThread::threadLoop_sleepTime() sleepTime = idleSleepTime; } } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) { - memset(mMixBuffer, 0, mFrameCount * mFrameSize); + memset(mSinkBuffer, 0, mFrameCount * mFrameSize); sleepTime = 0; } } // getTrackName_l() must be called with ThreadBase::mLock held -int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask, - int sessionId) +int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask __unused, + audio_format_t format __unused, int sessionId __unused) { return 0; } // deleteTrackName_l() must be called with ThreadBase::mLock held -void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name) +void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name __unused) { } -// checkForNewParameters_l() must be called with ThreadBase::mLock held -bool AudioFlinger::DirectOutputThread::checkForNewParameters_l() +// checkForNewParameter_l() must be called with ThreadBase::mLock held +bool AudioFlinger::DirectOutputThread::checkForNewParameter_l(const String8& keyValuePair, + status_t& status) { bool reconfig = false; - while (!mNewParameters.isEmpty()) { - status_t status = NO_ERROR; - String8 keyValuePair = mNewParameters[0]; - AudioParameter param = AudioParameter(keyValuePair); - int value; - - if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { - // do not accept frame count changes if tracks are open as the track buffer - // size depends on frame count and correct behavior would not be garantied - // if frame count is changed after track creation - if (!mTracks.isEmpty()) { - status = INVALID_OPERATION; - } else { - reconfig = true; + status = NO_ERROR; + + AudioParameter param = AudioParameter(keyValuePair); + int value; + if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { + // forward device change to effects that have requested to be + // aware of attached audio device. 
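// Worked example of the presentation-complete accounting above: with a linear
// PCM stream at 48 kHz, a 40 ms HAL latency and 3840 bytes written at 4 bytes
// per frame (16-bit stereo), audioHALFrames = (40 * 48000) / 1000 = 1920 and
// framesWritten = 3840 / 4 = 960. Numbers and the helper name are illustrative.
#include <stddef.h>
#include <stdint.h>

static size_t halLatencyFrames(uint32_t latencyMs, uint32_t sampleRate, bool isLinearPcm)
{
    // compressed (offloaded) formats report no meaningful PCM latency here
    return isLinearPcm ? (size_t)((uint64_t)latencyMs * sampleRate / 1000) : 0;
}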
+ if (value != AUDIO_DEVICE_NONE) { + mOutDevice = value; + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setDevice_l(mOutDevice); } } - if (status == NO_ERROR) { + } + if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { + // do not accept frame count changes if tracks are open as the track buffer + // size depends on frame count and correct behavior would not be garantied + // if frame count is changed after track creation + if (!mTracks.isEmpty()) { + status = INVALID_OPERATION; + } else { + reconfig = true; + } + } + if (status == NO_ERROR) { + status = mOutput->stream->common.set_parameters(&mOutput->stream->common, + keyValuePair.string()); + if (!mStandby && status == INVALID_OPERATION) { + mOutput->stream->common.standby(&mOutput->stream->common); + mStandby = true; + mBytesWritten = 0; status = mOutput->stream->common.set_parameters(&mOutput->stream->common, - keyValuePair.string()); - if (!mStandby && status == INVALID_OPERATION) { - mOutput->stream->common.standby(&mOutput->stream->common); - mStandby = true; - mBytesWritten = 0; - status = mOutput->stream->common.set_parameters(&mOutput->stream->common, - keyValuePair.string()); - } - if (status == NO_ERROR && reconfig) { - readOutputParameters(); - sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); - } + keyValuePair.string()); + } + if (status == NO_ERROR && reconfig) { + readOutputParameters_l(); + sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); } - - mNewParameters.removeAt(0); - - mParamStatus = status; - mParamCond.signal(); - // wait for condition with time out in case the thread calling ThreadBase::setParameters() - // already timed out waiting for the status and will never signal the condition. - mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); } + return reconfig; } @@ -3829,6 +4232,12 @@ void AudioFlinger::DirectOutputThread::cacheParameters_l() } } +void AudioFlinger::DirectOutputThread::flushHw_l() +{ + if (mOutput->stream->flush != NULL) + mOutput->stream->flush(mOutput->stream); +} + // ---------------------------------------------------------------------------- AudioFlinger::AsyncCallbackThread::AsyncCallbackThread( @@ -3987,6 +4396,17 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr sp<Track> l = mLatestActiveTrack.promote(); bool last = l.get() == track; + if (track->isInvalid()) { + ALOGW("An invalidated track shouldn't be in active list"); + tracksToRemove->add(track); + continue; + } + + if (track->mState == TrackBase::IDLE) { + ALOGW("An idle track shouldn't be in active list"); + continue; + } + if (track->isPausing()) { track->setPaused(); if (last) { @@ -4005,32 +4425,39 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr mBytesRemaining = 0; // stop writing } tracksToRemove->add(track); - } else if (track->framesReady() && track->isReady() && + } else if (track->isFlushPending()) { + track->flushAck(); + if (last) { + mFlushPending = true; + } + } else if (track->isResumePending()){ + track->resumeAck(); + if (last) { + if (mPausedBytesRemaining) { + // Need to continue write that was interrupted + mCurrentWriteLength = mPausedWriteLength; + mBytesRemaining = mPausedBytesRemaining; + mPausedBytesRemaining = 0; + } + if (mHwPaused) { + doHwResume = true; + mHwPaused = false; + // threadLoop_mix() will handle the case that we need to + // resume an interrupted write + } + // enable write to audio HAL + sleepTime = 0; + + // Do not handle new data in this iteration 
even if track->framesReady() + mixerStatus = MIXER_TRACKS_ENABLED; + } + } else if (track->framesReady() && track->isReady() && !track->isPaused() && !track->isTerminated() && !track->isStopping_2()) { ALOGVV("OffloadThread: track %d s=%08x [OK]", track->name(), cblk->mServer); if (track->mFillingUpStatus == Track::FS_FILLED) { track->mFillingUpStatus = Track::FS_ACTIVE; // make sure processVolume_l() will apply new volume even if 0 mLeftVolFloat = mRightVolFloat = -1.0; - if (track->mState == TrackBase::RESUMING) { - track->mState = TrackBase::ACTIVE; - if (last) { - if (mPausedBytesRemaining) { - // Need to continue write that was interrupted - mCurrentWriteLength = mPausedWriteLength; - mBytesRemaining = mPausedBytesRemaining; - mPausedBytesRemaining = 0; - } - if (mHwPaused) { - doHwResume = true; - mHwPaused = false; - // threadLoop_mix() will handle the case that we need to - // resume an interrupted write - } - // enable write to audio HAL - sleepTime = 0; - } - } } if (last) { @@ -4054,7 +4481,6 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr // seek when resuming. if (previousTrack->sessionId() != track->sessionId()) { previousTrack->invalidate(); - mFlushPending = true; } } } @@ -4100,7 +4526,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr size_t audioHALFrames = (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000; size_t framesWritten = - mBytesWritten / audio_stream_frame_size(&mOutput->stream->common); + mBytesWritten / audio_stream_out_frame_size(mOutput->stream); track->presentationComplete(framesWritten, audioHALFrames); track->reset(); tracksToRemove->add(track); @@ -4130,9 +4556,6 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr // if resume is received before pause is executed. if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) { mOutput->stream->pause(mOutput->stream); - if (!doHwPause) { - doHwResume = true; - } } if (mFlushPending) { flushHw_l(); @@ -4148,11 +4571,6 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr return mixerStatus; } -void AudioFlinger::OffloadThread::flushOutput_l() -{ - mFlushPending = true; -} - // must be called with thread mutex locked bool AudioFlinger::OffloadThread::waitingAsyncCallback_l() { @@ -4167,15 +4585,15 @@ bool AudioFlinger::OffloadThread::waitingAsyncCallback_l() // must be called with thread mutex locked bool AudioFlinger::OffloadThread::shouldStandby_l() { - bool TrackPaused = false; + bool trackPaused = false; // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack // after a timeout and we will enter standby then. 
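// Sketch of the interrupted-write resume handled above for offloaded tracks:
// when a paused track is resumed, any write that was cut short by the pause is
// restored before new data is mixed, and a HAL resume is requested if the
// stream itself had been paused. The struct and helper are illustrative; the
// fields mirror mCurrentWriteLength, mBytesRemaining, mPausedWriteLength,
// mPausedBytesRemaining and mHwPaused above.
#include <stddef.h>

struct OffloadWriteState {
    size_t currentWriteLength;
    size_t bytesRemaining;
    size_t pausedWriteLength;
    size_t pausedBytesRemaining;
    bool   hwPaused;
};

static bool resumeInterruptedWrite(OffloadWriteState &s)
{
    bool doHwResume = false;
    if (s.pausedBytesRemaining != 0) {
        s.currentWriteLength   = s.pausedWriteLength;    // continue the old write
        s.bytesRemaining       = s.pausedBytesRemaining;
        s.pausedBytesRemaining = 0;
    }
    if (s.hwPaused) {
        doHwResume = true;                               // threadLoop issues the HAL resume
        s.hwPaused = false;
    }
    return doHwResume;
}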
if (mTracks.size() > 0) { - TrackPaused = mTracks[mTracks.size() - 1]->isPaused(); + trackPaused = mTracks[mTracks.size() - 1]->isPaused(); } - return !mStandby && !TrackPaused; + return !mStandby && !trackPaused; } @@ -4187,12 +4605,14 @@ bool AudioFlinger::OffloadThread::waitingAsyncCallback() void AudioFlinger::OffloadThread::flushHw_l() { - mOutput->stream->flush(mOutput->stream); + DirectOutputThread::flushHw_l(); // Flush anything still waiting in the mixbuffer mCurrentWriteLength = 0; mBytesRemaining = 0; mPausedWriteLength = 0; mPausedBytesRemaining = 0; + mHwPaused = false; + if (mUseAsyncWrite) { // discard any pending drain or write ack by incrementing sequence mWriteAckSequence = (mWriteAckSequence + 2) & ~1; @@ -4203,6 +4623,18 @@ void AudioFlinger::OffloadThread::flushHw_l() } } +void AudioFlinger::OffloadThread::onAddNewTrack_l() +{ + sp<Track> previousTrack = mPreviousTrack.promote(); + sp<Track> latestTrack = mLatestActiveTrack.promote(); + + if (previousTrack != 0 && latestTrack != 0 && + (previousTrack->sessionId() != latestTrack->sessionId())) { + mFlushPending = true; + } + PlaybackThread::onAddNewTrack_l(); +} + // ---------------------------------------------------------------------------- AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger, @@ -4227,11 +4659,11 @@ void AudioFlinger::DuplicatingThread::threadLoop_mix() if (outputsReady(outputTracks)) { mAudioMixer->process(AudioBufferProvider::kInvalidPTS); } else { - memset(mMixBuffer, 0, mixBufferSize); + memset(mSinkBuffer, 0, mSinkBufferSize); } sleepTime = 0; writeFrames = mNormalFrameCount; - mCurrentWriteLength = mixBufferSize; + mCurrentWriteLength = mSinkBufferSize; standbyTime = systemTime() + standbyDelay; } @@ -4246,7 +4678,7 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime() } else if (mBytesWritten != 0) { if (mMixerStatus == MIXER_TRACKS_ENABLED) { writeFrames = mNormalFrameCount; - memset(mMixBuffer, 0, mixBufferSize); + memset(mSinkBuffer, 0, mSinkBufferSize); } else { // flush remaining overflow buffers in output tracks writeFrames = 0; @@ -4258,10 +4690,18 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime() ssize_t AudioFlinger::DuplicatingThread::threadLoop_write() { for (size_t i = 0; i < outputTracks.size(); i++) { - outputTracks[i]->write(mMixBuffer, writeFrames); + // We convert the duplicating thread format to AUDIO_FORMAT_PCM_16_BIT + // for delivery downstream as needed. This in-place conversion is safe as + // AUDIO_FORMAT_PCM_16_BIT is smaller than any other supported format + // (AUDIO_FORMAT_PCM_8_BIT is not allowed here). + if (mFormat != AUDIO_FORMAT_PCM_16_BIT) { + memcpy_by_audio_format(mSinkBuffer, AUDIO_FORMAT_PCM_16_BIT, + mSinkBuffer, mFormat, writeFrames * mChannelCount); + } + outputTracks[i]->write(reinterpret_cast<int16_t*>(mSinkBuffer), writeFrames); } mStandby = false; - return (ssize_t)mixBufferSize; + return (ssize_t)mSinkBufferSize; } void AudioFlinger::DuplicatingThread::threadLoop_standby() @@ -4287,10 +4727,16 @@ void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread) Mutex::Autolock _l(mLock); // FIXME explain this formula size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate(); + // OutputTrack is forced to AUDIO_FORMAT_PCM_16_BIT regardless of mFormat + // due to current usage case and restrictions on the AudioBufferProvider. + // Actual buffer conversion is done in threadLoop_write(). 
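// Sketch of the in-place conversion done in threadLoop_write() above:
// converting the duplicating thread's sink buffer to PCM16 within the same
// buffer is safe because 16-bit samples are no larger than the source samples,
// so the write position never overtakes the read position. The wrapper name is
// illustrative; the memcpy_by_audio_format() call matches the one used above.
#include <stddef.h>
#include <audio_utils/format.h>
#include <system/audio.h>

static void convertSinkToPcm16InPlace(void *sinkBuffer, audio_format_t srcFormat,
                                      size_t frames, uint32_t channelCount)
{
    if (srcFormat != AUDIO_FORMAT_PCM_16_BIT) {
        memcpy_by_audio_format(sinkBuffer, AUDIO_FORMAT_PCM_16_BIT,
                               sinkBuffer, srcFormat,
                               frames * channelCount);   // count is in samples
    }
}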
+ // + // TODO: This may change in the future, depending on multichannel + // (and non int16_t*) support on AF::PlaybackThread::OutputTrack OutputTrack *outputTrack = new OutputTrack(thread, this, mSampleRate, - mFormat, + AUDIO_FORMAT_PCM_16_BIT, mChannelMask, frameCount, IPCThreadState::self()->getCallingUid()); @@ -4372,8 +4818,6 @@ void AudioFlinger::DuplicatingThread::cacheParameters_l() AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, AudioStreamIn *input, - uint32_t sampleRate, - audio_channel_mask_t channelMask, audio_io_handle_t id, audio_devices_t outDevice, audio_devices_t inDevice @@ -4382,27 +4826,162 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, #endif ) : ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD), - mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL), - // mRsmpInIndex and mBufferSize set by readInputParameters() - mReqChannelCount(popcount(channelMask)), - mReqSampleRate(sampleRate) - // mBytesRead is only meaningful while active, and so is cleared in start() - // (but might be better to also clear here for dump?) + mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL), + // mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l() + mRsmpInRear(0) #ifdef TEE_SINK , mTeeSink(teeSink) #endif + , mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize, + "RecordThreadRO", MemoryHeapBase::READ_ONLY)) + // mFastCapture below + , mFastCaptureFutex(0) + // mInputSource + // mPipeSink + // mPipeSource + , mPipeFramesP2(0) + // mPipeMemory + // mFastCaptureNBLogWriter + , mFastTrackAvail(false) { snprintf(mName, kNameLength, "AudioIn_%X", id); + mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName); + + readInputParameters_l(); + + // create an NBAIO source for the HAL input stream, and negotiate + mInputSource = new AudioStreamInSource(input->stream); + size_t numCounterOffers = 0; + const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)}; + ssize_t index = mInputSource->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + + // initialize fast capture depending on configuration + bool initFastCapture; + switch (kUseFastCapture) { + case FastCapture_Never: + initFastCapture = false; + break; + case FastCapture_Always: + initFastCapture = true; + break; + case FastCapture_Static: + uint32_t primaryOutputSampleRate; + { + AutoMutex _l(audioFlinger->mHardwareLock); + primaryOutputSampleRate = audioFlinger->mPrimaryOutputSampleRate; + } + initFastCapture = + // either capture sample rate is same as (a reasonable) primary output sample rate + (((primaryOutputSampleRate == 44100 || primaryOutputSampleRate == 48000) && + (mSampleRate == primaryOutputSampleRate)) || + // or primary output sample rate is unknown, and capture sample rate is reasonable + ((primaryOutputSampleRate == 0) && + ((mSampleRate == 44100 || mSampleRate == 48000)))) && + // and the buffer size is < 12 ms + (mFrameCount * 1000) / mSampleRate < 12; + break; + // case FastCapture_Dynamic: + } + + if (initFastCapture) { + // create a Pipe for FastMixer to write to, and for us and fast tracks to read from + NBAIO_Format format = mInputSource->format(); + size_t pipeFramesP2 = roundup(mSampleRate / 25); // double-buffering of 20 ms each + size_t pipeSize = pipeFramesP2 * Format_frameSize(format); + void *pipeBuffer; + const sp<MemoryDealer> roHeap(readOnlyHeap()); + sp<IMemory> pipeMemory; + if ((roHeap == 0) || + (pipeMemory = 
roHeap->allocate(pipeSize)) == 0 || + (pipeBuffer = pipeMemory->pointer()) == NULL) { + ALOGE("not enough memory for pipe buffer size=%zu", pipeSize); + goto failed; + } + // pipe will be shared directly with fast clients, so clear to avoid leaking old information + memset(pipeBuffer, 0, pipeSize); + Pipe *pipe = new Pipe(pipeFramesP2, format, pipeBuffer); + const NBAIO_Format offers[1] = {format}; + size_t numCounterOffers = 0; + ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + mPipeSink = pipe; + PipeReader *pipeReader = new PipeReader(*pipe); + numCounterOffers = 0; + index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + mPipeSource = pipeReader; + mPipeFramesP2 = pipeFramesP2; + mPipeMemory = pipeMemory; + + // create fast capture + mFastCapture = new FastCapture(); + FastCaptureStateQueue *sq = mFastCapture->sq(); +#ifdef STATE_QUEUE_DUMP + // FIXME +#endif + FastCaptureState *state = sq->begin(); + state->mCblk = NULL; + state->mInputSource = mInputSource.get(); + state->mInputSourceGen++; + state->mPipeSink = pipe; + state->mPipeSinkGen++; + state->mFrameCount = mFrameCount; + state->mCommand = FastCaptureState::COLD_IDLE; + // already done in constructor initialization list + //mFastCaptureFutex = 0; + state->mColdFutexAddr = &mFastCaptureFutex; + state->mColdGen++; + state->mDumpState = &mFastCaptureDumpState; +#ifdef TEE_SINK + // FIXME +#endif + mFastCaptureNBLogWriter = audioFlinger->newWriter_l(kFastCaptureLogSize, "FastCapture"); + state->mNBLogWriter = mFastCaptureNBLogWriter.get(); + sq->end(); + sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED); + + // start the fast capture + mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO); + pid_t tid = mFastCapture->getTid(); + int err = requestPriority(getpid_cached, tid, kPriorityFastMixer); + if (err != 0) { + ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", + kPriorityFastCapture, getpid_cached, tid, err); + } - readInputParameters(); +#ifdef AUDIO_WATCHDOG + // FIXME +#endif + + mFastTrackAvail = true; + } +failed: ; + + // FIXME mNormalSource } AudioFlinger::RecordThread::~RecordThread() { + if (mFastCapture != 0) { + FastCaptureStateQueue *sq = mFastCapture->sq(); + FastCaptureState *state = sq->begin(); + if (state->mCommand == FastCaptureState::COLD_IDLE) { + int32_t old = android_atomic_inc(&mFastCaptureFutex); + if (old == -1) { + (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1); + } + } + state->mCommand = FastCaptureState::EXIT; + sq->end(); + sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED); + mFastCapture->join(); + mFastCapture.clear(); + } + mAudioFlinger->unregisterWriter(mFastCaptureNBLogWriter); + mAudioFlinger->unregisterWriter(mNBLogWriter); delete[] mRsmpInBuffer; - delete mResampler; - delete[] mRsmpOutBuffer; } void AudioFlinger::RecordThread::onFirstRef() @@ -4410,230 +4989,482 @@ void AudioFlinger::RecordThread::onFirstRef() run(mName, PRIORITY_URGENT_AUDIO); } -status_t AudioFlinger::RecordThread::readyToRun() -{ - status_t status = initCheck(); - ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this); - return status; -} - bool AudioFlinger::RecordThread::threadLoop() { - AudioBufferProvider::Buffer buffer; - sp<RecordTrack> activeTrack; - Vector< sp<EffectChain> > effectChains; - nsecs_t lastWarning = 0; inputStandBy(); + +reacquire_wakelock: + sp<RecordTrack> activeTrack; + int activeTracksGen; { Mutex::Autolock _l(mLock); - 
activeTrack = mActiveTrack; - acquireWakeLock_l(activeTrack != 0 ? activeTrack->uid() : -1); + size_t size = mActiveTracks.size(); + activeTracksGen = mActiveTracksGen; + if (size > 0) { + // FIXME an arbitrary choice + activeTrack = mActiveTracks[0]; + acquireWakeLock_l(activeTrack->uid()); + if (size > 1) { + SortedVector<int> tmp; + for (size_t i = 0; i < size; i++) { + tmp.add(mActiveTracks[i]->uid()); + } + updateWakeLockUids_l(tmp); + } + } else { + acquireWakeLock_l(-1); + } } - // used to verify we've read at least once before evaluating how many bytes were read - bool readOnce = false; + // used to request a deferred sleep, to be executed later while mutex is unlocked + uint32_t sleepUs = 0; - // start recording - while (!exitPending()) { + // loop while there is work to do + for (;;) { + Vector< sp<EffectChain> > effectChains; - processConfigEvents(); + // sleep with mutex unlocked + if (sleepUs > 0) { + usleep(sleepUs); + sleepUs = 0; + } + + // activeTracks accumulates a copy of a subset of mActiveTracks + Vector< sp<RecordTrack> > activeTracks; + + // reference to the (first and only) active fast track + sp<RecordTrack> fastTrack; + + // reference to a fast track which is about to be removed + sp<RecordTrack> fastTrackToRemove; { // scope for mLock Mutex::Autolock _l(mLock); - checkForNewParameters_l(); - if (mActiveTrack != 0 && activeTrack != mActiveTrack) { - SortedVector<int> tmp; - tmp.add(mActiveTrack->uid()); - updateWakeLockUids_l(tmp); - } - activeTrack = mActiveTrack; - if (mActiveTrack == 0 && mConfigEvents.isEmpty()) { - standby(); - if (exitPending()) { - break; - } + processConfigEvents_l(); + + // check exitPending here because checkForNewParameters_l() and + // checkForNewParameters_l() can temporarily release mLock + if (exitPending()) { + break; + } + // if no active track(s), then standby and release wakelock + size_t size = mActiveTracks.size(); + if (size == 0) { + standbyIfNotAlreadyInStandby(); + // exitPending() can't become true here releaseWakeLock_l(); ALOGV("RecordThread: loop stopping"); // go to sleep mWaitWorkCV.wait(mLock); ALOGV("RecordThread: loop starting"); - acquireWakeLock_l(mActiveTrack != 0 ? 
mActiveTrack->uid() : -1); - continue; + goto reacquire_wakelock; } - if (mActiveTrack != 0) { - if (mActiveTrack->isTerminated()) { - removeTrack_l(mActiveTrack); - mActiveTrack.clear(); - } else if (mActiveTrack->mState == TrackBase::PAUSING) { - standby(); - mActiveTrack.clear(); - mStartStopCond.broadcast(); - } else if (mActiveTrack->mState == TrackBase::RESUMING) { - if (mReqChannelCount != mActiveTrack->channelCount()) { - mActiveTrack.clear(); - mStartStopCond.broadcast(); - } else if (readOnce) { - // record start succeeds only if first read from audio input - // succeeds - if (mBytesRead >= 0) { - mActiveTrack->mState = TrackBase::ACTIVE; - } else { - mActiveTrack.clear(); - } - mStartStopCond.broadcast(); + + if (mActiveTracksGen != activeTracksGen) { + activeTracksGen = mActiveTracksGen; + SortedVector<int> tmp; + for (size_t i = 0; i < size; i++) { + tmp.add(mActiveTracks[i]->uid()); + } + updateWakeLockUids_l(tmp); + } + + bool doBroadcast = false; + for (size_t i = 0; i < size; ) { + + activeTrack = mActiveTracks[i]; + if (activeTrack->isTerminated()) { + if (activeTrack->isFastTrack()) { + ALOG_ASSERT(fastTrackToRemove == 0); + fastTrackToRemove = activeTrack; } + removeTrack_l(activeTrack); + mActiveTracks.remove(activeTrack); + mActiveTracksGen++; + size--; + continue; + } + + TrackBase::track_state activeTrackState = activeTrack->mState; + switch (activeTrackState) { + + case TrackBase::PAUSING: + mActiveTracks.remove(activeTrack); + mActiveTracksGen++; + doBroadcast = true; + size--; + continue; + + case TrackBase::STARTING_1: + sleepUs = 10000; + i++; + continue; + + case TrackBase::STARTING_2: + doBroadcast = true; mStandby = false; + activeTrack->mState = TrackBase::ACTIVE; + break; + + case TrackBase::ACTIVE: + break; + + case TrackBase::IDLE: + i++; + continue; + + default: + LOG_ALWAYS_FATAL("Unexpected activeTrackState %d", activeTrackState); + } + + activeTracks.add(activeTrack); + i++; + + if (activeTrack->isFastTrack()) { + ALOG_ASSERT(!mFastTrackAvail); + ALOG_ASSERT(fastTrack == 0); + fastTrack = activeTrack; } } + if (doBroadcast) { + mStartStopCond.broadcast(); + } + + // sleep if there are no active tracks to process + if (activeTracks.size() == 0) { + if (sleepUs == 0) { + sleepUs = kRecordThreadSleepUs; + } + continue; + } + sleepUs = 0; lockEffectChains_l(effectChains); } - if (mActiveTrack != 0) { - if (mActiveTrack->mState != TrackBase::ACTIVE && - mActiveTrack->mState != TrackBase::RESUMING) { - unlockEffectChains(effectChains); - usleep(kRecordThreadSleepUs); - continue; + // thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0 + + size_t size = effectChains.size(); + for (size_t i = 0; i < size; i++) { + // thread mutex is not locked, but effect chain is locked + effectChains[i]->process_l(); + } + + // Push a new fast capture state if fast capture is not already running, or cblk change + if (mFastCapture != 0) { + FastCaptureStateQueue *sq = mFastCapture->sq(); + FastCaptureState *state = sq->begin(); + bool didModify = false; + FastCaptureStateQueue::block_t block = FastCaptureStateQueue::BLOCK_UNTIL_PUSHED; + if (state->mCommand != FastCaptureState::READ_WRITE /* FIXME && + (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)*/) { + if (state->mCommand == FastCaptureState::COLD_IDLE) { + int32_t old = android_atomic_inc(&mFastCaptureFutex); + if (old == -1) { + (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1); + } + } + state->mCommand = FastCaptureState::READ_WRITE; +#if 0 // FIXME + 
mFastCaptureDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ? + FastCaptureDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN); +#endif + didModify = true; } - for (size_t i = 0; i < effectChains.size(); i ++) { - effectChains[i]->process_l(); + audio_track_cblk_t *cblkOld = state->mCblk; + audio_track_cblk_t *cblkNew = fastTrack != 0 ? fastTrack->cblk() : NULL; + if (cblkNew != cblkOld) { + state->mCblk = cblkNew; + // block until acked if removing a fast track + if (cblkOld != NULL) { + block = FastCaptureStateQueue::BLOCK_UNTIL_ACKED; + } + didModify = true; + } + sq->end(didModify); + if (didModify) { + sq->push(block); +#if 0 + if (kUseFastCapture == FastCapture_Dynamic) { + mNormalSource = mPipeSource; + } +#endif } + } - buffer.frameCount = mFrameCount; - status_t status = mActiveTrack->getNextBuffer(&buffer); - if (status == NO_ERROR) { - readOnce = true; - size_t framesOut = buffer.frameCount; - if (mResampler == NULL) { + // now run the fast track destructor with thread mutex unlocked + fastTrackToRemove.clear(); + + // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one. + // Only the client(s) that are too slow will overrun. But if even the fastest client is too + // slow, then this RecordThread will overrun by not calling HAL read often enough. + // If destination is non-contiguous, first read past the nominal end of buffer, then + // copy to the right place. Permitted because mRsmpInBuffer was over-allocated. + + int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1); + ssize_t framesRead; + + // If an NBAIO source is present, use it to read the normal capture's data + if (mPipeSource != 0) { + size_t framesToRead = mBufferSize / mFrameSize; + framesRead = mPipeSource->read(&mRsmpInBuffer[rear * mChannelCount], + framesToRead, AudioBufferProvider::kInvalidPTS); + if (framesRead == 0) { + // since pipe is non-blocking, simulate blocking input + sleepUs = (framesToRead * 1000000LL) / mSampleRate; + } + // otherwise use the HAL / AudioStreamIn directly + } else { + ssize_t bytesRead = mInput->stream->read(mInput->stream, + &mRsmpInBuffer[rear * mChannelCount], mBufferSize); + if (bytesRead < 0) { + framesRead = bytesRead; + } else { + framesRead = bytesRead / mFrameSize; + } + } + + if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) { + ALOGE("read failed: framesRead=%d", framesRead); + // Force input into standby so that it tries to recover at next read attempt + inputStandBy(); + sleepUs = kRecordThreadSleepUs; + } + if (framesRead <= 0) { + goto unlock; + } + ALOG_ASSERT(framesRead > 0); + + if (mTeeSink != 0) { + (void) mTeeSink->write(&mRsmpInBuffer[rear * mChannelCount], framesRead); + } + // If destination is non-contiguous, we now correct for reading past end of buffer. 
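// Worked example of the wrap-around fix-up just below: mRsmpInBuffer holds
// mRsmpInFramesP2 frames (a power of two) plus over-allocation, the read above
// may run past the nominal end, and the excess frames are copied back to the
// start of the buffer. With framesP2 == 2048, rear == 2000 and framesRead == 100,
// part1 == 48 and the remaining 52 frames move to index 0. Names are
// illustrative; frameSize is bytes per frame and rear is already masked.
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void foldOverReadIntoFront(int16_t *buf, size_t framesP2, size_t channelCount,
                                  size_t frameSize, size_t rear, size_t framesRead)
{
    size_t part1 = framesP2 - rear;              // frames that fit before the end
    if (framesRead > part1) {
        memcpy(buf,                              // start of the circular buffer
               buf + framesP2 * channelCount,    // first frame past the nominal end
               (framesRead - part1) * frameSize);
    }
}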
+ { + size_t part1 = mRsmpInFramesP2 - rear; + if ((size_t) framesRead > part1) { + memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount], + (framesRead - part1) * mFrameSize); + } + } + rear = mRsmpInRear += framesRead; + + size = activeTracks.size(); + // loop over each active track + for (size_t i = 0; i < size; i++) { + activeTrack = activeTracks[i]; + + // skip fast tracks, as those are handled directly by FastCapture + if (activeTrack->isFastTrack()) { + continue; + } + + enum { + OVERRUN_UNKNOWN, + OVERRUN_TRUE, + OVERRUN_FALSE + } overrun = OVERRUN_UNKNOWN; + + // loop over getNextBuffer to handle circular sink + for (;;) { + + activeTrack->mSink.frameCount = ~0; + status_t status = activeTrack->getNextBuffer(&activeTrack->mSink); + size_t framesOut = activeTrack->mSink.frameCount; + LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0)); + + int32_t front = activeTrack->mRsmpInFront; + ssize_t filled = rear - front; + size_t framesIn; + + if (filled < 0) { + // should not happen, but treat like a massive overrun and re-sync + framesIn = 0; + activeTrack->mRsmpInFront = rear; + overrun = OVERRUN_TRUE; + } else if ((size_t) filled <= mRsmpInFrames) { + framesIn = (size_t) filled; + } else { + // client is not keeping up with server, but give it latest data + framesIn = mRsmpInFrames; + activeTrack->mRsmpInFront = front = rear - framesIn; + overrun = OVERRUN_TRUE; + } + + if (framesOut == 0 || framesIn == 0) { + break; + } + + if (activeTrack->mResampler == NULL) { // no resampling - while (framesOut) { - size_t framesIn = mFrameCount - mRsmpInIndex; - if (framesIn) { - int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize; - int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * - mActiveTrack->mFrameSize; - if (framesIn > framesOut) - framesIn = framesOut; - mRsmpInIndex += framesIn; - framesOut -= framesIn; - if (mChannelCount == mReqChannelCount) { - memcpy(dst, src, framesIn * mFrameSize); - } else { - if (mChannelCount == 1) { - upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, - (int16_t *)src, framesIn); - } else { - downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, - (int16_t *)src, framesIn); - } - } + if (framesIn > framesOut) { + framesIn = framesOut; + } else { + framesOut = framesIn; + } + int8_t *dst = activeTrack->mSink.i8; + while (framesIn > 0) { + front &= mRsmpInFramesP2 - 1; + size_t part1 = mRsmpInFramesP2 - front; + if (part1 > framesIn) { + part1 = framesIn; } - if (framesOut && mFrameCount == mRsmpInIndex) { - void *readInto; - if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) { - readInto = buffer.raw; - framesOut = 0; - } else { - readInto = mRsmpInBuffer; - mRsmpInIndex = 0; - } - mBytesRead = mInput->stream->read(mInput->stream, readInto, - mBufferSize); - if (mBytesRead <= 0) { - if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) - { - ALOGE("Error reading audio input"); - // Force input into standby so that it tries to - // recover at next read attempt - inputStandBy(); - usleep(kRecordThreadSleepUs); - } - mRsmpInIndex = mFrameCount; - framesOut = 0; - buffer.frameCount = 0; - } -#ifdef TEE_SINK - else if (mTeeSink != 0) { - (void) mTeeSink->write(readInto, - mBytesRead >> Format_frameBitShift(mTeeSink->format())); - } -#endif + int8_t *src = (int8_t *)mRsmpInBuffer + (front * mFrameSize); + if (mChannelCount == activeTrack->mChannelCount) { + memcpy(dst, src, part1 * mFrameSize); + } else if (mChannelCount == 1) { + upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (const int16_t 
*)src, + part1); + } else { + downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (const int16_t *)src, + part1); } + dst += part1 * activeTrack->mFrameSize; + front += part1; + framesIn -= part1; } + activeTrack->mRsmpInFront += framesOut; + } else { // resampling + // FIXME framesInNeeded should really be part of resampler API, and should + // depend on the SRC ratio + // to keep mRsmpInBuffer full so resampler always has sufficient input + size_t framesInNeeded; + // FIXME only re-calculate when it changes, and optimize for common ratios + // Do not precompute in/out because floating point is not associative + // e.g. a*b/c != a*(b/c). + const double in(mSampleRate); + const double out(activeTrack->mSampleRate); + framesInNeeded = ceil(framesOut * in / out) + 1; + ALOGV("need %u frames in to produce %u out given in/out ratio of %.4g", + framesInNeeded, framesOut, in / out); + // Although we theoretically have framesIn in circular buffer, some of those are + // unreleased frames, and thus must be discounted for purpose of budgeting. + size_t unreleased = activeTrack->mRsmpInUnrel; + framesIn = framesIn > unreleased ? framesIn - unreleased : 0; + if (framesIn < framesInNeeded) { + ALOGV("not enough to resample: have %u frames in but need %u in to " + "produce %u out given in/out ratio of %.4g", + framesIn, framesInNeeded, framesOut, in / out); + size_t newFramesOut = framesIn > 0 ? floor((framesIn - 1) * out / in) : 0; + LOG_ALWAYS_FATAL_IF(newFramesOut >= framesOut); + if (newFramesOut == 0) { + break; + } + framesInNeeded = ceil(newFramesOut * in / out) + 1; + ALOGV("now need %u frames in to produce %u out given out/in ratio of %.4g", + framesInNeeded, newFramesOut, out / in); + LOG_ALWAYS_FATAL_IF(framesIn < framesInNeeded); + ALOGV("success 2: have %u frames in and need %u in to produce %u out " + "given in/out ratio of %.4g", + framesIn, framesInNeeded, newFramesOut, in / out); + framesOut = newFramesOut; + } else { + ALOGV("success 1: have %u in and need %u in to produce %u out " + "given in/out ratio of %.4g", + framesIn, framesInNeeded, framesOut, in / out); + } - // resampler accumulates, but we only have one source track - memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t)); - // alter output frame count as if we were expecting stereo samples - if (mChannelCount == 1 && mReqChannelCount == 1) { - framesOut >>= 1; + // reallocate mRsmpOutBuffer as needed; we will grow but never shrink + if (activeTrack->mRsmpOutFrameCount < framesOut) { + // FIXME why does each track need it's own mRsmpOutBuffer? can't they share? + delete[] activeTrack->mRsmpOutBuffer; + // resampler always outputs stereo + activeTrack->mRsmpOutBuffer = new int32_t[framesOut * FCC_2]; + activeTrack->mRsmpOutFrameCount = framesOut; } - mResampler->resample(mRsmpOutBuffer, framesOut, - this /* AudioBufferProvider* */); + + // resampler accumulates, but we only have one source track + memset(activeTrack->mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t)); + activeTrack->mResampler->resample(activeTrack->mRsmpOutBuffer, framesOut, + // FIXME how about having activeTrack implement this interface itself? + activeTrack->mResamplerBufferProvider + /*this*/ /* AudioBufferProvider* */); // ditherAndClamp() works as long as all buffers returned by - // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true. 
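// Worked example of the input budgeting above: capturing at in = 48000 Hz for
// a client at out = 44100 Hz, producing framesOut = 441 frames requires
// ceil(441 * 48000 / 44100) + 1 = 480 + 1 = 481 input frames, the +1 covering
// the extra sample the interpolator needs. A minimal sketch; the helper name
// is illustrative.
#include <math.h>
#include <stddef.h>

static size_t resamplerFramesInNeeded(size_t framesOut, double inRate, double outRate)
{
    // keep the division inside the product: floating point is not associative
    return (size_t) ceil((double) framesOut * inRate / outRate) + 1;
}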
- if (mChannelCount == 2 && mReqChannelCount == 1) { - // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t - ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut); + // activeTrack->getNextBuffer() are 32 bit aligned which should be always true. + if (activeTrack->mChannelCount == 1) { + // temporarily type pun mRsmpOutBuffer from Q4.27 to int16_t + ditherAndClamp(activeTrack->mRsmpOutBuffer, activeTrack->mRsmpOutBuffer, + framesOut); // the resampler always outputs stereo samples: // do post stereo to mono conversion - downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer, - framesOut); + downmix_to_mono_i16_from_stereo_i16(activeTrack->mSink.i16, + (const int16_t *)activeTrack->mRsmpOutBuffer, framesOut); } else { - ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut); + ditherAndClamp((int32_t *)activeTrack->mSink.raw, + activeTrack->mRsmpOutBuffer, framesOut); } // now done with mRsmpOutBuffer } - if (mFramestoDrop == 0) { - mActiveTrack->releaseBuffer(&buffer); + + if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) { + overrun = OVERRUN_FALSE; + } + + if (activeTrack->mFramesToDrop == 0) { + if (framesOut > 0) { + activeTrack->mSink.frameCount = framesOut; + activeTrack->releaseBuffer(&activeTrack->mSink); + } } else { - if (mFramestoDrop > 0) { - mFramestoDrop -= buffer.frameCount; - if (mFramestoDrop <= 0) { - clearSyncStartEvent(); + // FIXME could do a partial drop of framesOut + if (activeTrack->mFramesToDrop > 0) { + activeTrack->mFramesToDrop -= framesOut; + if (activeTrack->mFramesToDrop <= 0) { + activeTrack->clearSyncStartEvent(); } } else { - mFramestoDrop += buffer.frameCount; - if (mFramestoDrop >= 0 || mSyncStartEvent == 0 || - mSyncStartEvent->isCancelled()) { + activeTrack->mFramesToDrop += framesOut; + if (activeTrack->mFramesToDrop >= 0 || activeTrack->mSyncStartEvent == 0 || + activeTrack->mSyncStartEvent->isCancelled()) { ALOGW("Synced record %s, session %d, trigger session %d", - (mFramestoDrop >= 0) ? "timed out" : "cancelled", - mActiveTrack->sessionId(), - (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0); - clearSyncStartEvent(); + (activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled", + activeTrack->sessionId(), + (activeTrack->mSyncStartEvent != 0) ? + activeTrack->mSyncStartEvent->triggerSession() : 0); + activeTrack->clearSyncStartEvent(); } } } - mActiveTrack->clearOverflow(); + + if (framesOut == 0) { + break; + } } - // client isn't retrieving buffers fast enough - else { - if (!mActiveTrack->setOverflow()) { + + switch (overrun) { + case OVERRUN_TRUE: + // client isn't retrieving buffers fast enough + if (!activeTrack->setOverflow()) { nsecs_t now = systemTime(); + // FIXME should lastWarning per track? if ((now - lastWarning) > kWarningThrottleNs) { ALOGW("RecordThread: buffer overflow"); lastWarning = now; } } - // Release the processor for a while before asking for a new buffer. - // This will give the application more chance to read from the buffer and - // clear the overflow. 
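// Sketch of the synced-start bookkeeping above: a negative mFramesToDrop
// encodes the start timeout (counted up toward zero while waiting for the
// trigger session), a positive value encodes frames still to be discarded
// after the trigger fired, and zero means deliver normally. The enum and
// helper are illustrative; the real code also clears the sync start event
// when dropping finishes, times out, or is cancelled.
#include <stddef.h>
#include <sys/types.h>

enum SyncDropResult { DELIVER, KEEP_DROPPING, DONE_DROPPING, TIMED_OUT };

static SyncDropResult updateFramesToDrop(ssize_t &framesToDrop, size_t framesOut)
{
    if (framesToDrop == 0) {
        return DELIVER;                            // normal capture
    }
    if (framesToDrop > 0) {                        // discarding post-trigger frames
        framesToDrop -= (ssize_t) framesOut;
        return framesToDrop <= 0 ? DONE_DROPPING : KEEP_DROPPING;
    }
    framesToDrop += (ssize_t) framesOut;           // still waiting: count up the timeout
    return framesToDrop >= 0 ? TIMED_OUT : KEEP_DROPPING;
}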
- usleep(kRecordThreadSleepUs); + break; + case OVERRUN_FALSE: + activeTrack->clearOverflow(); + break; + case OVERRUN_UNKNOWN: + break; } + } + +unlock: // enable changes in effect chain unlockEffectChains(effectChains); - effectChains.clear(); + // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end } - standby(); + standbyIfNotAlreadyInStandby(); { Mutex::Autolock _l(mLock); @@ -4641,7 +5472,8 @@ bool AudioFlinger::RecordThread::threadLoop() sp<RecordTrack> track = mTracks[i]; track->invalidate(); } - mActiveTrack.clear(); + mActiveTracks.clear(); + mActiveTracksGen++; mStartStopCond.broadcast(); } @@ -4651,7 +5483,7 @@ bool AudioFlinger::RecordThread::threadLoop() return false; } -void AudioFlinger::RecordThread::standby() +void AudioFlinger::RecordThread::standbyIfNotAlreadyInStandby() { if (!mStandby) { inputStandBy(); @@ -4661,91 +5493,130 @@ void AudioFlinger::RecordThread::standby() void AudioFlinger::RecordThread::inputStandBy() { + // Idle the fast capture if it's currently running + if (mFastCapture != 0) { + FastCaptureStateQueue *sq = mFastCapture->sq(); + FastCaptureState *state = sq->begin(); + if (!(state->mCommand & FastCaptureState::IDLE)) { + state->mCommand = FastCaptureState::COLD_IDLE; + state->mColdFutexAddr = &mFastCaptureFutex; + state->mColdGen++; + mFastCaptureFutex = 0; + sq->end(); + // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now + sq->push(FastCaptureStateQueue::BLOCK_UNTIL_ACKED); +#if 0 + if (kUseFastCapture == FastCapture_Dynamic) { + // FIXME + } +#endif +#ifdef AUDIO_WATCHDOG + // FIXME +#endif + } else { + sq->end(false /*didModify*/); + } + } mInput->stream->common.standby(&mInput->stream->common); } -sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l( +// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held +sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l( const sp<AudioFlinger::Client>& client, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, int sessionId, + size_t *notificationFrames, int uid, IAudioFlinger::track_flags_t *flags, pid_t tid, status_t *status) { + size_t frameCount = *pFrameCount; sp<RecordTrack> track; status_t lStatus; - lStatus = initCheck(); - if (lStatus != NO_ERROR) { - ALOGE("createRecordTrack_l() audio driver not initialized"); - goto Exit; - } // client expresses a preference for FAST, but we get the final say if (*flags & IAudioFlinger::TRACK_FAST) { if ( - // use case: callback handler and frame count is default or at least as large as HAL - ( - (tid != -1) && - ((frameCount == 0) || - (frameCount >= mFrameCount)) - ) && - // FIXME when record supports non-PCM data, also check for audio_is_linear_pcm(format) - // mono or stereo - ( (channelMask == AUDIO_CHANNEL_OUT_MONO) || - (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) && - // hardware sample rate + // use case: callback handler + (tid != -1) && + // frame count is not specified, or is exactly the pipe depth + ((frameCount == 0) || (frameCount == mPipeFramesP2)) && + // PCM data + audio_is_linear_pcm(format) && + // native format + (format == mFormat) && + // native channel mask + (channelMask == mChannelMask) && + // native hardware sample rate (sampleRate == mSampleRate) && - // record thread has an associated fast recorder - hasFastRecorder() - // FIXME test that RecordThread for this fast track has a capable output 
HAL - // FIXME add a permission test also? + // record thread has an associated fast capture + hasFastCapture() && + // there are sufficient fast track slots available + mFastTrackAvail ) { - // if frameCount not specified, then it defaults to fast recorder (HAL) frame count - if (frameCount == 0) { - frameCount = mFrameCount * kFastTrackMultiplier; - } - ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d", + ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%u mFrameCount=%u", frameCount, mFrameCount); } else { - ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%d " - "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u " - "hasFastRecorder=%d tid=%d", - frameCount, mFrameCount, format, - audio_is_linear_pcm(format), - channelMask, sampleRate, mSampleRate, hasFastRecorder(), tid); + ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%u mFrameCount=%u mPipeFramesP2=%u " + "format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u " + "hasFastCapture=%d tid=%d mFastTrackAvail=%d", + frameCount, mFrameCount, mPipeFramesP2, + format, audio_is_linear_pcm(format), channelMask, sampleRate, mSampleRate, + hasFastCapture(), tid, mFastTrackAvail); *flags &= ~IAudioFlinger::TRACK_FAST; - // For compatibility with AudioRecord calculation, buffer depth is forced - // to be at least 2 x the record thread frame count and cover audio hardware latency. - // This is probably too conservative, but legacy application code may depend on it. - // If you change this calculation, also review the start threshold which is related. - uint32_t latencyMs = 50; // FIXME mInput->stream->get_latency(mInput->stream); - size_t mNormalFrameCount = 2048; // FIXME - uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate); - if (minBufCount < 2) { - minBufCount = 2; - } - size_t minFrameCount = mNormalFrameCount * minBufCount; - if (frameCount < minFrameCount) { - frameCount = minFrameCount; - } } } - // FIXME use flags and tid similar to createTrack_l() + // compute track buffer size in frames, and suggest the notification frame count + if (*flags & IAudioFlinger::TRACK_FAST) { + // fast track: frame count is exactly the pipe depth + frameCount = mPipeFramesP2; + // ignore requested notificationFrames, and always notify exactly once every HAL buffer + *notificationFrames = mFrameCount; + } else { + // not fast track: max notification period is resampled equivalent of one HAL buffer time + // or 20 ms if there is a fast capture + // TODO This could be a roundupRatio inline, and const + size_t maxNotificationFrames = ((int64_t) (hasFastCapture() ? 
mSampleRate/50 : mFrameCount) + * sampleRate + mSampleRate - 1) / mSampleRate; + // minimum number of notification periods is at least kMinNotifications, + // and at least kMinMs rounded up to a whole notification period (minNotificationsByMs) + static const size_t kMinNotifications = 3; + static const uint32_t kMinMs = 30; + // TODO This could be a roundupRatio inline + const size_t minFramesByMs = (sampleRate * kMinMs + 1000 - 1) / 1000; + // TODO This could be a roundupRatio inline + const size_t minNotificationsByMs = (minFramesByMs + maxNotificationFrames - 1) / + maxNotificationFrames; + const size_t minFrameCount = maxNotificationFrames * + max(kMinNotifications, minNotificationsByMs); + frameCount = max(frameCount, minFrameCount); + if (*notificationFrames == 0 || *notificationFrames > maxNotificationFrames) { + *notificationFrames = maxNotificationFrames; + } + } + *pFrameCount = frameCount; + + lStatus = initCheck(); + if (lStatus != NO_ERROR) { + ALOGE("createRecordTrack_l() audio driver not initialized"); + goto Exit; + } { // scope for mLock Mutex::Autolock _l(mLock); track = new RecordTrack(this, client, sampleRate, - format, channelMask, frameCount, sessionId, uid); + format, channelMask, frameCount, NULL, sessionId, uid, + *flags, TrackBase::TYPE_DEFAULT); - if (track->getCblk() == 0) { - ALOGE("createRecordTrack_l() no control block"); - lStatus = NO_MEMORY; + lStatus = track->initCheck(); + if (lStatus != NO_ERROR) { + ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus); // track must be cleared from the caller as the caller has the AF lock goto Exit; } @@ -4764,12 +5635,11 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp); } } + lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return track; } @@ -4782,82 +5652,86 @@ status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrac status_t status = NO_ERROR; if (event == AudioSystem::SYNC_EVENT_NONE) { - clearSyncStartEvent(); + recordTrack->clearSyncStartEvent(); } else if (event != AudioSystem::SYNC_EVENT_SAME) { - mSyncStartEvent = mAudioFlinger->createSyncEvent(event, + recordTrack->mSyncStartEvent = mAudioFlinger->createSyncEvent(event, triggerSession, recordTrack->sessionId(), syncStartEventCallback, - this); + recordTrack); // Sync event can be cancelled by the trigger session if the track is not in a // compatible state in which case we start record immediately - if (mSyncStartEvent->isCancelled()) { - clearSyncStartEvent(); + if (recordTrack->mSyncStartEvent->isCancelled()) { + recordTrack->clearSyncStartEvent(); } else { // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs - mFramestoDrop = - ((AudioSystem::kSyncRecordStartTimeOutMs * mReqSampleRate) / 1000); + recordTrack->mFramesToDrop = - + ((AudioSystem::kSyncRecordStartTimeOutMs * recordTrack->mSampleRate) / 1000); } } { + // This section is a rendezvous between binder thread executing start() and RecordThread AutoMutex lock(mLock); - if (mActiveTrack != 0) { - if (recordTrack != mActiveTrack.get()) { - status = -EBUSY; - } else if (mActiveTrack->mState == TrackBase::PAUSING) { - mActiveTrack->mState = TrackBase::ACTIVE; + if (mActiveTracks.indexOf(recordTrack) >= 0) { + if (recordTrack->mState == TrackBase::PAUSING) { + ALOGV("active record track PAUSING -> ACTIVE"); + recordTrack->mState = TrackBase::ACTIVE; + } else { + ALOGV("active record track state 
%d", recordTrack->mState); } return status; } - recordTrack->mState = TrackBase::IDLE; - mActiveTrack = recordTrack; - mLock.unlock(); - status_t status = AudioSystem::startInput(mId); - mLock.lock(); - if (status != NO_ERROR) { - mActiveTrack.clear(); - clearSyncStartEvent(); - return status; + // TODO consider other ways of handling this, such as changing the state to :STARTING and + // adding the track to mActiveTracks after returning from AudioSystem::startInput(), + // or using a separate command thread + recordTrack->mState = TrackBase::STARTING_1; + mActiveTracks.add(recordTrack); + mActiveTracksGen++; + status_t status = NO_ERROR; + if (recordTrack->isExternalTrack()) { + mLock.unlock(); + status = AudioSystem::startInput(mId, (audio_session_t)recordTrack->sessionId()); + mLock.lock(); + // FIXME should verify that recordTrack is still in mActiveTracks + if (status != NO_ERROR) { + mActiveTracks.remove(recordTrack); + mActiveTracksGen++; + recordTrack->clearSyncStartEvent(); + ALOGV("RecordThread::start error %d", status); + return status; + } } - mRsmpInIndex = mFrameCount; - mBytesRead = 0; - if (mResampler != NULL) { - mResampler->reset(); + // Catch up with current buffer indices if thread is already running. + // This is what makes a new client discard all buffered data. If the track's mRsmpInFront + // was initialized to some value closer to the thread's mRsmpInFront, then the track could + // see previously buffered data before it called start(), but with greater risk of overrun. + + recordTrack->mRsmpInFront = mRsmpInRear; + recordTrack->mRsmpInUnrel = 0; + // FIXME why reset? + if (recordTrack->mResampler != NULL) { + recordTrack->mResampler->reset(); } - mActiveTrack->mState = TrackBase::RESUMING; + recordTrack->mState = TrackBase::STARTING_2; // signal thread to start - ALOGV("Signal record thread"); mWaitWorkCV.broadcast(); - // do not wait for mStartStopCond if exiting - if (exitPending()) { - mActiveTrack.clear(); - status = INVALID_OPERATION; - goto startError; - } - mStartStopCond.wait(mLock); - if (mActiveTrack == 0) { + if (mActiveTracks.indexOf(recordTrack) < 0) { ALOGV("Record failed to start"); status = BAD_VALUE; goto startError; } - ALOGV("Record started OK"); return status; } startError: - AudioSystem::stopInput(mId); - clearSyncStartEvent(); - return status; -} - -void AudioFlinger::RecordThread::clearSyncStartEvent() -{ - if (mSyncStartEvent != 0) { - mSyncStartEvent->cancel(); + if (recordTrack->isExternalTrack()) { + AudioSystem::stopInput(mId, (audio_session_t)recordTrack->sessionId()); } - mSyncStartEvent.clear(); - mFramestoDrop = 0; + recordTrack->clearSyncStartEvent(); + // FIXME I wonder why we do not reset the state here? 
+ return status; } void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event) @@ -4865,46 +5739,42 @@ void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& eve sp<SyncEvent> strongEvent = event.promote(); if (strongEvent != 0) { - RecordThread *me = (RecordThread *)strongEvent->cookie(); - me->handleSyncStartEvent(strongEvent); - } -} - -void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event) -{ - if (event == mSyncStartEvent) { - // TODO: use actual buffer filling status instead of 2 buffers when info is available - // from audio HAL - mFramestoDrop = mFrameCount * 2; + sp<RefBase> ptr = strongEvent->cookie().promote(); + if (ptr != 0) { + RecordTrack *recordTrack = (RecordTrack *)ptr.get(); + recordTrack->handleSyncStartEvent(strongEvent); + } } } bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) { ALOGV("RecordThread::stop"); AutoMutex _l(mLock); - if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) { + if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) { return false; } + // note that threadLoop may still be processing the track at this point [without lock] recordTrack->mState = TrackBase::PAUSING; // do not wait for mStartStopCond if exiting if (exitPending()) { return true; } + // FIXME incorrect usage of wait: no explicit predicate or loop mStartStopCond.wait(mLock); - // if we have been restarted, recordTrack == mActiveTrack.get() here - if (exitPending() || recordTrack != mActiveTrack.get()) { + // if we have been restarted, recordTrack is in mActiveTracks here + if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) { ALOGV("Record stopped OK"); return true; } return false; } -bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event) const +bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event __unused) const { return false; } -status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event) +status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event __unused) { #if 0 // This branch is currently dead code, but is preserved in case it will be needed in future if (!isValidSyncEvent(event)) { @@ -4935,7 +5805,7 @@ void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track) track->terminate(); track->mState = TrackBase::STOPPED; // active tracks are removed by threadLoop() - if (mActiveTrack != track) { + if (mActiveTracks.indexOf(track) < 0) { removeTrack_l(track); } } @@ -4944,6 +5814,10 @@ void AudioFlinger::RecordThread::removeTrack_l(const sp<RecordTrack>& track) { mTracks.remove(track); // need anything related to effects here? 
+ if (track->isFastTrack()) { + ALOG_ASSERT(!mFastTrackAvail); + mFastTrackAvail = true; + } } void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args) @@ -4955,217 +5829,236 @@ void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args) void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args) { - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; + dprintf(fd, "\nInput thread %p:\n", this); - snprintf(buffer, SIZE, "\nInput thread %p internals\n", this); - result.append(buffer); - - if (mActiveTrack != 0) { - snprintf(buffer, SIZE, "In index: %zu\n", mRsmpInIndex); - result.append(buffer); - snprintf(buffer, SIZE, "Buffer size: %zu bytes\n", mBufferSize); - result.append(buffer); - snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL)); - result.append(buffer); - snprintf(buffer, SIZE, "Out channel count: %u\n", mReqChannelCount); - result.append(buffer); - snprintf(buffer, SIZE, "Out sample rate: %u\n", mReqSampleRate); - result.append(buffer); + if (mActiveTracks.size() > 0) { + dprintf(fd, " Buffer size: %zu bytes\n", mBufferSize); } else { - result.append("No active record client\n"); + dprintf(fd, " No active record clients\n"); } - - write(fd, result.string(), result.size()); + dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no"); + dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no"); dumpBase(fd, args); } -void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args) +void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; String8 result; - snprintf(buffer, SIZE, "Input thread %p tracks\n", this); - result.append(buffer); - RecordTrack::appendDumpHeader(result); - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<RecordTrack> track = mTracks[i]; - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); + size_t numtracks = mTracks.size(); + size_t numactive = mActiveTracks.size(); + size_t numactiveseen = 0; + dprintf(fd, " %d Tracks", numtracks); + if (numtracks) { + dprintf(fd, " of which %d are active\n", numactive); + RecordTrack::appendDumpHeader(result); + for (size_t i = 0; i < numtracks ; ++i) { + sp<RecordTrack> track = mTracks[i]; + if (track != 0) { + bool active = mActiveTracks.indexOf(track) >= 0; + if (active) { + numactiveseen++; + } + track->dump(buffer, SIZE, active); + result.append(buffer); + } } + } else { + dprintf(fd, "\n"); } - if (mActiveTrack != 0) { - snprintf(buffer, SIZE, "\nInput thread %p active tracks\n", this); + if (numactiveseen != numactive) { + snprintf(buffer, SIZE, " The following tracks are in the active list but" + " not in the track list\n"); result.append(buffer); RecordTrack::appendDumpHeader(result); - mActiveTrack->dump(buffer, SIZE); - result.append(buffer); + for (size_t i = 0; i < numactive; ++i) { + sp<RecordTrack> track = mActiveTracks[i]; + if (mTracks.indexOf(track) < 0) { + track->dump(buffer, SIZE, true); + result.append(buffer); + } + } } write(fd, result.string(), result.size()); } // AudioBufferProvider interface -status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) -{ - size_t framesReq = buffer->frameCount; - size_t framesReady = mFrameCount - mRsmpInIndex; - int channelCount; - - if (framesReady == 0) { - mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mBufferSize); - if (mBytesRead <= 0) { - if ((mBytesRead < 0) 
&& (mActiveTrack->mState == TrackBase::ACTIVE)) { - ALOGE("RecordThread::getNextBuffer() Error reading audio input"); - // Force input into standby so that it tries to - // recover at next read attempt - inputStandBy(); - usleep(kRecordThreadSleepUs); - } - buffer->raw = NULL; - buffer->frameCount = 0; - return NOT_ENOUGH_DATA; - } - mRsmpInIndex = 0; - framesReady = mFrameCount; - } - - if (framesReq > framesReady) { - framesReq = framesReady; - } - - if (mChannelCount == 1 && mReqChannelCount == 2) { - channelCount = 1; - } else { - channelCount = 2; - } - buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount; - buffer->frameCount = framesReq; +status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer( + AudioBufferProvider::Buffer* buffer, int64_t pts __unused) +{ + RecordTrack *activeTrack = mRecordTrack; + sp<ThreadBase> threadBase = activeTrack->mThread.promote(); + if (threadBase == 0) { + buffer->frameCount = 0; + buffer->raw = NULL; + return NOT_ENOUGH_DATA; + } + RecordThread *recordThread = (RecordThread *) threadBase.get(); + int32_t rear = recordThread->mRsmpInRear; + int32_t front = activeTrack->mRsmpInFront; + ssize_t filled = rear - front; + // FIXME should not be P2 (don't want to increase latency) + // FIXME if client not keeping up, discard + LOG_ALWAYS_FATAL_IF(!(0 <= filled && (size_t) filled <= recordThread->mRsmpInFrames)); + // 'filled' may be non-contiguous, so return only the first contiguous chunk + front &= recordThread->mRsmpInFramesP2 - 1; + size_t part1 = recordThread->mRsmpInFramesP2 - front; + if (part1 > (size_t) filled) { + part1 = filled; + } + size_t ask = buffer->frameCount; + ALOG_ASSERT(ask > 0); + if (part1 > ask) { + part1 = ask; + } + if (part1 == 0) { + // Higher-level should keep mRsmpInBuffer full, and not call resampler if empty + LOG_ALWAYS_FATAL("RecordThread::getNextBuffer() starved"); + buffer->raw = NULL; + buffer->frameCount = 0; + activeTrack->mRsmpInUnrel = 0; + return NOT_ENOUGH_DATA; + } + + buffer->raw = recordThread->mRsmpInBuffer + front * recordThread->mChannelCount; + buffer->frameCount = part1; + activeTrack->mRsmpInUnrel = part1; return NO_ERROR; } // AudioBufferProvider interface -void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer) +void AudioFlinger::RecordThread::ResamplerBufferProvider::releaseBuffer( + AudioBufferProvider::Buffer* buffer) { - mRsmpInIndex += buffer->frameCount; + RecordTrack *activeTrack = mRecordTrack; + size_t stepCount = buffer->frameCount; + if (stepCount == 0) { + return; + } + ALOG_ASSERT(stepCount <= activeTrack->mRsmpInUnrel); + activeTrack->mRsmpInUnrel -= stepCount; + activeTrack->mRsmpInFront += stepCount; + buffer->raw = NULL; buffer->frameCount = 0; } -bool AudioFlinger::RecordThread::checkForNewParameters_l() +bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair, + status_t& status) { bool reconfig = false; - while (!mNewParameters.isEmpty()) { - status_t status = NO_ERROR; - String8 keyValuePair = mNewParameters[0]; - AudioParameter param = AudioParameter(keyValuePair); - int value; - audio_format_t reqFormat = mFormat; - uint32_t reqSamplingRate = mReqSampleRate; - uint32_t reqChannelCount = mReqChannelCount; - - if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { - reqSamplingRate = value; + status = NO_ERROR; + + audio_format_t reqFormat = mFormat; + uint32_t samplingRate = mSampleRate; + audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(mChannelCount); + + 
AudioParameter param = AudioParameter(keyValuePair); + int value; + // TODO Investigate when this code runs. Check with audio policy when a sample rate and + // channel count change can be requested. Do we mandate the first client defines the + // HAL sampling rate and channel count or do we allow changes on the fly? + if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { + samplingRate = value; + reconfig = true; + } + if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { + if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) { + status = BAD_VALUE; + } else { + reqFormat = (audio_format_t) value; reconfig = true; } - if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { - if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) { - status = BAD_VALUE; - } else { - reqFormat = (audio_format_t) value; - reconfig = true; - } + } + if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { + audio_channel_mask_t mask = (audio_channel_mask_t) value; + if (mask != AUDIO_CHANNEL_IN_MONO && mask != AUDIO_CHANNEL_IN_STEREO) { + status = BAD_VALUE; + } else { + channelMask = mask; + reconfig = true; } - if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { - reqChannelCount = popcount(value); + } + if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { + // do not accept frame count changes if tracks are open as the track buffer + // size depends on frame count and correct behavior would not be guaranteed + // if frame count is changed after track creation + if (mActiveTracks.size() > 0) { + status = INVALID_OPERATION; + } else { reconfig = true; } - if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { - // do not accept frame count changes if tracks are open as the track buffer - // size depends on frame count and correct behavior would not be guaranteed - // if frame count is changed after track creation - if (mActiveTrack != 0) { - status = INVALID_OPERATION; - } else { - reconfig = true; - } + } + if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { + // forward device change to effects that have requested to be + // aware of attached audio device. + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setDevice_l(value); } - if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { - // forward device change to effects that have requested to be - // aware of attached audio device. - for (size_t i = 0; i < mEffectChains.size(); i++) { - mEffectChains[i]->setDevice_l(value); - } - // store input device and output device but do not forward output device to audio HAL. - // Note that status is ignored by the caller for output device - // (see AudioFlinger::setParameters() - if (audio_is_output_devices(value)) { - mOutDevice = value; - status = BAD_VALUE; - } else { - mInDevice = value; - // disable AEC and NS if the device is a BT SCO headset supporting those - // pre processings - if (mTracks.size() > 0) { - bool suspend = audio_is_bluetooth_sco_device(mInDevice) && - mAudioFlinger->btNrecIsOff(); - for (size_t i = 0; i < mTracks.size(); i++) { - sp<RecordTrack> track = mTracks[i]; - setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId()); - setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId()); - } + // store input device and output device but do not forward output device to audio HAL. 
+ // Note that status is ignored by the caller for output device + // (see AudioFlinger::setParameters() + if (audio_is_output_devices(value)) { + mOutDevice = value; + status = BAD_VALUE; + } else { + mInDevice = value; + // disable AEC and NS if the device is a BT SCO headset supporting those + // pre processings + if (mTracks.size() > 0) { + bool suspend = audio_is_bluetooth_sco_device(mInDevice) && + mAudioFlinger->btNrecIsOff(); + for (size_t i = 0; i < mTracks.size(); i++) { + sp<RecordTrack> track = mTracks[i]; + setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId()); + setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId()); } } } - if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR && - mAudioSource != (audio_source_t)value) { - // forward device change to effects that have requested to be - // aware of attached audio device. - for (size_t i = 0; i < mEffectChains.size(); i++) { - mEffectChains[i]->setAudioSource_l((audio_source_t)value); - } - mAudioSource = (audio_source_t)value; + } + if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR && + mAudioSource != (audio_source_t)value) { + // forward device change to effects that have requested to be + // aware of attached audio device. + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setAudioSource_l((audio_source_t)value); } - if (status == NO_ERROR) { + mAudioSource = (audio_source_t)value; + } + + if (status == NO_ERROR) { + status = mInput->stream->common.set_parameters(&mInput->stream->common, + keyValuePair.string()); + if (status == INVALID_OPERATION) { + inputStandBy(); status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string()); - if (status == INVALID_OPERATION) { - inputStandBy(); - status = mInput->stream->common.set_parameters(&mInput->stream->common, - keyValuePair.string()); + } + if (reconfig) { + if (status == BAD_VALUE && + reqFormat == mInput->stream->common.get_format(&mInput->stream->common) && + reqFormat == AUDIO_FORMAT_PCM_16_BIT && + (mInput->stream->common.get_sample_rate(&mInput->stream->common) + <= (2 * samplingRate)) && + audio_channel_count_from_in_mask( + mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 && + (channelMask == AUDIO_CHANNEL_IN_MONO || + channelMask == AUDIO_CHANNEL_IN_STEREO)) { + status = NO_ERROR; } - if (reconfig) { - if (status == BAD_VALUE && - reqFormat == mInput->stream->common.get_format(&mInput->stream->common) && - reqFormat == AUDIO_FORMAT_PCM_16_BIT && - (mInput->stream->common.get_sample_rate(&mInput->stream->common) - <= (2 * reqSamplingRate)) && - popcount(mInput->stream->common.get_channels(&mInput->stream->common)) - <= FCC_2 && - (reqChannelCount <= FCC_2)) { - status = NO_ERROR; - } - if (status == NO_ERROR) { - readInputParameters(); - sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED); - } + if (status == NO_ERROR) { + readInputParameters_l(); + sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED); } } - - mNewParameters.removeAt(0); - - mParamStatus = status; - mParamCond.signal(); - // wait for condition with time out in case the thread calling ThreadBase::setParameters() - // already timed out waiting for the status and will never signal the condition. 
- mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs); } + return reconfig; } @@ -5182,9 +6075,9 @@ String8 AudioFlinger::RecordThread::getParameters(const String8& keys) return out_s8; } -void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) { +void AudioFlinger::RecordThread::audioConfigChanged(int event, int param __unused) { AudioSystem::OutputDescriptor desc; - void *param2 = NULL; + const void *param2 = NULL; switch (event) { case AudioSystem::INPUT_OPENED: @@ -5201,56 +6094,47 @@ void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) { default: break; } - mAudioFlinger->audioConfigChanged_l(event, mId, param2); + mAudioFlinger->audioConfigChanged(event, mId, param2); } -void AudioFlinger::RecordThread::readInputParameters() +void AudioFlinger::RecordThread::readInputParameters_l() { - delete[] mRsmpInBuffer; - // mRsmpInBuffer is always assigned a new[] below - delete[] mRsmpOutBuffer; - mRsmpOutBuffer = NULL; - delete mResampler; - mResampler = NULL; - mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common); mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common); - mChannelCount = popcount(mChannelMask); - mFormat = mInput->stream->common.get_format(&mInput->stream->common); + mChannelCount = audio_channel_count_from_in_mask(mChannelMask); + mHALFormat = mInput->stream->common.get_format(&mInput->stream->common); + mFormat = mHALFormat; if (mFormat != AUDIO_FORMAT_PCM_16_BIT) { - ALOGE("HAL format %d not supported; must be AUDIO_FORMAT_PCM_16_BIT", mFormat); + ALOGE("HAL format %#x not supported; must be AUDIO_FORMAT_PCM_16_BIT", mFormat); } - mFrameSize = audio_stream_frame_size(&mInput->stream->common); + mFrameSize = audio_stream_in_frame_size(mInput->stream); mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common); mFrameCount = mBufferSize / mFrameSize; - mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount]; + // This is the formula for calculating the temporary buffer size. + // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to + // 1 full output buffer, regardless of the alignment of the available input. + // The value is somewhat arbitrary, and could probably be even larger. + // A larger value should allow more old data to be read after a track calls start(), + // without increasing latency. + mRsmpInFrames = mFrameCount * 7; + mRsmpInFramesP2 = roundup(mRsmpInFrames); + delete[] mRsmpInBuffer; - if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) - { - int channelCount; - // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid - // stereo to mono post process as the resampler always outputs stereo. - if (mChannelCount == 1 && mReqChannelCount == 2) { - channelCount = 1; - } else { - channelCount = 2; - } - mResampler = AudioResampler::create(16, channelCount, mReqSampleRate); - mResampler->setSampleRate(mSampleRate); - mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN); - mRsmpOutBuffer = new int32_t[mFrameCount * FCC_2]; + // TODO optimize audio capture buffer sizes ... + // Here we calculate the size of the sliding buffer used as a source + // for resampling. mRsmpInFramesP2 is currently roundup(mFrameCount * 7). + // For current HAL frame counts, this is usually 2048 = 40 ms. It would + // be better to have it derived from the pipe depth in the long term. + // The current value is higher than necessary. 
However it should not add to latency. - // optmization: if mono to mono, alter input frame count as if we were inputing - // stereo samples - if (mChannelCount == 1 && mReqChannelCount == 1) { - mFrameCount >>= 1; - } + // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer + mRsmpInBuffer = new int16_t[(mRsmpInFramesP2 + mFrameCount - 1) * mChannelCount]; - } - mRsmpInIndex = mFrameCount; + // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints. + // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks? } -unsigned int AudioFlinger::RecordThread::getInputFramesLost() +uint32_t AudioFlinger::RecordThread::getInputFramesLost() { Mutex::Autolock _l(mLock); if (initCheck() != NO_ERROR) { @@ -5313,15 +6197,20 @@ status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& cha { // only one chain per input thread if (mEffectChains.size() != 0) { + ALOGW("addEffectChain_l() already one chain %p on thread %p", chain.get(), this); return INVALID_OPERATION; } ALOGV("addEffectChain_l() %p on thread %p", chain.get(), this); - + chain->setThread(this); chain->setInBuffer(NULL); chain->setOutBuffer(NULL); checkSuspendOnAddEffectChain_l(chain); + // make sure enabled pre processing effects state is communicated to the HAL as we + // just moved them to a new input stream. + chain->syncHalEffectsState(); + mEffectChains.add(chain); return NO_ERROR; @@ -5339,4 +6228,80 @@ size_t AudioFlinger::RecordThread::removeEffectChain_l(const sp<EffectChain>& ch return 0; } +status_t AudioFlinger::RecordThread::createAudioPatch_l(const struct audio_patch *patch, + audio_patch_handle_t *handle) +{ + status_t status = NO_ERROR; + if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + // store new device and send to effects + mInDevice = patch->sources[0].ext.device.type; + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setDevice_l(mInDevice); + } + + // disable AEC and NS if the device is a BT SCO headset supporting those + // pre processings + if (mTracks.size() > 0) { + bool suspend = audio_is_bluetooth_sco_device(mInDevice) && + mAudioFlinger->btNrecIsOff(); + for (size_t i = 0; i < mTracks.size(); i++) { + sp<RecordTrack> track = mTracks[i]; + setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId()); + setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId()); + } + } + + // store new source and send to effects + if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) { + mAudioSource = patch->sinks[0].ext.mix.usecase.source; + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setAudioSource_l(mAudioSource); + } + } + + audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice(); + status = hwDevice->create_audio_patch(hwDevice, + patch->num_sources, + patch->sources, + patch->num_sinks, + patch->sinks, + handle); + } else { + ALOG_ASSERT(false, "createAudioPatch_l() called on a pre 3.0 HAL"); + } + return status; +} + +status_t AudioFlinger::RecordThread::releaseAudioPatch_l(const audio_patch_handle_t handle) +{ + status_t status = NO_ERROR; + if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) { + audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice(); + status = hwDevice->release_audio_patch(hwDevice, handle); + } else { + ALOG_ASSERT(false, "releaseAudioPatch_l() called on a pre 3.0 HAL"); + } + return status; +} + +void AudioFlinger::RecordThread::addPatchRecord(const sp<PatchRecord>& record) +{ + 
Mutex::Autolock _l(mLock); + mTracks.add(record); +} + +void AudioFlinger::RecordThread::deletePatchRecord(const sp<PatchRecord>& record) +{ + Mutex::Autolock _l(mLock); + destroyTrack_l(record); +} + +void AudioFlinger::RecordThread::getAudioPortConfig(struct audio_port_config *config) +{ + ThreadBase::getAudioPortConfig(config); + config->role = AUDIO_PORT_ROLE_SINK; + config->ext.mix.hw_module = mInput->audioHwDev->handle(); + config->ext.mix.usecase.source = mAudioSource; +} + }; // namespace android diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h index a2fb874..bb9aa18 100644 --- a/services/audioflinger/Threads.h +++ b/services/audioflinger/Threads.h @@ -36,6 +36,8 @@ public: audio_devices_t outDevice, audio_devices_t inDevice, type_t type); virtual ~ThreadBase(); + virtual status_t readyToRun(); + void dumpBase(int fd, const Vector<String16>& args); void dumpEffectChains(int fd, const Vector<String16>& args); @@ -44,60 +46,169 @@ public: // base for record and playback enum { CFG_EVENT_IO, - CFG_EVENT_PRIO + CFG_EVENT_PRIO, + CFG_EVENT_SET_PARAMETER, + CFG_EVENT_CREATE_AUDIO_PATCH, + CFG_EVENT_RELEASE_AUDIO_PATCH, + }; + + class ConfigEventData: public RefBase { + public: + virtual ~ConfigEventData() {} + + virtual void dump(char *buffer, size_t size) = 0; + protected: + ConfigEventData() {} }; - class ConfigEvent { + // Config event sequence by client if status needed (e.g binder thread calling setParameters()): + // 1. create SetParameterConfigEvent. This sets mWaitStatus in config event + // 2. Lock mLock + // 3. Call sendConfigEvent_l(): Append to mConfigEvents and mWaitWorkCV.signal + // 4. sendConfigEvent_l() reads status from event->mStatus; + // 5. sendConfigEvent_l() returns status + // 6. Unlock + // + // Parameter sequence by server: threadLoop calling processConfigEvents_l(): + // 1. Lock mLock + // 2. If there is an entry in mConfigEvents proceed ... + // 3. Read first entry in mConfigEvents + // 4. Remove first entry from mConfigEvents + // 5. Process + // 6. Set event->mStatus + // 7. event->mCond.signal + // 8. Unlock + + class ConfigEvent: public RefBase { public: - ConfigEvent(int type) : mType(type) {} virtual ~ConfigEvent() {} - int type() const { return mType; } + void dump(char *buffer, size_t size) { mData->dump(buffer, size); } - virtual void dump(char *buffer, size_t size) = 0; + const int mType; // event type e.g. 
CFG_EVENT_IO + Mutex mLock; // mutex associated with mCond + Condition mCond; // condition for status return + status_t mStatus; // status communicated to sender + bool mWaitStatus; // true if sender is waiting for status + sp<ConfigEventData> mData; // event specific parameter data - private: - const int mType; + protected: + ConfigEvent(int type) : mType(type), mStatus(NO_ERROR), mWaitStatus(false), mData(NULL) {} }; - class IoConfigEvent : public ConfigEvent { + class IoConfigEventData : public ConfigEventData { public: - IoConfigEvent(int event, int param) : - ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(event) {} - virtual ~IoConfigEvent() {} - - int event() const { return mEvent; } - int param() const { return mParam; } + IoConfigEventData(int event, int param) : + mEvent(event), mParam(param) {} virtual void dump(char *buffer, size_t size) { snprintf(buffer, size, "IO event: event %d, param %d\n", mEvent, mParam); } - private: const int mEvent; const int mParam; }; - class PrioConfigEvent : public ConfigEvent { + class IoConfigEvent : public ConfigEvent { public: - PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) : - ConfigEvent(CFG_EVENT_PRIO), mPid(pid), mTid(tid), mPrio(prio) {} - virtual ~PrioConfigEvent() {} + IoConfigEvent(int event, int param) : + ConfigEvent(CFG_EVENT_IO) { + mData = new IoConfigEventData(event, param); + } + virtual ~IoConfigEvent() {} + }; - pid_t pid() const { return mPid; } - pid_t tid() const { return mTid; } - int32_t prio() const { return mPrio; } + class PrioConfigEventData : public ConfigEventData { + public: + PrioConfigEventData(pid_t pid, pid_t tid, int32_t prio) : + mPid(pid), mTid(tid), mPrio(prio) {} virtual void dump(char *buffer, size_t size) { snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d\n", mPid, mTid, mPrio); } - private: const pid_t mPid; const pid_t mTid; const int32_t mPrio; }; + class PrioConfigEvent : public ConfigEvent { + public: + PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) : + ConfigEvent(CFG_EVENT_PRIO) { + mData = new PrioConfigEventData(pid, tid, prio); + } + virtual ~PrioConfigEvent() {} + }; + + class SetParameterConfigEventData : public ConfigEventData { + public: + SetParameterConfigEventData(String8 keyValuePairs) : + mKeyValuePairs(keyValuePairs) {} + + virtual void dump(char *buffer, size_t size) { + snprintf(buffer, size, "KeyValue: %s\n", mKeyValuePairs.string()); + } + + const String8 mKeyValuePairs; + }; + + class SetParameterConfigEvent : public ConfigEvent { + public: + SetParameterConfigEvent(String8 keyValuePairs) : + ConfigEvent(CFG_EVENT_SET_PARAMETER) { + mData = new SetParameterConfigEventData(keyValuePairs); + mWaitStatus = true; + } + virtual ~SetParameterConfigEvent() {} + }; + + class CreateAudioPatchConfigEventData : public ConfigEventData { + public: + CreateAudioPatchConfigEventData(const struct audio_patch patch, + audio_patch_handle_t handle) : + mPatch(patch), mHandle(handle) {} + + virtual void dump(char *buffer, size_t size) { + snprintf(buffer, size, "Patch handle: %u\n", mHandle); + } + + const struct audio_patch mPatch; + audio_patch_handle_t mHandle; + }; + + class CreateAudioPatchConfigEvent : public ConfigEvent { + public: + CreateAudioPatchConfigEvent(const struct audio_patch patch, + audio_patch_handle_t handle) : + ConfigEvent(CFG_EVENT_CREATE_AUDIO_PATCH) { + mData = new CreateAudioPatchConfigEventData(patch, handle); + mWaitStatus = true; + } + virtual ~CreateAudioPatchConfigEvent() {} + }; + + class ReleaseAudioPatchConfigEventData : public 
ConfigEventData { + public: + ReleaseAudioPatchConfigEventData(const audio_patch_handle_t handle) : + mHandle(handle) {} + + virtual void dump(char *buffer, size_t size) { + snprintf(buffer, size, "Patch handle: %u\n", mHandle); + } + + audio_patch_handle_t mHandle; + }; + + class ReleaseAudioPatchConfigEvent : public ConfigEvent { + public: + ReleaseAudioPatchConfigEvent(const audio_patch_handle_t handle) : + ConfigEvent(CFG_EVENT_RELEASE_AUDIO_PATCH) { + mData = new ReleaseAudioPatchConfigEventData(handle); + mWaitStatus = true; + } + virtual ~ReleaseAudioPatchConfigEvent() {} + }; class PMDeathRecipient : public IBinder::DeathRecipient { public: @@ -122,9 +233,9 @@ public: // dynamic externally-visible uint32_t sampleRate() const { return mSampleRate; } - uint32_t channelCount() const { return mChannelCount; } audio_channel_mask_t channelMask() const { return mChannelMask; } - audio_format_t format() const { return mFormat; } + audio_format_t format() const { return mHALFormat; } + uint32_t channelCount() const { return mChannelCount; } // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects, // and returns the [normal mix] buffer's frame count. virtual size_t frameCount() const = 0; @@ -133,14 +244,29 @@ public: // Should be "virtual status_t requestExitAndWait()" and override same // method in Thread, but Thread::requestExitAndWait() is not yet virtual. void exit(); - virtual bool checkForNewParameters_l() = 0; + virtual bool checkForNewParameter_l(const String8& keyValuePair, + status_t& status) = 0; virtual status_t setParameters(const String8& keyValuePairs); virtual String8 getParameters(const String8& keys) = 0; - virtual void audioConfigChanged_l(int event, int param = 0) = 0; + virtual void audioConfigChanged(int event, int param = 0) = 0; + // sendConfigEvent_l() must be called with ThreadBase::mLock held + // Can temporarily release the lock if waiting for a reply from + // processConfigEvents_l(). 
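Aside: the diff adds the ConfigEvent classes and the sequence comments above, but the body of sendConfigEvent_l() falls outside these hunks. Based only on that documented sequence (append to mConfigEvents, signal mWaitWorkCV, temporarily drop ThreadBase::mLock, wait on the event's condition, read mStatus), a plausible shape is sketched below as an illustration, not the actual implementation:

    status_t AudioFlinger::ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event)
    {
        // step 3 of the documented client sequence: queue the event and wake the thread loop
        mConfigEvents.add(event);
        mWaitWorkCV.signal();
        // temporarily release ThreadBase::mLock while processConfigEvents_l() handles the event
        mLock.unlock();
        status_t status;
        {
            Mutex::Autolock _l(event->mLock);
            while (event->mWaitStatus) {
                event->mCond.wait(event->mLock);   // step 7 on the server side signals this
            }
            status = event->mStatus;               // steps 4-5: read and return the status
        }
        mLock.lock();
        return status;
    }
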
+ status_t sendConfigEvent_l(sp<ConfigEvent>& event); void sendIoConfigEvent(int event, int param = 0); void sendIoConfigEvent_l(int event, int param = 0); void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio); - void processConfigEvents(); + status_t sendSetParameterConfigEvent_l(const String8& keyValuePair); + status_t sendCreateAudioPatchConfigEvent(const struct audio_patch *patch, + audio_patch_handle_t *handle); + status_t sendReleaseAudioPatchConfigEvent(audio_patch_handle_t handle); + void processConfigEvents_l(); + virtual void cacheParameters_l() = 0; + virtual status_t createAudioPatch_l(const struct audio_patch *patch, + audio_patch_handle_t *handle) = 0; + virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle) = 0; + virtual void getAudioPortConfig(struct audio_port_config *config) = 0; + // see note at declaration of mStandby, mOutDevice and mInDevice bool standby() const { return mStandby; } @@ -156,10 +282,7 @@ public: int sessionId, effect_descriptor_t *desc, int *enabled, - status_t *status); - void disconnectEffect(const sp< EffectModule>& effect, - EffectHandle *handle, - bool unpinIfLast); + status_t *status /*non-NULL*/); // return values for hasAudioSession (bit field) enum effect_state { @@ -198,13 +321,13 @@ public: // effect void removeEffect_l(const sp< EffectModule>& effect); // detach all tracks connected to an auxiliary effect - virtual void detachAuxEffect_l(int effectId) {} + virtual void detachAuxEffect_l(int effectId __unused) {} // returns either EFFECT_SESSION if effects on this audio session exist in one // chain, or TRACK_SESSION if tracks on this audio session exist, or both virtual uint32_t hasAudioSession(int sessionId) const = 0; // the value returned by default implementation is not important as the // strategy is only meaningful for PlaybackThread which implements this method - virtual uint32_t getStrategyForSession_l(int sessionId) { return 0; } + virtual uint32_t getStrategyForSession_l(int sessionId __unused) { return 0; } // suspend or restore effect according to the type of effect passed. a NULL // type pointer means suspend all effects in the session @@ -223,6 +346,15 @@ public: virtual status_t setSyncEvent(const sp<SyncEvent>& event) = 0; virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const = 0; + // Return a reference to a per-thread heap which can be used to allocate IMemory + // objects that will be read-only to client processes, read/write to mediaserver, + // and shared by all client processes of the thread. + // The heap is per-thread rather than common across all threads, because + // clients can't be trusted not to modify the offset of the IMemory they receive. + // If a thread does not have such a heap, this method returns 0. + virtual sp<MemoryDealer> readOnlyHeap() const { return 0; } + + virtual sp<IMemory> pipeMemory() const { return 0; } mutable Mutex mLock; @@ -267,48 +399,29 @@ protected: const sp<AudioFlinger> mAudioFlinger; - // updated by PlaybackThread::readOutputParameters() or - // RecordThread::readInputParameters() + // updated by PlaybackThread::readOutputParameters_l() or + // RecordThread::readInputParameters_l() uint32_t mSampleRate; size_t mFrameCount; // output HAL, direct output, record audio_channel_mask_t mChannelMask; uint32_t mChannelCount; size_t mFrameSize; - audio_format_t mFormat; - - // Parameter sequence by client: binder thread calling setParameters(): - // 1. Lock mLock - // 2. Append to mNewParameters - // 3. mWaitWorkCV.signal - // 4. 
mParamCond.waitRelative with timeout - // 5. read mParamStatus - // 6. mWaitWorkCV.signal - // 7. Unlock - // - // Parameter sequence by server: threadLoop calling checkForNewParameters_l(): - // 1. Lock mLock - // 2. If there is an entry in mNewParameters proceed ... - // 2. Read first entry in mNewParameters - // 3. Process - // 4. Remove first entry from mNewParameters - // 5. Set mParamStatus - // 6. mParamCond.signal - // 7. mWaitWorkCV.wait with timeout (this is to avoid overwriting mParamStatus) - // 8. Unlock - Condition mParamCond; - Vector<String8> mNewParameters; - status_t mParamStatus; - - // vector owns each ConfigEvent *, so must delete after removing - Vector<ConfigEvent *> mConfigEvents; + audio_format_t mFormat; // Source format for Recording and + // Sink format for Playback. + // Sink format may be different than + // HAL format if Fastmixer is used. + audio_format_t mHALFormat; + size_t mBufferSize; // HAL buffer size for read() or write() + + Vector< sp<ConfigEvent> > mConfigEvents; // These fields are written and read by thread itself without lock or barrier, - // and read by other threads without lock or barrier via standby() , outDevice() + // and read by other threads without lock or barrier via standby(), outDevice() // and inDevice(). // Because of the absence of a lock or barrier, any other thread that reads // these fields must use the information in isolation, or be prepared to deal // with possibility that it might be inconsistent with other information. - bool mStandby; // Whether thread is currently in standby. + bool mStandby; // Whether thread is currently in standby. audio_devices_t mOutDevice; // output device audio_devices_t mInDevice; // input device audio_source_t mAudioSource; // (see audio.h, audio_source_t) @@ -358,7 +471,6 @@ public: void dump(int fd, const Vector<String16>& args); // Thread virtuals - virtual status_t readyToRun(); virtual bool threadLoop(); // RefBase @@ -391,7 +503,7 @@ protected: virtual bool waitingAsyncCallback(); virtual bool waitingAsyncCallback_l(); virtual bool shouldStandby_l(); - + virtual void onAddNewTrack_l(); // ThreadBase virtuals virtual void preExit(); @@ -419,13 +531,13 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, const sp<IMemory>& sharedBuffer, int sessionId, IAudioFlinger::track_flags_t *flags, pid_t tid, int uid, - status_t *status); + status_t *status /*non-NULL*/); AudioStreamOut* getOutput() const; AudioStreamOut* clearOutput(); @@ -445,9 +557,13 @@ public: { return android_atomic_acquire_load(&mSuspended) > 0; } virtual String8 getParameters(const String8& keys); - virtual void audioConfigChanged_l(int event, int param = 0); + virtual void audioConfigChanged(int event, int param = 0); status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames); - int16_t *mixBuffer() const { return mMixBuffer; }; + // FIXME rename mixBuffer() to sinkBuffer() and remove int16_t* dependency. + // Consider also removing and passing an explicit mMainBuffer initialization + // parameter to AF::PlaybackThread::Track::Track(). + int16_t *mixBuffer() const { + return reinterpret_cast<int16_t *>(mSinkBuffer); }; virtual void detachAuxEffect_l(int effectId); status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, @@ -472,14 +588,76 @@ public: // Return's the HAL's frame count i.e. fast mixer buffer size. 
size_t frameCountHAL() const { return mFrameCount; } - status_t getTimestamp_l(AudioTimestamp& timestamp); + status_t getTimestamp_l(AudioTimestamp& timestamp); + + void addPatchTrack(const sp<PatchTrack>& track); + void deletePatchTrack(const sp<PatchTrack>& track); + + virtual void getAudioPortConfig(struct audio_port_config *config); protected: - // updated by readOutputParameters() + // updated by readOutputParameters_l() size_t mNormalFrameCount; // normal mixer and effects - int16_t* mMixBuffer; // frame size aligned mix buffer - int8_t* mAllocMixBuffer; // mixer buffer allocation address + void* mSinkBuffer; // frame size aligned sink buffer + + // TODO: + // Rearrange the buffer info into a struct/class with + // clear, copy, construction, destruction methods. + // + // mSinkBuffer also has associated with it: + // + // mSinkBufferSize: Sink Buffer Size + // mFormat: Sink Buffer Format + + // Mixer Buffer (mMixerBuffer*) + // + // In the case of floating point or multichannel data, which is not in the + // sink format, it is required to accumulate in a higher precision or greater channel count + // buffer before downmixing or data conversion to the sink buffer. + + // Set to "true" to enable the Mixer Buffer otherwise mixer output goes to sink buffer. + bool mMixerBufferEnabled; + + // Storage, 32 byte aligned (may make this alignment a requirement later). + // Due to constraints on mNormalFrameCount, the buffer size is a multiple of 16 frames. + void* mMixerBuffer; + + // Size of mMixerBuffer in bytes: mNormalFrameCount * #channels * sampsize. + size_t mMixerBufferSize; + + // The audio format of mMixerBuffer. Set to AUDIO_FORMAT_PCM_(FLOAT|16_BIT) only. + audio_format_t mMixerBufferFormat; + + // An internal flag set to true by MixerThread::prepareTracks_l() + // when mMixerBuffer contains valid data after mixing. + bool mMixerBufferValid; + + // Effects Buffer (mEffectsBuffer*) + // + // In the case of effects data, which is not in the sink format, + // it is required to accumulate in a different buffer before data conversion + // to the sink buffer. + + // Set to "true" to enable the Effects Buffer otherwise effects output goes to sink buffer. + bool mEffectBufferEnabled; + + // Storage, 32 byte aligned (may make this alignment a requirement later). + // Due to constraints on mNormalFrameCount, the buffer size is a multiple of 16 frames. + void* mEffectBuffer; + + // Size of mEffectsBuffer in bytes: mNormalFrameCount * #channels * sampsize. + size_t mEffectBufferSize; + + // The audio format of mEffectsBuffer. Set to AUDIO_FORMAT_PCM_16_BIT only. + audio_format_t mEffectBufferFormat; + + // An internal flag set to true by MixerThread::prepareTracks_l() + // when mEffectsBuffer contains valid data after mixing. + // + // When this is set, all mixer data is routed into the effects buffer + // for any processing (including output processing). + bool mEffectBufferValid; // suspend count, > 0 means suspended. While suspended, the thread continues to pull from // tracks and mix, but doesn't write to HAL. A2DP and SCO HAL implementations can't handle @@ -505,7 +683,8 @@ protected: // Allocate a track name for a given channel mask. // Returns name >= 0 if successful, -1 on failure. 
- virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId) = 0; + virtual int getTrackName_l(audio_channel_mask_t channelMask, + audio_format_t format, int sessionId) = 0; virtual void deleteTrackName_l(int name) = 0; // Time to sleep between cycles when: @@ -527,11 +706,14 @@ protected: virtual uint32_t correctLatency_l(uint32_t latency) const; + virtual status_t createAudioPatch_l(const struct audio_patch *patch, + audio_patch_handle_t *handle); + virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle); + private: friend class AudioFlinger; // for numerous - PlaybackThread(const Client&); PlaybackThread& operator = (const PlaybackThread&); status_t addTrack_l(const sp<Track>& track); @@ -539,7 +721,7 @@ private: void removeTrack_l(const sp<Track>& track); void broadcast_l(); - void readOutputParameters(); + void readOutputParameters_l(); virtual void dumpInternals(int fd, const Vector<String16>& args); void dumpTracks(int fd, const Vector<String16>& args); @@ -558,7 +740,7 @@ private: // FIXME rename these former local variables of threadLoop to standard "m" names nsecs_t standbyTime; - size_t mixBufferSize; + size_t mSinkBufferSize; // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l() uint32_t activeSleepTime; @@ -623,13 +805,12 @@ private: sp<NBLog::Writer> mFastMixerNBLogWriter; public: virtual bool hasFastMixer() const = 0; - virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const + virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex __unused) const { FastTrackUnderruns dummy; return dummy; } protected: // accessed by both binder threads and within threadLoop(), lock on mutex needed unsigned mFastTrackAvailMask; // bit i set if fast track [i] is available - virtual void flushOutput_l(); private: // timestamp latch: @@ -638,8 +819,11 @@ private: struct { AudioTimestamp mTimestamp; uint32_t mUnpresentedFrames; + KeyedVector<Track *, uint32_t> mFramesReleased; } mLatchD, mLatchQ; - bool mLatchDValid; // true means mLatchD is valid, and clock it into latch at next opportunity + bool mLatchDValid; // true means mLatchD is valid + // (except for mFramesReleased which is filled in later), + // and clock it into latch at next opportunity bool mLatchQValid; // true means mLatchQ is valid }; @@ -654,12 +838,14 @@ public: // Thread virtuals - virtual bool checkForNewParameters_l(); + virtual bool checkForNewParameter_l(const String8& keyValuePair, + status_t& status); virtual void dumpInternals(int fd, const Vector<String16>& args); protected: virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove); - virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId); + virtual int getTrackName_l(audio_channel_mask_t channelMask, + audio_format_t format, int sessionId); virtual void deleteTrackName_l(int name); virtual uint32_t idleSleepTimeUs() const; virtual uint32_t suspendSleepTimeUs() const; @@ -676,7 +862,7 @@ protected: AudioMixer* mAudioMixer; // normal mixer private: // one-time initialization, no locks required - FastMixer* mFastMixer; // non-NULL if there is also a fast mixer + sp<FastMixer> mFastMixer; // non-0 if there is also a fast mixer sp<AudioWatchdog> mAudioWatchdog; // non-0 if there is an audio watchdog thread // contents are not guaranteed to be consistent, no locks required @@ -692,11 +878,12 @@ private: int32_t mFastMixerFutex; // for cold idle public: - virtual bool hasFastMixer() const { return mFastMixer != NULL; } + virtual bool 
hasFastMixer() const { return mFastMixer != 0; } virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const { ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks); return mFastMixerDumpState.mTracks[fastIndex].mUnderruns; } + }; class DirectOutputThread : public PlaybackThread { @@ -708,10 +895,13 @@ public: // Thread virtuals - virtual bool checkForNewParameters_l(); + virtual bool checkForNewParameter_l(const String8& keyValuePair, + status_t& status); + virtual void flushHw_l(); protected: - virtual int getTrackName_l(audio_channel_mask_t channelMask, int sessionId); + virtual int getTrackName_l(audio_channel_mask_t channelMask, + audio_format_t format, int sessionId); virtual void deleteTrackName_l(int name); virtual uint32_t activeSleepTimeUs() const; virtual uint32_t idleSleepTimeUs() const; @@ -743,19 +933,17 @@ public: OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, audio_io_handle_t id, uint32_t device); virtual ~OffloadThread() {}; + virtual void flushHw_l(); protected: // threadLoop snippets virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove); virtual void threadLoop_exit(); - virtual void flushOutput_l(); virtual bool waitingAsyncCallback(); virtual bool waitingAsyncCallback_l(); virtual bool shouldStandby_l(); - -private: - void flushHw_l(); + virtual void onAddNewTrack_l(); private: bool mHwPaused; @@ -838,17 +1026,28 @@ public: // record thread -class RecordThread : public ThreadBase, public AudioBufferProvider - // derives from AudioBufferProvider interface for use by resampler +class RecordThread : public ThreadBase { public: + class RecordTrack; + class ResamplerBufferProvider : public AudioBufferProvider + // derives from AudioBufferProvider interface for use by resampler + { + public: + ResamplerBufferProvider(RecordTrack* recordTrack) : mRecordTrack(recordTrack) { } + virtual ~ResamplerBufferProvider() { } + // AudioBufferProvider interface + virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts); + virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); + private: + RecordTrack * const mRecordTrack; + }; + #include "RecordTracks.h" RecordThread(const sp<AudioFlinger>& audioFlinger, AudioStreamIn *input, - uint32_t sampleRate, - audio_channel_mask_t channelMask, audio_io_handle_t id, audio_devices_t outDevice, audio_devices_t inDevice @@ -867,23 +1066,28 @@ public: // Thread virtuals virtual bool threadLoop(); - virtual status_t readyToRun(); // RefBase virtual void onFirstRef(); virtual status_t initCheck() const { return (mInput == NULL) ? 
NO_INIT : NO_ERROR; } + + virtual sp<MemoryDealer> readOnlyHeap() const { return mReadOnlyHeap; } + + virtual sp<IMemory> pipeMemory() const { return mPipeMemory; } + sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l( const sp<AudioFlinger::Client>& client, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, int sessionId, + size_t *notificationFrames, int uid, IAudioFlinger::track_flags_t *flags, pid_t tid, - status_t *status); + status_t *status /*non-NULL*/); status_t start(RecordTrack* recordTrack, AudioSystem::sync_event_t event, @@ -897,15 +1101,21 @@ public: AudioStreamIn* clearInput(); virtual audio_stream_t* stream() const; - // AudioBufferProvider interface - virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts); - virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); - virtual bool checkForNewParameters_l(); + virtual bool checkForNewParameter_l(const String8& keyValuePair, + status_t& status); + virtual void cacheParameters_l() {} virtual String8 getParameters(const String8& keys); - virtual void audioConfigChanged_l(int event, int param = 0); - void readInputParameters(); - virtual unsigned int getInputFramesLost(); + virtual void audioConfigChanged(int event, int param = 0); + virtual status_t createAudioPatch_l(const struct audio_patch *patch, + audio_patch_handle_t *handle); + virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle); + + void addPatchRecord(const sp<PatchRecord>& record); + void deletePatchRecord(const sp<PatchRecord>& record); + + void readInputParameters_l(); + virtual uint32_t getInputFramesLost(); virtual status_t addEffectChain_l(const sp<EffectChain>& chain); virtual size_t removeEffectChain_l(const sp<EffectChain>& chain); @@ -920,45 +1130,73 @@ public: virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const; static void syncStartEventCallback(const wp<SyncEvent>& event); - void handleSyncStartEvent(const sp<SyncEvent>& event); virtual size_t frameCount() const { return mFrameCount; } - bool hasFastRecorder() const { return false; } + bool hasFastCapture() const { return mFastCapture != 0; } + virtual void getAudioPortConfig(struct audio_port_config *config); private: - void clearSyncStartEvent(); - // Enter standby if not already in standby, and set mStandby flag - void standby(); + void standbyIfNotAlreadyInStandby(); // Call the HAL standby method unconditionally, and don't change mStandby flag - void inputStandBy(); + void inputStandBy(); AudioStreamIn *mInput; SortedVector < sp<RecordTrack> > mTracks; - // mActiveTrack has dual roles: it indicates the current active track, and + // mActiveTracks has dual roles: it indicates the current active track(s), and // is used together with mStartStopCond to indicate start()/stop() progress - sp<RecordTrack> mActiveTrack; + SortedVector< sp<RecordTrack> > mActiveTracks; + // generation counter for mActiveTracks + int mActiveTracksGen; Condition mStartStopCond; - // updated by RecordThread::readInputParameters() - AudioResampler *mResampler; - // interleaved stereo pairs of fixed-point signed Q19.12 - int32_t *mRsmpOutBuffer; - int16_t *mRsmpInBuffer; // [mFrameCount * mChannelCount] - size_t mRsmpInIndex; - size_t mBufferSize; // stream buffer size for read() - const uint32_t mReqChannelCount; - const uint32_t mReqSampleRate; - ssize_t mBytesRead; - // sync event triggering actual audio capture. 
Frames read before this event will - // be dropped and therefore not read by the application. - sp<SyncEvent> mSyncStartEvent; - // number of captured frames to drop after the start sync event has been received. - // when < 0, maximum frames to drop before starting capture even if sync event is - // not received - ssize_t mFramestoDrop; + // resampler converts input at HAL Hz to output at AudioRecord client Hz + int16_t *mRsmpInBuffer; // see new[] for details on the size + size_t mRsmpInFrames; // size of resampler input in frames + size_t mRsmpInFramesP2;// size rounded up to a power-of-2 + + // rolling index that is never cleared + int32_t mRsmpInRear; // last filled frame + 1 // For dumpsys const sp<NBAIO_Sink> mTeeSink; + + const sp<MemoryDealer> mReadOnlyHeap; + + // one-time initialization, no locks required + sp<FastCapture> mFastCapture; // non-0 if there is also a fast capture + // FIXME audio watchdog thread + + // contents are not guaranteed to be consistent, no locks required + FastCaptureDumpState mFastCaptureDumpState; +#ifdef STATE_QUEUE_DUMP + // FIXME StateQueue observer and mutator dump fields +#endif + // FIXME audio watchdog dump + + // accessible only within the threadLoop(), no locks required + // mFastCapture->sq() // for mutating and pushing state + int32_t mFastCaptureFutex; // for cold idle + + // The HAL input source is treated as non-blocking, + // but current implementation is blocking + sp<NBAIO_Source> mInputSource; + // The source for the normal capture thread to read from: mInputSource or mPipeSource + sp<NBAIO_Source> mNormalSource; + // If a fast capture is present, the non-blocking pipe sink written to by fast capture, + // otherwise clear + sp<NBAIO_Sink> mPipeSink; + // If a fast capture is present, the non-blocking pipe source read by normal thread, + // otherwise clear + sp<NBAIO_Source> mPipeSource; + // Depth of pipe from fast capture to normal thread and fast clients, always power of 2 + size_t mPipeFramesP2; + // If a fast capture is present, the Pipe as IMemory, otherwise clear + sp<IMemory> mPipeMemory; + + static const size_t kFastCaptureLogSize = 4 * 1024; + sp<NBLog::Writer> mFastCaptureNBLogWriter; + + bool mFastTrackAvail; // true if fast track available }; diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h index cd201d9..98bf96e 100644 --- a/services/audioflinger/TrackBase.h +++ b/services/audioflinger/TrackBase.h @@ -34,7 +34,25 @@ public: RESUMING, ACTIVE, PAUSING, - PAUSED + PAUSED, + STARTING_1, // for RecordTrack only + STARTING_2, // for RecordTrack only + }; + + // where to allocate the data buffer + enum alloc_type { + ALLOC_CBLK, // allocate immediately after control block + ALLOC_READONLY, // allocate from a separate read-only heap per thread + ALLOC_PIPE, // do not allocate; use the pipe buffer + ALLOC_LOCAL, // allocate a local buffer + ALLOC_NONE, // do not allocate:use the buffer passed to TrackBase constructor + }; + + enum track_type { + TYPE_DEFAULT, + TYPE_TIMED, + TYPE_OUTPUT, + TYPE_PATCH, }; TrackBase(ThreadBase *thread, @@ -43,11 +61,15 @@ public: audio_format_t format, audio_channel_mask_t channelMask, size_t frameCount, - const sp<IMemory>& sharedBuffer, + void *buffer, int sessionId, int uid, - bool isOut); + IAudioFlinger::track_flags_t flags, + bool isOut, + alloc_type alloc = ALLOC_CBLK, + track_type type = TYPE_DEFAULT); virtual ~TrackBase(); + virtual status_t initCheck() const; virtual status_t start(AudioSystem::sync_event_t event, int triggerSession) = 0; @@ -58,6 +80,14 @@ 
public: int uid() const { return mUid; } virtual status_t setSyncEvent(const sp<SyncEvent>& event); + sp<IMemory> getBuffers() const { return mBufferMemory; } + void* buffer() const { return mBuffer; } + bool isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; } + bool isTimedTrack() const { return (mType == TYPE_TIMED); } + bool isOutputTrack() const { return (mType == TYPE_OUTPUT); } + bool isPatchTrack() const { return (mType == TYPE_PATCH); } + bool isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); } + protected: TrackBase(const TrackBase&); TrackBase& operator = (const TrackBase&); @@ -78,15 +108,6 @@ protected: virtual uint32_t sampleRate() const { return mSampleRate; } - // Return a pointer to the start of a contiguous slice of the track buffer. - // Parameter 'offset' is the requested start position, expressed in - // monotonically increasing frame units relative to the track epoch. - // Parameter 'frames' is the requested length, also in frame units. - // Always returns non-NULL. It is the caller's responsibility to - // verify that this will be successful; the result of calling this - // function with invalid 'offset' or 'frames' is undefined. - void* getBuffer(uint32_t offset, uint32_t frames) const; - bool isStopped() const { return (mState == STOPPED || mState == FLUSHED); } @@ -118,6 +139,7 @@ protected: /*const*/ sp<Client> mClient; // see explanation at ~TrackBase() why not const sp<IMemory> mCblkMemory; audio_track_cblk_t* mCblk; + sp<IMemory> mBufferMemory; // currently non-0 for fast RecordTrack only void* mBuffer; // start of track buffer, typically in shared memory // except for OutputTrack when it is in local memory // we don't really need a lock for these @@ -136,10 +158,26 @@ protected: const int mSessionId; int mUid; Vector < sp<SyncEvent> >mSyncEvents; + const IAudioFlinger::track_flags_t mFlags; const bool mIsOut; ServerProxy* mServerProxy; const int mId; sp<NBAIO_Sink> mTeeSink; sp<NBAIO_Source> mTeeSource; bool mTerminated; + track_type mType; // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ... + audio_io_handle_t mThreadIoHandle; // I/O handle of the thread the track is attached to +}; + +// PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord. 
+// it provides buffer access methods that map those of a ClientProxy (see AudioTrackShared.h) +class PatchProxyBufferProvider +{ +public: + + virtual ~PatchProxyBufferProvider() {} + + virtual status_t obtainBuffer(Proxy::Buffer* buffer, + const struct timespec *requested = NULL) = 0; + virtual void releaseBuffer(Proxy::Buffer* buffer) = 0; }; diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp index cbf56b5..b9308fa 100644 --- a/services/audioflinger/Tracks.cpp +++ b/services/audioflinger/Tracks.cpp @@ -35,6 +35,7 @@ #include <media/nbaio/Pipe.h> #include <media/nbaio/PipeReader.h> +#include <audio_utils/minifloat.h> // ---------------------------------------------------------------------------- @@ -67,10 +68,13 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( audio_format_t format, audio_channel_mask_t channelMask, size_t frameCount, - const sp<IMemory>& sharedBuffer, + void *buffer, int sessionId, int clientUid, - bool isOut) + IAudioFlinger::track_flags_t flags, + bool isOut, + alloc_type alloc, + track_type type) : RefBase(), mThread(thread), mClient(client), @@ -80,15 +84,20 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( mSampleRate(sampleRate), mFormat(format), mChannelMask(channelMask), - mChannelCount(popcount(channelMask)), + mChannelCount(isOut ? + audio_channel_count_from_out_mask(channelMask) : + audio_channel_count_from_in_mask(channelMask)), mFrameSize(audio_is_linear_pcm(format) ? mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)), mFrameCount(frameCount), mSessionId(sessionId), + mFlags(flags), mIsOut(isOut), mServerProxy(NULL), mId(android_atomic_inc(&nextTrackId)), - mTerminated(false) + mTerminated(false), + mType(type), + mThreadIoHandle(thread->id()) { // if the caller is us, trust the specified uid if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) { @@ -102,27 +111,20 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( // battery usage on it. mUid = clientUid; - // client == 0 implies sharedBuffer == 0 - ALOG_ASSERT(!(client == 0 && sharedBuffer != 0)); - - ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), - sharedBuffer->size()); - // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize); size_t size = sizeof(audio_track_cblk_t); - size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize; - if (sharedBuffer == 0) { + size_t bufferSize = (buffer == NULL ? roundup(frameCount) : frameCount) * mFrameSize; + if (buffer == NULL && alloc == ALLOC_CBLK) { size += bufferSize; } if (client != 0) { mCblkMemory = client->heap()->allocate(size); - if (mCblkMemory != 0) { - mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer()); - // can't assume mCblk != NULL - } else { + if (mCblkMemory == 0 || + (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) { ALOGE("not enough memory for AudioTrack size=%u", size); client->heap()->dump("AudioTrack"); + mCblkMemory.clear(); return; } } else { @@ -134,22 +136,55 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( // construct the shared structure in-place. 
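Illustrative sketch, not part of the change: the ALLOC_CBLK path above sizes a single allocation that holds the control block followed by the frame data. Below is a minimal standalone restatement of that size computation, using the same roundup() helper and audio_track_cblk_t type as the hunk above; the header paths and the helper name cblkAllocationSize are assumptions for illustration only.

    #include <stddef.h>
    #include <audio_utils/roundup.h>              // roundup(), as used in the hunk above
    #include <private/media/AudioTrackShared.h>   // audio_track_cblk_t (path assumed)

    // Layout: [ audio_track_cblk_t | roundup(frameCount) * frameSize bytes of frame data ]
    static size_t cblkAllocationSize(size_t frameCount, size_t frameSize, bool clientSuppliedBuffer)
    {
        size_t size = sizeof(audio_track_cblk_t);
        if (!clientSuppliedBuffer) {
            // only the server-allocated case appends the data buffer right after the cblk
            size += roundup(frameCount) * frameSize;
        }
        return size;
    }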
if (mCblk != NULL) { new(mCblk) audio_track_cblk_t(); - // clear all buffers - mCblk->frameCount_ = frameCount; - if (sharedBuffer == 0) { - mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); + switch (alloc) { + case ALLOC_READONLY: { + const sp<MemoryDealer> roHeap(thread->readOnlyHeap()); + if (roHeap == 0 || + (mBufferMemory = roHeap->allocate(bufferSize)) == 0 || + (mBuffer = mBufferMemory->pointer()) == NULL) { + ALOGE("not enough memory for read-only buffer size=%zu", bufferSize); + if (roHeap != 0) { + roHeap->dump("buffer"); + } + mCblkMemory.clear(); + mBufferMemory.clear(); + return; + } memset(mBuffer, 0, bufferSize); - } else { - mBuffer = sharedBuffer->pointer(); + } break; + case ALLOC_PIPE: + mBufferMemory = thread->pipeMemory(); + // mBuffer is the virtual address as seen from current process (mediaserver), + // and should normally be coming from mBufferMemory->pointer(). + // However in this case the TrackBase does not reference the buffer directly. + // It should references the buffer via the pipe. + // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL. + mBuffer = NULL; + break; + case ALLOC_CBLK: + // clear all buffers + if (buffer == NULL) { + mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); + memset(mBuffer, 0, bufferSize); + } else { + mBuffer = buffer; #if 0 - mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic + mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic #endif + } + break; + case ALLOC_LOCAL: + mBuffer = calloc(1, bufferSize); + break; + case ALLOC_NONE: + mBuffer = buffer; + break; } #ifdef TEE_SINK if (mTeeSinkTrackEnabled) { - NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount); - if (pipeFormat != Format_Invalid) { + NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat); + if (Format_isValid(pipeFormat)) { Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat); size_t numCounterOffers = 0; const NBAIO_Format offers[1] = {pipeFormat}; @@ -168,6 +203,17 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( } } +status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const +{ + status_t status; + if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) { + status = cblk() != NULL ? NO_ERROR : NO_MEMORY; + } else { + status = getCblk() != 0 ? NO_ERROR : NO_MEMORY; + } + return status; +} + AudioFlinger::ThreadBase::TrackBase::~TrackBase() { #ifdef TEE_SINK @@ -184,13 +230,15 @@ AudioFlinger::ThreadBase::TrackBase::~TrackBase() } mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to if (mClient != 0) { - // Client destructor must run with AudioFlinger mutex locked - Mutex::Autolock _l(mClient->audioFlinger()->mLock); + // Client destructor must run with AudioFlinger client mutex locked + Mutex::Autolock _l(mClient->audioFlinger()->mClientLock); // If the client's reference count drops to zero, the associated destructor // must run with AudioFlinger lock held. Thus the explicit clear() rather than // relying on the automatic clear() at end of scope. 
mClient.clear(); } + // flush the binder command buffer + IPCThreadState::self()->flushCommands(); } // AudioBufferProvider interface @@ -276,6 +324,11 @@ status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer, if (!mTrack->isTimedTrack()) return INVALID_OPERATION; + if (buffer == 0 || buffer->pointer() == NULL) { + ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()"); + return BAD_VALUE; + } + PlaybackThread::TimedTrack* tt = reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); return tt->queueTimedBuffer(buffer, pts); @@ -325,12 +378,17 @@ AudioFlinger::PlaybackThread::Track::Track( audio_format_t format, audio_channel_mask_t channelMask, size_t frameCount, + void *buffer, const sp<IMemory>& sharedBuffer, int sessionId, int uid, - IAudioFlinger::track_flags_t flags) - : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer, - sessionId, uid, true /*isOut*/), + IAudioFlinger::track_flags_t flags, + track_type type) + : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, + (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer, + sessionId, uid, flags, true /*isOut*/, + (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK, + type), mFillingUpStatus(FS_INVALID), // mRetryCount initialized later when needed mSharedBuffer(sharedBuffer), @@ -340,46 +398,55 @@ AudioFlinger::PlaybackThread::Track::Track( mAuxBuffer(NULL), mAuxEffectId(0), mHasVolumeController(false), mPresentationCompleteFrames(0), - mFlags(flags), mFastIndex(-1), mCachedVolume(1.0), mIsInvalid(false), mAudioTrackServerProxy(NULL), - mResumeToStopping(false) + mResumeToStopping(false), + mFlushHwPending(false), + mPreviousValid(false), + mPreviousFramesWritten(0) + // mPreviousTimestamp { - if (mCblk != NULL) { - if (sharedBuffer == 0) { - mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount, - mFrameSize); - } else { - mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount, - mFrameSize); - } - mServerProxy = mAudioTrackServerProxy; - // to avoid leaking a track name, do not allocate one unless there is an mCblk - mName = thread->getTrackName_l(channelMask, sessionId); - if (mName < 0) { - ALOGE("no more track names available"); - return; - } - // only allocate a fast track index if we were able to allocate a normal track name - if (flags & IAudioFlinger::TRACK_FAST) { - mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads(); - ALOG_ASSERT(thread->mFastTrackAvailMask != 0); - int i = __builtin_ctz(thread->mFastTrackAvailMask); - ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks); - // FIXME This is too eager. We allocate a fast track index before the - // fast track becomes active. Since fast tracks are a scarce resource, - // this means we are potentially denying other more important fast tracks from - // being created. It would be better to allocate the index dynamically. 
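Illustrative sketch, not part of the change: both the removed and the relocated code in this hunk claim a fast-track index by scanning mFastTrackAvailMask with __builtin_ctz and clearing the chosen bit. A minimal standalone version of that bitmask technique, with hypothetical names:

    #include <stdint.h>

    // Returns a free slot index and marks it as used, or -1 if the mask has no free slots.
    static int takeFastTrackSlot(uint32_t &availMask)
    {
        if (availMask == 0) {
            return -1;                        // no slot available
        }
        int i = __builtin_ctz(availMask);     // lowest set bit == lowest-numbered free slot
        availMask &= ~(1u << i);              // clear the bit to mark the slot in use
        return i;
    }
    // Releasing the slot later is the inverse: availMask |= 1u << i;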
- mFastIndex = i; - // Read the initial underruns because this field is never cleared by the fast mixer - mObservedUnderruns = thread->getFastTrackUnderruns(i); - thread->mFastTrackAvailMask &= ~(1 << i); - } + // client == 0 implies sharedBuffer == 0 + ALOG_ASSERT(!(client == 0 && sharedBuffer != 0)); + + ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), + sharedBuffer->size()); + + if (mCblk == NULL) { + return; + } + + if (sharedBuffer == 0) { + mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount, + mFrameSize, !isExternalTrack(), sampleRate); + } else { + mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount, + mFrameSize); + } + mServerProxy = mAudioTrackServerProxy; + + mName = thread->getTrackName_l(channelMask, format, sessionId); + if (mName < 0) { + ALOGE("no more track names available"); + return; + } + // only allocate a fast track index if we were able to allocate a normal track name + if (flags & IAudioFlinger::TRACK_FAST) { + mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads(); + ALOG_ASSERT(thread->mFastTrackAvailMask != 0); + int i = __builtin_ctz(thread->mFastTrackAvailMask); + ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks); + // FIXME This is too eager. We allocate a fast track index before the + // fast track becomes active. Since fast tracks are a scarce resource, + // this means we are potentially denying other more important fast tracks from + // being created. It would be better to allocate the index dynamically. + mFastIndex = i; + // Read the initial underruns because this field is never cleared by the fast mixer + mObservedUnderruns = thread->getFastTrackUnderruns(i); + thread->mFastTrackAvailMask &= ~(1 << i); } - ALOGV("Track constructor name %d, calling pid %d", mName, - IPCThreadState::self()->getCallingPid()); } AudioFlinger::PlaybackThread::Track::~Track() @@ -392,11 +459,18 @@ AudioFlinger::PlaybackThread::Track::~Track() // This prevents that leak. if (mSharedBuffer != 0) { mSharedBuffer.clear(); - // flush the binder command buffer - IPCThreadState::self()->flushCommands(); } } +status_t AudioFlinger::PlaybackThread::Track::initCheck() const +{ + status_t status = TrackBase::initCheck(); + if (status == NO_ERROR && mName < 0) { + status = NO_MEMORY; + } + return status; +} + void AudioFlinger::PlaybackThread::Track::destroy() { // NOTE: destroyTrack_l() can remove a strong reference to this Track @@ -409,31 +483,34 @@ void AudioFlinger::PlaybackThread::Track::destroy() // this Track with its member mTrack. 
sp<Track> keep(this); { // scope for mLock + bool wasActive = false; sp<ThreadBase> thread = mThread.promote(); if (thread != 0) { Mutex::Autolock _l(thread->mLock); PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - bool wasActive = playbackThread->destroyTrack_l(this); - if (!isOutputTrack() && !wasActive) { - AudioSystem::releaseOutput(thread->id()); - } + wasActive = playbackThread->destroyTrack_l(this); + } + if (isExternalTrack() && !wasActive) { + AudioSystem::releaseOutput(mThreadIoHandle); } } } /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result) { - result.append(" Name Client Type Fmt Chn mask Session fCount S F SRate " + result.append(" Name Active Client Type Fmt Chn mask Session fCount S F SRate " "L dB R dB Server Main buf Aux Buf Flags UndFrmCnt\n"); } -void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size) +void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size, bool active) { - uint32_t vlr = mAudioTrackServerProxy->getVolumeLR(); + gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR(); if (isFastTrack()) { - sprintf(buffer, " F %2d", mFastIndex); + sprintf(buffer, " F %2d", mFastIndex); + } else if (mName >= AudioMixer::TRACK0) { + sprintf(buffer, " %4d", mName - AudioMixer::TRACK0); } else { - sprintf(buffer, " %4d", mName - AudioMixer::TRACK0); + sprintf(buffer, " none"); } track_state state = mState; char stateChar; @@ -488,8 +565,9 @@ void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size) nowInUnderrun = '?'; break; } - snprintf(&buffer[7], size-7, " %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g " + snprintf(&buffer[8], size-8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g " "%08X %p %p 0x%03X %9u%c\n", + active ? "yes" : "no", (mClient == 0) ? 
getpid_cached : mClient->pid(), mStreamType, mFormat, @@ -499,8 +577,8 @@ void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size) stateChar, mFillingUpStatus, mAudioTrackServerProxy->getSampleRate(), - 20.0 * log10((vlr & 0xFFFF) / 4096.0), - 20.0 * log10((vlr >> 16) / 4096.0), + 20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))), + 20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))), mCblk->mServer, mMainBuffer, mAuxBuffer, @@ -515,7 +593,7 @@ uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const { // AudioBufferProvider interface status_t AudioFlinger::PlaybackThread::Track::getNextBuffer( - AudioBufferProvider::Buffer* buffer, int64_t pts) + AudioBufferProvider::Buffer* buffer, int64_t pts __unused) { ServerProxy::Buffer buf; size_t desiredFrames = buffer->frameCount; @@ -552,7 +630,14 @@ size_t AudioFlinger::PlaybackThread::Track::framesReleased() const // Don't call for fast tracks; the framesReady() could result in priority inversion bool AudioFlinger::PlaybackThread::Track::isReady() const { - if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing() || isStopping()) { + if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) { + return true; + } + + if (isStopping()) { + if (framesReady() > 0) { + mFillingUpStatus = FS_FILLED; + } return true; } @@ -565,8 +650,8 @@ bool AudioFlinger::PlaybackThread::Track::isReady() const { return false; } -status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event, - int triggerSession) +status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused, + int triggerSession __unused) { status_t status = NO_ERROR; ALOGV("start(%d), calling pid %d session %d", @@ -589,7 +674,10 @@ status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t ev // here the track could be either new, or restarted // in both cases "unstop" the track - if (state == PAUSED) { + // initial state-stopping. next state-pausing. + // What if resume is called ? + + if (state == PAUSED || state == PAUSING) { if (mResumeToStopping) { // happened we need to resume to STOPPING_1 mState = TrackBase::STOPPING_1; @@ -644,7 +732,7 @@ void AudioFlinger::PlaybackThread::Track::stop() if (playbackThread->mActiveTracks.indexOf(this) < 0) { reset(); mState = STOPPED; - } else if (!isFastTrack() && !isOffloaded()) { + } else if (!isFastTrack() && !isOffloaded() && !isDirect()) { mState = STOPPED; } else { // For fast tracks prepareTracks_l() will set state to STOPPING_2 @@ -720,6 +808,7 @@ void AudioFlinger::PlaybackThread::Track::flush() mRetryCount = PlaybackThread::kMaxTrackRetriesOffload; } + mFlushHwPending = true; mResumeToStopping = false; } else { if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && @@ -735,16 +824,28 @@ void AudioFlinger::PlaybackThread::Track::flush() // remove from active track list, reset(), and trigger presentation complete if (playbackThread->mActiveTracks.indexOf(this) < 0) { reset(); + if (thread->type() == ThreadBase::DIRECT) { + DirectOutputThread *t = (DirectOutputThread *)playbackThread; + t->flushHw_l(); + } } } // Prevent flush being lost if the track is flushed and then resumed // before mixer thread can run. 
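Illustrative sketch, not part of the change: dump() above and getVolumeLR() further down operate on the packed stereo minifloat gain from <audio_utils/minifloat.h>. This minimal sketch of unpacking, clamping to unity, scaling, and repacking such a value uses only helpers already referenced in this diff; the function name clampAndScaleVolume is hypothetical.

    #include <audio_utils/minifloat.h>   // gain_minifloat_* helpers, included earlier in Tracks.cpp

    static gain_minifloat_packed_t clampAndScaleVolume(gain_minifloat_packed_t vlr, float master)
    {
        float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
        float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
        // the packed value may come from shared memory, so clamp before use
        if (vl > GAIN_FLOAT_UNITY) vl = GAIN_FLOAT_UNITY;
        if (vr > GAIN_FLOAT_UNITY) vr = GAIN_FLOAT_UNITY;
        vl *= master;
        vr *= master;
        return gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
    }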
This is important when offloading // because the hardware buffer could hold a large amount of audio - playbackThread->flushOutput_l(); playbackThread->broadcast_l(); } } +// must be called with thread lock held +void AudioFlinger::PlaybackThread::Track::flushAck() +{ + if (!isOffloaded()) + return; + + mFlushHwPending = false; +} + void AudioFlinger::PlaybackThread::Track::reset() { // Do not reset twice to avoid discarding data written just after a flush and before @@ -779,27 +880,60 @@ status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& times { // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant if (isFastTrack()) { + // FIXME no lock held to set mPreviousValid = false return INVALID_OPERATION; } sp<ThreadBase> thread = mThread.promote(); if (thread == 0) { + // FIXME no lock held to set mPreviousValid = false return INVALID_OPERATION; } Mutex::Autolock _l(thread->mLock); PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); - if (!isOffloaded()) { + if (!isOffloaded() && !isDirect()) { if (!playbackThread->mLatchQValid) { + mPreviousValid = false; return INVALID_OPERATION; } uint32_t unpresentedFrames = ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) / playbackThread->mSampleRate; - uint32_t framesWritten = mAudioTrackServerProxy->framesReleased(); + // FIXME Since we're using a raw pointer as the key, it is theoretically possible + // for a brand new track to share the same address as a recently destroyed + // track, and thus for us to get the frames released of the wrong track. + // It is unlikely that we would be able to call getTimestamp() so quickly + // right after creating a new track. Nevertheless, the index here should + // be changed to something that is unique. Or use a completely different strategy. + ssize_t i = playbackThread->mLatchQ.mFramesReleased.indexOfKey(this); + uint32_t framesWritten = i >= 0 ? 
+ playbackThread->mLatchQ.mFramesReleased[i] : + mAudioTrackServerProxy->framesReleased(); + bool checkPreviousTimestamp = mPreviousValid && framesWritten >= mPreviousFramesWritten; if (framesWritten < unpresentedFrames) { + mPreviousValid = false; return INVALID_OPERATION; } - timestamp.mPosition = framesWritten - unpresentedFrames; - timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime; + mPreviousFramesWritten = framesWritten; + uint32_t position = framesWritten - unpresentedFrames; + struct timespec time = playbackThread->mLatchQ.mTimestamp.mTime; + if (checkPreviousTimestamp) { + if (time.tv_sec < mPreviousTimestamp.mTime.tv_sec || + (time.tv_sec == mPreviousTimestamp.mTime.tv_sec && + time.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) { + ALOGW("Time is going backwards"); + } + // position can bobble slightly as an artifact; this hides the bobble + static const uint32_t MINIMUM_POSITION_DELTA = 8u; + if ((position <= mPreviousTimestamp.mPosition) || + (position - mPreviousTimestamp.mPosition) < MINIMUM_POSITION_DELTA) { + position = mPreviousTimestamp.mPosition; + time = mPreviousTimestamp.mTime; + } + } + timestamp.mPosition = position; + timestamp.mTime = time; + mPreviousTimestamp = timestamp; + mPreviousValid = true; return NO_ERROR; } @@ -885,8 +1019,6 @@ bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWrit } if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) { - ALOGV("presentationComplete() session %d complete: framesWritten %d", - mSessionId, framesWritten); triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); mAudioTrackServerProxy->setStreamEndDone(); return true; @@ -907,27 +1039,27 @@ void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_ // implement VolumeBufferProvider interface -uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR() +gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR() { // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs ALOG_ASSERT(isFastTrack() && (mCblk != NULL)); - uint32_t vlr = mAudioTrackServerProxy->getVolumeLR(); - uint32_t vl = vlr & 0xFFFF; - uint32_t vr = vlr >> 16; + gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR(); + float vl = float_from_gain(gain_minifloat_unpack_left(vlr)); + float vr = float_from_gain(gain_minifloat_unpack_right(vlr)); // track volumes come from shared memory, so can't be trusted and must be clamped - if (vl > MAX_GAIN_INT) { - vl = MAX_GAIN_INT; + if (vl > GAIN_FLOAT_UNITY) { + vl = GAIN_FLOAT_UNITY; } - if (vr > MAX_GAIN_INT) { - vr = MAX_GAIN_INT; + if (vr > GAIN_FLOAT_UNITY) { + vr = GAIN_FLOAT_UNITY; } // now apply the cached master volume and stream type volume; // this is trusted but lacks any synchronization or barrier so may be stale float v = mCachedVolume; vl *= v; vr *= v; - // re-combine into U4.16 - vlr = (vr << 16) | (vl & 0xFFFF); + // re-combine into packed minifloat + vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr)); // FIXME look at mute, pause, and stop flags return vlr; } @@ -967,6 +1099,33 @@ void AudioFlinger::PlaybackThread::Track::signal() } } +//To be called with thread lock held +bool AudioFlinger::PlaybackThread::Track::isResumePending() { + + if (mState == RESUMING) + return true; + /* Resume is pending if track was stopping before pause was called */ + if (mState == STOPPING_1 && + mResumeToStopping) + return true; + + return false; +} + +//To be called with thread lock held +void 
AudioFlinger::PlaybackThread::Track::resumeAck() { + + + if (mState == RESUMING) + mState = ACTIVE; + + // Other possibility of pending resume is stopping_1 state + // Do not update the state from stopping as this prevents + // drain being called. + if (mState == STOPPING_1) { + mResumeToStopping = false; + } +} // ---------------------------------------------------------------------------- sp<AudioFlinger::PlaybackThread::TimedTrack> @@ -980,7 +1139,8 @@ AudioFlinger::PlaybackThread::TimedTrack::create( size_t frameCount, const sp<IMemory>& sharedBuffer, int sessionId, - int uid) { + int uid) +{ if (!client->reserveTimedTrack()) return 0; @@ -1001,7 +1161,8 @@ AudioFlinger::PlaybackThread::TimedTrack::TimedTrack( int sessionId, int uid) : Track(thread, client, streamType, sampleRate, format, channelMask, - frameCount, sharedBuffer, sessionId, uid, IAudioFlinger::TRACK_TIMED), + frameCount, (sharedBuffer != 0) ? sharedBuffer->pointer() : NULL, sharedBuffer, + sessionId, uid, IAudioFlinger::TRACK_TIMED, TYPE_TIMED), mQueueHeadInFlight(false), mTrimQueueHeadOnRelease(false), mFramesPendingInQueue(0), @@ -1046,15 +1207,14 @@ status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer( mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize, "AudioFlingerTimed"); - if (mTimedMemoryDealer == NULL) + if (mTimedMemoryDealer == NULL) { return NO_MEMORY; + } } sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size); - if (newBuffer == NULL) { - newBuffer = mTimedMemoryDealer->allocate(size); - if (newBuffer == NULL) - return NO_MEMORY; + if (newBuffer == 0 || newBuffer->pointer() == NULL) { + return NO_MEMORY; } *buffer = newBuffer; @@ -1153,7 +1313,7 @@ void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l( void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l( const TimedBuffer& buf, - const char* logTag) { + const char* logTag __unused) { uint32_t bufBytes = buf.buffer()->size(); uint32_t consumedAlready = buf.position(); @@ -1464,7 +1624,7 @@ void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer( mTrimQueueHeadOnRelease = false; } } else { - LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no" + LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no" " buffers in the timed buffer queue"); } @@ -1497,7 +1657,7 @@ AudioFlinger::PlaybackThread::OutputTrack::OutputTrack( size_t frameCount, int uid) : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, - NULL, 0, uid, IAudioFlinger::TRACK_DEFAULT), + NULL, 0, 0, uid, IAudioFlinger::TRACK_DEFAULT, TYPE_OUTPUT), mActive(false), mSourceThread(sourceThread), mClientProxy(NULL) { @@ -1505,17 +1665,16 @@ AudioFlinger::PlaybackThread::OutputTrack::OutputTrack( mOutBuffer.frameCount = 0; playbackThread->mTracks.add(this); ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, " - "mCblk->frameCount_ %u, mChannelMask 0x%08x", + "frameCount %u, mChannelMask 0x%08x", mCblk, mBuffer, - mCblk->frameCount_, mChannelMask); + frameCount, mChannelMask); // since client and server are in the same process, // the buffer has the same virtual address on both sides - mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize); - mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000)); - mClientProxy->setSendLevel(0.0); - mClientProxy->setSampleRate(sampleRate); mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize, true /*clientInServer*/); + 
mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY); + mClientProxy->setSendLevel(0.0); + mClientProxy->setSampleRate(sampleRate); } else { ALOGW("Error creating output track on thread %p", playbackThread); } @@ -1706,6 +1865,75 @@ void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue() } +AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread, + uint32_t sampleRate, + audio_channel_mask_t channelMask, + audio_format_t format, + size_t frameCount, + void *buffer, + IAudioFlinger::track_flags_t flags) + : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, + buffer, 0, 0, getuid(), flags, TYPE_PATCH), + mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true)) +{ + uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) / + playbackThread->sampleRate(); + mPeerTimeout.tv_sec = mixBufferNs / 1000000000; + mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000); + + ALOGV("PatchTrack %p sampleRate %d mPeerTimeout %d.%03d sec", + this, sampleRate, + (int)mPeerTimeout.tv_sec, + (int)(mPeerTimeout.tv_nsec / 1000000)); +} + +AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack() +{ +} + +// AudioBufferProvider interface +status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer( + AudioBufferProvider::Buffer* buffer, int64_t pts) +{ + ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::getNextBuffer() called without peer proxy"); + Proxy::Buffer buf; + buf.mFrameCount = buffer->frameCount; + status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout); + ALOGV_IF(status != NO_ERROR, "PatchTrack() %p getNextBuffer status %d", this, status); + buffer->frameCount = buf.mFrameCount; + if (buf.mFrameCount == 0) { + return WOULD_BLOCK; + } + status = Track::getNextBuffer(buffer, pts); + return status; +} + +void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer) +{ + ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::releaseBuffer() called without peer proxy"); + Proxy::Buffer buf; + buf.mFrameCount = buffer->frameCount; + buf.mRaw = buffer->raw; + mPeerProxy->releaseBuffer(&buf); + TrackBase::releaseBuffer(buffer); +} + +status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer, + const struct timespec *timeOut) +{ + return mProxy->obtainBuffer(buffer, timeOut); +} + +void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer) +{ + mProxy->releaseBuffer(buffer); + if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) { + ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting"); + start(); + } + android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags); +} + // ---------------------------------------------------------------------------- // Record // ---------------------------------------------------------------------------- @@ -1722,10 +1950,6 @@ AudioFlinger::RecordHandle::~RecordHandle() { mRecordTrack->destroy(); } -sp<IMemory> AudioFlinger::RecordHandle::getCblk() const { - return mRecordTrack->getCblk(); -} - status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event, int triggerSession) { ALOGV("RecordHandle::start()"); @@ -1749,7 +1973,7 @@ status_t AudioFlinger::RecordHandle::onTransact( // ---------------------------------------------------------------------------- -// RecordTrack constructor must be called with AudioFlinger::mLock held +// RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock 
held AudioFlinger::RecordThread::RecordTrack::RecordTrack( RecordThread *thread, const sp<Client>& client, @@ -1757,28 +1981,59 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack( audio_format_t format, audio_channel_mask_t channelMask, size_t frameCount, + void *buffer, int sessionId, - int uid) + int uid, + IAudioFlinger::track_flags_t flags, + track_type type) : TrackBase(thread, client, sampleRate, format, - channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, false /*isOut*/), - mOverflow(false) + channelMask, frameCount, buffer, sessionId, uid, + flags, false /*isOut*/, + (type == TYPE_DEFAULT) ? + ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) : + ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE), + type), + mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0), + // See real initialization of mRsmpInFront at RecordThread::start() + mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL) { - ALOGV("RecordTrack constructor"); - if (mCblk != NULL) { - mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, - mFrameSize); - mServerProxy = mAudioRecordServerProxy; + if (mCblk == NULL) { + return; + } + + mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, + mFrameSize, !isExternalTrack()); + + uint32_t channelCount = audio_channel_count_from_in_mask(channelMask); + // FIXME I don't understand either of the channel count checks + if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 && + channelCount <= FCC_2) { + // sink SR + mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT, + thread->mChannelCount, sampleRate); + // source SR + mResampler->setSampleRate(thread->mSampleRate); + mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT); + mResamplerBufferProvider = new ResamplerBufferProvider(this); + } + + if (flags & IAudioFlinger::TRACK_FAST) { + ALOG_ASSERT(thread->mFastTrackAvail); + thread->mFastTrackAvail = false; } } AudioFlinger::RecordThread::RecordTrack::~RecordTrack() { ALOGV("%s", __func__); + delete mResampler; + delete[] mRsmpOutBuffer; + delete mResamplerBufferProvider; } // AudioBufferProvider interface status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, - int64_t pts) + int64_t pts __unused) { ServerProxy::Buffer buf; buf.mFrameCount = buffer->frameCount; @@ -1809,8 +2064,8 @@ void AudioFlinger::RecordThread::RecordTrack::stop() sp<ThreadBase> thread = mThread.promote(); if (thread != 0) { RecordThread *recordThread = (RecordThread *)thread.get(); - if (recordThread->stop(this)) { - AudioSystem::stopInput(recordThread->id()); + if (recordThread->stop(this) && isExternalTrack()) { + AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId); } } } @@ -1820,12 +2075,14 @@ void AudioFlinger::RecordThread::RecordTrack::destroy() // see comments at AudioFlinger::PlaybackThread::Track::destroy() sp<RecordTrack> keep(this); { - sp<ThreadBase> thread = mThread.promote(); - if (thread != 0) { + if (isExternalTrack()) { if (mState == ACTIVE || mState == RESUMING) { - AudioSystem::stopInput(thread->id()); + AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId); } - AudioSystem::releaseInput(thread->id()); + AudioSystem::releaseInput(mThreadIoHandle, (audio_session_t)mSessionId); + } + sp<ThreadBase> thread = mThread.promote(); + if (thread != 0) { Mutex::Autolock _l(thread->mLock); RecordThread *recordThread = (RecordThread *) 
thread.get(); recordThread->destroyTrack_l(this); @@ -1846,19 +2103,111 @@ void AudioFlinger::RecordThread::RecordTrack::invalidate() /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result) { - result.append("Client Fmt Chn mask Session S Server fCount\n"); + result.append(" Active Client Fmt Chn mask Session S Server fCount SRate\n"); } -void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size) +void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active) { - snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6zu\n", + snprintf(buffer, size, " %6s %6u %3u %08X %7u %1d %08X %6zu %5u\n", + active ? "yes" : "no", (mClient == 0) ? getpid_cached : mClient->pid(), mFormat, mChannelMask, mSessionId, mState, mCblk->mServer, - mFrameCount); + mFrameCount, + mSampleRate); + +} + +void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event) +{ + if (event == mSyncStartEvent) { + ssize_t framesToDrop = 0; + sp<ThreadBase> threadBase = mThread.promote(); + if (threadBase != 0) { + // TODO: use actual buffer filling status instead of 2 buffers when info is available + // from audio HAL + framesToDrop = threadBase->mFrameCount * 2; + } + mFramesToDrop = framesToDrop; + } +} + +void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent() +{ + if (mSyncStartEvent != 0) { + mSyncStartEvent->cancel(); + mSyncStartEvent.clear(); + } + mFramesToDrop = 0; +} + + +AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread, + uint32_t sampleRate, + audio_channel_mask_t channelMask, + audio_format_t format, + size_t frameCount, + void *buffer, + IAudioFlinger::track_flags_t flags) + : RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount, + buffer, 0, getuid(), flags, TYPE_PATCH), + mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true)) +{ + uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) / + recordThread->sampleRate(); + mPeerTimeout.tv_sec = mixBufferNs / 1000000000; + mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000); + + ALOGV("PatchRecord %p sampleRate %d mPeerTimeout %d.%03d sec", + this, sampleRate, + (int)mPeerTimeout.tv_sec, + (int)(mPeerTimeout.tv_nsec / 1000000)); +} + +AudioFlinger::RecordThread::PatchRecord::~PatchRecord() +{ +} + +// AudioBufferProvider interface +status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer( + AudioBufferProvider::Buffer* buffer, int64_t pts) +{ + ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::getNextBuffer() called without peer proxy"); + Proxy::Buffer buf; + buf.mFrameCount = buffer->frameCount; + status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout); + ALOGV_IF(status != NO_ERROR, + "PatchRecord() %p mPeerProxy->obtainBuffer status %d", this, status); + buffer->frameCount = buf.mFrameCount; + if (buf.mFrameCount == 0) { + return WOULD_BLOCK; + } + status = RecordTrack::getNextBuffer(buffer, pts); + return status; +} + +void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer) +{ + ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::releaseBuffer() called without peer proxy"); + Proxy::Buffer buf; + buf.mFrameCount = buffer->frameCount; + buf.mRaw = buffer->raw; + mPeerProxy->releaseBuffer(&buf); + TrackBase::releaseBuffer(buffer); +} + +status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer, + const struct timespec *timeOut) +{ + return mProxy->obtainBuffer(buffer, 
timeOut); +} + +void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer) +{ + mProxy->releaseBuffer(buffer); } }; // namespace android diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp index 7a314cf..84a655a 100644 --- a/services/audioflinger/test-resample.cpp +++ b/services/audioflinger/test-resample.cpp @@ -14,8 +14,6 @@ * limitations under the License. */ -#include "AudioResampler.h" -#include <media/AudioBufferProvider.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> @@ -24,81 +22,114 @@ #include <sys/mman.h> #include <sys/stat.h> #include <errno.h> +#include <inttypes.h> #include <time.h> #include <math.h> +#include <audio_utils/primitives.h> +#include <audio_utils/sndfile.h> +#include <utils/Vector.h> +#include <media/AudioBufferProvider.h> +#include "AudioResampler.h" using namespace android; -struct HeaderWav { - HeaderWav(size_t size, int nc, int sr, int bits) { - strncpy(RIFF, "RIFF", 4); - chunkSize = size + sizeof(HeaderWav); - strncpy(WAVE, "WAVE", 4); - strncpy(fmt, "fmt ", 4); - fmtSize = 16; - audioFormat = 1; - numChannels = nc; - samplesRate = sr; - byteRate = sr * numChannels * (bits/8); - align = nc*(bits/8); - bitsPerSample = bits; - strncpy(data, "data", 4); - dataSize = size; - } - - char RIFF[4]; // RIFF - uint32_t chunkSize; // File size - char WAVE[4]; // WAVE - char fmt[4]; // fmt\0 - uint32_t fmtSize; // fmt size - uint16_t audioFormat; // 1=PCM - uint16_t numChannels; // num channels - uint32_t samplesRate; // sample rate in hz - uint32_t byteRate; // Bps - uint16_t align; // 2=16-bit mono, 4=16-bit stereo - uint16_t bitsPerSample; // bits per sample - char data[4]; // "data" - uint32_t dataSize; // size -}; +static bool gVerbose = false; static int usage(const char* name) { - fprintf(stderr,"Usage: %s [-p] [-h] [-s] [-q {dq|lq|mq|hq|vhq}] [-i input-sample-rate] " - "[-o output-sample-rate] [<input-file>] <output-file>\n", name); + fprintf(stderr,"Usage: %s [-p] [-f] [-F] [-v] [-c channels]" + " [-q {dq|lq|mq|hq|vhq|dlq|dmq|dhq}]" + " [-i input-sample-rate] [-o output-sample-rate]" + " [-O csv] [-P csv] [<input-file>]" + " <output-file>\n", name); fprintf(stderr," -p enable profiling\n"); - fprintf(stderr," -h create wav file\n"); - fprintf(stderr," -s stereo\n"); + fprintf(stderr," -f enable filter profiling\n"); + fprintf(stderr," -F enable floating point -q {dlq|dmq|dhq} only"); + fprintf(stderr," -v verbose : log buffer provider calls\n"); + fprintf(stderr," -c # channels (1-2 for lq|mq|hq; 1-8 for dlq|dmq|dhq)\n"); fprintf(stderr," -q resampler quality\n"); fprintf(stderr," dq : default quality\n"); fprintf(stderr," lq : low quality\n"); fprintf(stderr," mq : medium quality\n"); fprintf(stderr," hq : high quality\n"); fprintf(stderr," vhq : very high quality\n"); - fprintf(stderr," -i input file sample rate\n"); + fprintf(stderr," dlq : dynamic low quality\n"); + fprintf(stderr," dmq : dynamic medium quality\n"); + fprintf(stderr," dhq : dynamic high quality\n"); + fprintf(stderr," -i input file sample rate (ignored if input file is specified)\n"); fprintf(stderr," -o output file sample rate\n"); + fprintf(stderr," -O # frames output per call to resample() in CSV format\n"); + fprintf(stderr," -P # frames provided per call to resample() in CSV format\n"); return -1; } -int main(int argc, char* argv[]) { +// Convert a list of integers in CSV format to a Vector of those values. +// Returns the number of elements in the list, or -1 on error. 
+int parseCSV(const char *string, Vector<int>& values) +{ + // pass 1: count the number of values and do syntax check + size_t numValues = 0; + bool hadDigit = false; + for (const char *p = string; ; ) { + switch (*p++) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + hadDigit = true; + break; + case '\0': + if (hadDigit) { + // pass 2: allocate and initialize vector of values + values.resize(++numValues); + values.editItemAt(0) = atoi(p = optarg); + for (size_t i = 1; i < numValues; ) { + if (*p++ == ',') { + values.editItemAt(i++) = atoi(p); + } + } + return numValues; + } + // fall through + case ',': + if (hadDigit) { + hadDigit = false; + numValues++; + break; + } + // fall through + default: + return -1; + } + } +} +int main(int argc, char* argv[]) { const char* const progname = argv[0]; - bool profiling = false; - bool writeHeader = false; + bool profileResample = false; + bool profileFilter = false; + bool useFloat = false; int channels = 1; int input_freq = 0; int output_freq = 0; AudioResampler::src_quality quality = AudioResampler::DEFAULT_QUALITY; + Vector<int> Ovalues; + Vector<int> Pvalues; int ch; - while ((ch = getopt(argc, argv, "phsq:i:o:")) != -1) { + while ((ch = getopt(argc, argv, "pfFvc:q:i:o:O:P:")) != -1) { switch (ch) { case 'p': - profiling = true; + profileResample = true; + break; + case 'f': + profileFilter = true; + break; + case 'F': + useFloat = true; break; - case 'h': - writeHeader = true; + case 'v': + gVerbose = true; break; - case 's': - channels = 2; + case 'c': + channels = atoi(optarg); break; case 'q': if (!strcmp(optarg, "dq")) @@ -111,6 +142,12 @@ int main(int argc, char* argv[]) { quality = AudioResampler::HIGH_QUALITY; else if (!strcmp(optarg, "vhq")) quality = AudioResampler::VERY_HIGH_QUALITY; + else if (!strcmp(optarg, "dlq")) + quality = AudioResampler::DYN_LOW_QUALITY; + else if (!strcmp(optarg, "dmq")) + quality = AudioResampler::DYN_MED_QUALITY; + else if (!strcmp(optarg, "dhq")) + quality = AudioResampler::DYN_HIGH_QUALITY; else { usage(progname); return -1; @@ -122,12 +159,35 @@ int main(int argc, char* argv[]) { case 'o': output_freq = atoi(optarg); break; + case 'O': + if (parseCSV(optarg, Ovalues) < 0) { + fprintf(stderr, "incorrect syntax for -O option\n"); + return -1; + } + break; + case 'P': + if (parseCSV(optarg, Pvalues) < 0) { + fprintf(stderr, "incorrect syntax for -P option\n"); + return -1; + } + break; case '?': default: usage(progname); return -1; } } + + if (channels < 1 + || channels > (quality < AudioResampler::DYN_LOW_QUALITY ? 
2 : 8)) { + fprintf(stderr, "invalid number of audio channels %d\n", channels); + return -1; + } + if (useFloat && quality < AudioResampler::DYN_LOW_QUALITY) { + fprintf(stderr, "float processing is only possible for dynamic resamplers\n"); + return -1; + } + argc -= optind; argv += optind; @@ -148,25 +208,22 @@ int main(int argc, char* argv[]) { size_t input_size; void* input_vaddr; if (argc == 2) { - struct stat st; - if (stat(file_in, &st) < 0) { - fprintf(stderr, "stat: %s\n", strerror(errno)); - return -1; - } - - int input_fd = open(file_in, O_RDONLY); - if (input_fd < 0) { - fprintf(stderr, "open: %s\n", strerror(errno)); - return -1; - } - - input_size = st.st_size; - input_vaddr = mmap(0, input_size, PROT_READ, MAP_PRIVATE, input_fd, 0); - if (input_vaddr == MAP_FAILED ) { - fprintf(stderr, "mmap: %s\n", strerror(errno)); - return -1; + SF_INFO info; + info.format = 0; + SNDFILE *sf = sf_open(file_in, SFM_READ, &info); + if (sf == NULL) { + perror(file_in); + return EXIT_FAILURE; } + input_size = info.frames * info.channels * sizeof(short); + input_vaddr = malloc(input_size); + (void) sf_readf_short(sf, (short *) input_vaddr, info.frames); + sf_close(sf); + channels = info.channels; + input_freq = info.samplerate; } else { + // data for testing is exactly (input sampling rate/1000)/2 seconds + // so 44.1khz input is 22.05 seconds double k = 1000; // Hz / s double time = (input_freq / 2) / k; size_t input_frames = size_t(input_freq * time); @@ -177,98 +234,276 @@ int main(int argc, char* argv[]) { double t = double(i) / input_freq; double y = sin(M_PI * k * t * t); int16_t yi = floor(y * 32767.0 + 0.5); - for (size_t j=0 ; j<(size_t)channels ; j++) { - in[i*channels + j] = yi / (1+j); + for (int j = 0; j < channels; j++) { + in[i*channels + j] = yi / (1 + j); } } } + size_t input_framesize = channels * sizeof(int16_t); + size_t input_frames = input_size / input_framesize; + + // For float processing, convert input int16_t to float array + if (useFloat) { + void *new_vaddr; + + input_framesize = channels * sizeof(float); + input_size = input_frames * input_framesize; + new_vaddr = malloc(input_size); + memcpy_to_float_from_i16(reinterpret_cast<float*>(new_vaddr), + reinterpret_cast<int16_t*>(input_vaddr), input_frames * channels); + free(input_vaddr); + input_vaddr = new_vaddr; + } // ---------------------------------------------------------- class Provider: public AudioBufferProvider { - int16_t* mAddr; - size_t mNumFrames; + const void* mAddr; // base address + const size_t mNumFrames; // total frames + const size_t mFrameSize; // size of each frame in bytes + size_t mNextFrame; // index of next frame to provide + size_t mUnrel; // number of frames not yet released + const Vector<int> mPvalues; // number of frames provided per call + size_t mNextPidx; // index of next entry in mPvalues to use public: - Provider(const void* addr, size_t size, int channels) { - mAddr = (int16_t*) addr; - mNumFrames = size / (channels*sizeof(int16_t)); + Provider(const void* addr, size_t frames, size_t frameSize, const Vector<int>& Pvalues) + : mAddr(addr), + mNumFrames(frames), + mFrameSize(frameSize), + mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) { } virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) { - buffer->frameCount = mNumFrames; - buffer->i16 = mAddr; - return NO_ERROR; + (void)pts; // suppress warning + size_t requestedFrames = buffer->frameCount; + if (requestedFrames > mNumFrames - mNextFrame) { + buffer->frameCount = mNumFrames - mNextFrame; + } + if 
(!mPvalues.isEmpty()) { + size_t provided = mPvalues[mNextPidx++]; + printf("mPvalue[%zu]=%zu not %zu\n", mNextPidx-1, provided, buffer->frameCount); + if (provided < buffer->frameCount) { + buffer->frameCount = provided; + } + if (mNextPidx >= mPvalues.size()) { + mNextPidx = 0; + } + } + if (gVerbose) { + printf("getNextBuffer() requested %zu frames out of %zu frames available," + " and returned %zu frames\n", + requestedFrames, (size_t) (mNumFrames - mNextFrame), buffer->frameCount); + } + mUnrel = buffer->frameCount; + if (buffer->frameCount > 0) { + buffer->raw = (char *)mAddr + mFrameSize * mNextFrame; + return NO_ERROR; + } else { + buffer->raw = NULL; + return NOT_ENOUGH_DATA; + } } virtual void releaseBuffer(Buffer* buffer) { + if (buffer->frameCount > mUnrel) { + fprintf(stderr, "ERROR releaseBuffer() released %zu frames but only %zu available " + "to release\n", buffer->frameCount, mUnrel); + mNextFrame += mUnrel; + mUnrel = 0; + } else { + if (gVerbose) { + printf("releaseBuffer() released %zu frames out of %zu frames available " + "to release\n", buffer->frameCount, mUnrel); + } + mNextFrame += buffer->frameCount; + mUnrel -= buffer->frameCount; + } + buffer->frameCount = 0; + buffer->raw = NULL; } - } provider(input_vaddr, input_size, channels); - - size_t input_frames = input_size / (channels * sizeof(int16_t)); - size_t output_size = 2 * 4 * ((int64_t) input_frames * output_freq) / input_freq; - output_size &= ~7; // always stereo, 32-bits - - void* output_vaddr = malloc(output_size); + void reset() { + mNextFrame = 0; + } + } provider(input_vaddr, input_frames, input_framesize, Pvalues); - if (profiling) { - AudioResampler* resampler = AudioResampler::create(16, channels, - output_freq, quality); + if (gVerbose) { + printf("%zu input frames\n", input_frames); + } - size_t out_frames = output_size/8; - resampler->setSampleRate(input_freq); - resampler->setVolume(0x1000, 0x1000); + audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + int output_channels = channels > 2 ? channels : 2; // output is at least stereo samples + size_t output_framesize = output_channels * (useFloat ? sizeof(float) : sizeof(int32_t)); + size_t output_frames = ((int64_t) input_frames * output_freq) / input_freq; + size_t output_size = output_frames * output_framesize; - memset(output_vaddr, 0, output_size); + if (profileFilter) { + // Check how fast sample rate changes are that require filter changes. + // The delta sample rate changes must indicate a downsampling ratio, + // and must be larger than 10% changes. + // + // On fast devices, filters should be generated between 0.1ms - 1ms. + // (single threaded). 
+ AudioResampler* resampler = AudioResampler::create(format, channels, + 8000, quality); + int looplimit = 100; timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); + for (int i = 0; i < looplimit; ++i) { + resampler->setSampleRate(9000); + resampler->setSampleRate(12000); + resampler->setSampleRate(20000); + resampler->setSampleRate(30000); + } clock_gettime(CLOCK_MONOTONIC, &end); int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; - int64_t time = (end_ns - start_ns)/4; - printf("%f Mspl/s\n", out_frames/(time/1e9)/1e6); + int64_t time = end_ns - start_ns; + printf("%.2f sample rate changes with filter calculation/sec\n", + looplimit * 4 / (time / 1e9)); + // Check how fast sample rate changes are without filter changes. + // This should be very fast, probably 0.1us - 1us per sample rate + // change. + resampler->setSampleRate(1000); + looplimit = 1000; + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->setSampleRate(1000+i); + } + clock_gettime(CLOCK_MONOTONIC, &end); + start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + time = end_ns - start_ns; + printf("%.2f sample rate changes without filter calculation/sec\n", + looplimit / (time / 1e9)); + resampler->reset(); delete resampler; } - AudioResampler* resampler = AudioResampler::create(16, channels, + void* output_vaddr = malloc(output_size); + AudioResampler* resampler = AudioResampler::create(format, channels, output_freq, quality); - size_t out_frames = output_size/8; + resampler->setSampleRate(input_freq); - resampler->setVolume(0x1000, 0x1000); + resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT); + + if (profileResample) { + /* + * For profiling on mobile devices, upon experimentation + * it is better to run a few trials with a shorter loop limit, + * and take the minimum time. + * + * Long tests can cause CPU temperature to build up and thermal throttling + * to reduce CPU frequency. + * + * For frequency checks (index=0, or 1, etc.): + * "cat /sys/devices/system/cpu/cpu${index}/cpufreq/scaling_*_freq" + * + * For temperature checks (index=0, or 1, etc.): + * "cat /sys/class/thermal/thermal_zone${index}/temp" + * + * Another way to avoid thermal throttling is to fix the CPU frequency + * at a lower level which prevents excessive temperatures. + */ + const int trials = 4; + const int looplimit = 4; + timespec start, end; + int64_t time = 0; + + for (int n = 0; n < trials; ++n) { + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->resample((int*) output_vaddr, output_frames, &provider); + provider.reset(); // during benchmarking reset only the provider + } + clock_gettime(CLOCK_MONOTONIC, &end); + int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + int64_t diff_ns = end_ns - start_ns; + if (n == 0 || diff_ns < time) { + time = diff_ns; // save the best out of our trials. + } + } + // Mfrms/s is "Millions of output frames per second". 
+ printf("quality: %d channels: %d msec: %" PRId64 " Mfrms/s: %.2lf\n", + quality, channels, time/1000000, output_frames * looplimit / (time / 1e9) / 1e6); + resampler->reset(); + } memset(output_vaddr, 0, output_size); - resampler->resample((int*) output_vaddr, out_frames, &provider); + if (gVerbose) { + printf("resample() %zu output frames\n", output_frames); + } + if (Ovalues.isEmpty()) { + Ovalues.push(output_frames); + } + for (size_t i = 0, j = 0; i < output_frames; ) { + size_t thisFrames = Ovalues[j++]; + if (j >= Ovalues.size()) { + j = 0; + } + if (thisFrames == 0 || thisFrames > output_frames - i) { + thisFrames = output_frames - i; + } + resampler->resample((int*) output_vaddr + output_channels*i, thisFrames, &provider); + i += thisFrames; + } + if (gVerbose) { + printf("resample() complete\n"); + } + resampler->reset(); + if (gVerbose) { + printf("reset() complete\n"); + } + delete resampler; + resampler = NULL; + + // For float processing, convert output format from float to Q4.27, + // which is then converted to int16_t for final storage. + if (useFloat) { + memcpy_to_q4_27_from_float(reinterpret_cast<int32_t*>(output_vaddr), + reinterpret_cast<float*>(output_vaddr), output_frames * output_channels); + } - // down-mix (we just truncate and keep the left channel) + // mono takes left channel only (out of stereo output pair) + // stereo and multichannel preserve all channels. int32_t* out = (int32_t*) output_vaddr; - int16_t* convert = (int16_t*) malloc(out_frames * channels * sizeof(int16_t)); - for (size_t i = 0; i < out_frames; i++) { - for (int j=0 ; j<channels ; j++) { - int32_t s = out[i * 2 + j] >> 12; - if (s > 32767) s = 32767; - else if (s < -32768) s = -32768; + int16_t* convert = (int16_t*) malloc(output_frames * channels * sizeof(int16_t)); + + const int volumeShift = 12; // shift requirement for Q4.27 to Q.15 + // round to half towards zero and saturate at int16 (non-dithered) + const int roundVal = (1<<(volumeShift-1)) - 1; // volumePrecision > 0 + + for (size_t i = 0; i < output_frames; i++) { + for (int j = 0; j < channels; j++) { + int32_t s = out[i * output_channels + j] + roundVal; // add offset here + if (s < 0) { + s = (s + 1) >> volumeShift; // round to 0 + if (s < -32768) { + s = -32768; + } + } else { + s = s >> volumeShift; + if (s > 32767) { + s = 32767; + } + } convert[i * channels + j] = int16_t(s); } } // write output to disk - int output_fd = open(file_out, O_WRONLY | O_CREAT | O_TRUNC, - S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); - if (output_fd < 0) { - fprintf(stderr, "open: %s\n", strerror(errno)); - return -1; + SF_INFO info; + info.frames = 0; + info.samplerate = output_freq; + info.channels = channels; + info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16; + SNDFILE *sf = sf_open(file_out, SFM_WRITE, &info); + if (sf == NULL) { + perror(file_out); + return EXIT_FAILURE; } + (void) sf_writef_short(sf, convert, output_frames); + sf_close(sf); - if (writeHeader) { - HeaderWav wav(out_frames * channels * sizeof(int16_t), channels, output_freq, 16); - write(output_fd, &wav, sizeof(wav)); - } - - write(output_fd, convert, out_frames * channels * sizeof(int16_t)); - close(output_fd); - - return 0; + return EXIT_SUCCESS; } diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk new file mode 100644 index 0000000..7bba05b --- /dev/null +++ b/services/audioflinger/tests/Android.mk @@ -0,0 +1,73 @@ +# Build the unit tests for audioflinger + +# +# resampler unit test +# +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + 
+LOCAL_SHARED_LIBRARIES := \ + liblog \ + libutils \ + libcutils \ + libstlport \ + libaudioutils \ + libaudioresampler + +LOCAL_STATIC_LIBRARIES := \ + libgtest \ + libgtest_main + +LOCAL_C_INCLUDES := \ + bionic \ + bionic/libstdc++/include \ + external/gtest/include \ + external/stlport/stlport \ + $(call include-path-for, audio-utils) \ + frameworks/av/services/audioflinger + +LOCAL_SRC_FILES := \ + resampler_tests.cpp + +LOCAL_MODULE := resampler_tests +LOCAL_MODULE_TAGS := tests + +include $(BUILD_EXECUTABLE) + +# +# audio mixer test tool +# +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + test-mixer.cpp \ + ../AudioMixer.cpp.arm \ + +LOCAL_C_INCLUDES := \ + bionic \ + bionic/libstdc++/include \ + external/stlport/stlport \ + $(call include-path-for, audio-effects) \ + $(call include-path-for, audio-utils) \ + frameworks/av/services/audioflinger + +LOCAL_STATIC_LIBRARIES := \ + libsndfile + +LOCAL_SHARED_LIBRARIES := \ + libstlport \ + libeffects \ + libnbaio \ + libcommon_time_client \ + libaudioresampler \ + libaudioutils \ + libdl \ + libcutils \ + libutils \ + liblog + +LOCAL_MODULE:= test-mixer + +LOCAL_MODULE_TAGS := optional + +include $(BUILD_EXECUTABLE) diff --git a/services/audioflinger/tests/build_and_run_all_unit_tests.sh b/services/audioflinger/tests/build_and_run_all_unit_tests.sh new file mode 100755 index 0000000..2c453b0 --- /dev/null +++ b/services/audioflinger/tests/build_and_run_all_unit_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ -z "$ANDROID_BUILD_TOP" ]; then + echo "Android build environment not set" + exit -1 +fi + +# ensure we have mm +. $ANDROID_BUILD_TOP/build/envsetup.sh + +pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/ +pwd +mm + +echo "waiting for device" +adb root && adb wait-for-device remount +adb push $OUT/system/lib/libaudioresampler.so /system/lib +adb push $OUT/system/bin/resampler_tests /system/bin + +sh $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/tests/run_all_unit_tests.sh + +popd diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh new file mode 100755 index 0000000..9b39e77 --- /dev/null +++ b/services/audioflinger/tests/mixer_to_wav_tests.sh @@ -0,0 +1,134 @@ +#!/bin/bash +# +# This script uses test-mixer to generate WAV files +# for evaluation of the AudioMixer component. +# +# Sine and chirp signals are used for input because they +# show up as clear lines, either horizontal or diagonal, +# on a spectrogram. This means easy verification of multiple +# track mixing. +# +# After execution, look for created subdirectories like +# mixer_i_i +# mixer_i_f +# mixer_f_f +# +# Recommend using a program such as audacity to evaluate +# the output WAV files, e.g. +# +# cd testdir +# audacity *.wav +# +# Using Audacity: +# +# Under "Waveform" view mode you can zoom into the +# start of the WAV file to verify proper ramping. +# +# Select "Spectrogram" to see verify the lines +# (sine = horizontal, chirp = diagonal) which should +# be clear (except for around the start as the volume +# ramping causes spectral distortion). + +if [ -z "$ANDROID_BUILD_TOP" ]; then + echo "Android build environment not set" + exit -1 +fi + +# ensure we have mm +. 
$ANDROID_BUILD_TOP/build/envsetup.sh + +pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/ + +# build +pwd +mm + +# send to device +echo "waiting for device" +adb root && adb wait-for-device remount +adb push $OUT/system/lib/libaudioresampler.so /system/lib +adb push $OUT/system/bin/test-mixer /system/bin + +# createwav creates a series of WAV files testing various +# mixer settings +# $1 = flags +# $2 = directory +function createwav() { +# create directory if it doesn't exist + if [ ! -d $2 ]; then + mkdir $2 + fi + +# Test: +# process__genericResampling +# track__Resample / track__genericResample + adb shell test-mixer $1 -s 48000 \ + -o /sdcard/tm48000gr.wav \ + sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 + adb pull /sdcard/tm48000gr.wav $2 + +# Test: +# process__genericResample +# track__Resample / track__genericResample +# track__NoResample / track__16BitsStereo / track__16BitsMono +# Aux buffer + adb shell test-mixer $1 -c 5 -s 9307 \ + -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \ + sine:4,1000,3000 sine:1,2000,9307 chirp:3,9307 + adb pull /sdcard/tm9307gra.wav $2 + adb pull /sdcard/aux9307gra.wav $2 + +# Test: +# process__genericNoResampling +# track__NoResample / track__16BitsStereo / track__16BitsMono + adb shell test-mixer $1 -s 32000 \ + -o /sdcard/tm32000gnr.wav \ + sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000 + adb pull /sdcard/tm32000gnr.wav $2 + +# Test: +# process__genericNoResampling +# track__NoResample / track__16BitsStereo / track__16BitsMono +# Aux buffer + adb shell test-mixer $1 -s 32000 \ + -a /sdcard/aux32000gnra.wav -o /sdcard/tm32000gnra.wav \ + sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000 + adb pull /sdcard/tm32000gnra.wav $2 + adb pull /sdcard/aux32000gnra.wav $2 + +# Test: +# process__NoResampleOneTrack / process__OneTrack16BitsStereoNoResampling +# Downmixer + adb shell test-mixer $1 -s 32000 \ + -o /sdcard/tm32000nrot.wav \ + sine:6,1000,32000 + adb pull /sdcard/tm32000nrot.wav $2 + +# Test: +# process__NoResampleOneTrack / OneTrack16BitsStereoNoResampling +# Aux buffer + adb shell test-mixer $1 -s 44100 \ + -a /sdcard/aux44100nrota.wav -o /sdcard/tm44100nrota.wav \ + sine:2,2000,44100 + adb pull /sdcard/tm44100nrota.wav $2 + adb pull /sdcard/aux44100nrota.wav $2 +} + +# +# Call createwav to generate WAV files in various combinations +# +# i_i = integer input track, integer mixer output +# f_f = float input track, float mixer output +# i_f = integer input track, float_mixer output +# +# If the mixer output is float, then the output WAV file is pcm float. +# +# TODO: create a "snr" like "diff" to automatically +# compare files in these directories together. +# + +createwav "" "tests/mixer_i_i" +createwav "-f -m" "tests/mixer_f_f" +createwav "-m" "tests/mixer_i_f" + +popd diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp new file mode 100644 index 0000000..d6217ba --- /dev/null +++ b/services/audioflinger/tests/resampler_tests.cpp @@ -0,0 +1,411 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "audioflinger_resampler_tests" + +#include <unistd.h> +#include <stdio.h> +#include <stdlib.h> +#include <fcntl.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <errno.h> +#include <time.h> +#include <math.h> +#include <vector> +#include <utility> +#include <iostream> +#include <cutils/log.h> +#include <gtest/gtest.h> +#include <media/AudioBufferProvider.h> +#include "AudioResampler.h" +#include "test_utils.h" + +void resample(int channels, void *output, + size_t outputFrames, const std::vector<size_t> &outputIncr, + android::AudioBufferProvider *provider, android::AudioResampler *resampler) +{ + for (size_t i = 0, j = 0; i < outputFrames; ) { + size_t thisFrames = outputIncr[j++]; + if (j >= outputIncr.size()) { + j = 0; + } + if (thisFrames == 0 || thisFrames > outputFrames - i) { + thisFrames = outputFrames - i; + } + resampler->resample((int32_t*) output + channels*i, thisFrames, provider); + i += thisFrames; + } +} + +void buffercmp(const void *reference, const void *test, + size_t outputFrameSize, size_t outputFrames) +{ + for (size_t i = 0; i < outputFrames; ++i) { + int check = memcmp((const char*)reference + i * outputFrameSize, + (const char*)test + i * outputFrameSize, outputFrameSize); + if (check) { + ALOGE("Failure at frame %zu", i); + ASSERT_EQ(check, 0); /* fails */ + } + } +} + +void testBufferIncrement(size_t channels, bool useFloat, + unsigned inputFreq, unsigned outputFreq, + enum android::AudioResampler::src_quality quality) +{ + const audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + // create the provider + std::vector<int> inputIncr; + SignalProvider provider; + if (useFloat) { + provider.setChirp<float>(channels, + 0., outputFreq/2., outputFreq, outputFreq/2000.); + } else { + provider.setChirp<int16_t>(channels, + 0., outputFreq/2., outputFreq, outputFreq/2000.); + } + provider.setIncr(inputIncr); + + // calculate the output size + size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq; + size_t outputFrameSize = channels * (useFloat ? 
sizeof(float) : sizeof(int32_t)); + size_t outputSize = outputFrameSize * outputFrames; + outputSize &= ~7; + + // create the resampler + android::AudioResampler* resampler; + + resampler = android::AudioResampler::create(format, channels, outputFreq, quality); + resampler->setSampleRate(inputFreq); + resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT, + android::AudioResampler::UNITY_GAIN_FLOAT); + + // set up the reference run + std::vector<size_t> refIncr; + refIncr.push_back(outputFrames); + void* reference = malloc(outputSize); + resample(channels, reference, outputFrames, refIncr, &provider, resampler); + + provider.reset(); + +#if 0 + /* this test will fail - API interface issue: reset() does not clear internal buffers */ + resampler->reset(); +#else + delete resampler; + resampler = android::AudioResampler::create(format, channels, outputFreq, quality); + resampler->setSampleRate(inputFreq); + resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT, + android::AudioResampler::UNITY_GAIN_FLOAT); +#endif + + // set up the test run + std::vector<size_t> outIncr; + outIncr.push_back(1); + outIncr.push_back(2); + outIncr.push_back(3); + void* test = malloc(outputSize); + inputIncr.push_back(1); + inputIncr.push_back(3); + provider.setIncr(inputIncr); + resample(channels, test, outputFrames, outIncr, &provider, resampler); + + // check + buffercmp(reference, test, outputFrameSize, outputFrames); + + free(reference); + free(test); + delete resampler; +} + +template <typename T> +inline double sqr(T v) +{ + double dv = static_cast<double>(v); + return dv * dv; +} + +template <typename T> +double signalEnergy(T *start, T *end, unsigned stride) +{ + double accum = 0; + + for (T *p = start; p < end; p += stride) { + accum += sqr(*p); + } + unsigned count = (end - start + stride - 1) / stride; + return accum / count; +} + +// TI = resampler input type, int16_t or float +// TO = resampler output type, int32_t or float +template <typename TI, typename TO> +void testStopbandDownconversion(size_t channels, + unsigned inputFreq, unsigned outputFreq, + unsigned passband, unsigned stopband, + enum android::AudioResampler::src_quality quality) +{ + // create the provider + std::vector<int> inputIncr; + SignalProvider provider; + provider.setChirp<TI>(channels, + 0., inputFreq/2., inputFreq, inputFreq/2000.); + provider.setIncr(inputIncr); + + // calculate the output size + size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq; + size_t outputFrameSize = channels * sizeof(TO); + size_t outputSize = outputFrameSize * outputFrames; + outputSize &= ~7; + + // create the resampler + android::AudioResampler* resampler; + + resampler = android::AudioResampler::create( + is_same<TI, int16_t>::value ? 
AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_FLOAT, + channels, outputFreq, quality); + resampler->setSampleRate(inputFreq); + resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT, + android::AudioResampler::UNITY_GAIN_FLOAT); + + // set up the reference run + std::vector<size_t> refIncr; + refIncr.push_back(outputFrames); + void* reference = malloc(outputSize); + resample(channels, reference, outputFrames, refIncr, &provider, resampler); + + TO *out = reinterpret_cast<TO *>(reference); + + // check signal energy in passband + const unsigned passbandFrame = passband * outputFreq / 1000.; + const unsigned stopbandFrame = stopband * outputFreq / 1000.; + + // check each channel separately + for (size_t i = 0; i < channels; ++i) { + double passbandEnergy = signalEnergy(out, out + passbandFrame * channels, channels); + double stopbandEnergy = signalEnergy(out + stopbandFrame * channels, + out + outputFrames * channels, channels); + double dbAtten = -10. * log10(stopbandEnergy / passbandEnergy); + ASSERT_GT(dbAtten, 60.); + +#if 0 + // internal verification + printf("if:%d of:%d pbf:%d sbf:%d sbe: %f pbe: %f db: %.2f\n", + provider.getNumFrames(), outputFrames, + passbandFrame, stopbandFrame, stopbandEnergy, passbandEnergy, dbAtten); + for (size_t i = 0; i < 10; ++i) { + std::cout << out[i+passbandFrame*channels] << std::endl; + } + for (size_t i = 0; i < 10; ++i) { + std::cout << out[i+stopbandFrame*channels] << std::endl; + } +#endif + } + + free(reference); + delete resampler; +} + +/* Buffer increment test + * + * We compare a reference output, where we consume and process the entire + * buffer at a time, and a test output, where we provide small chunks of input + * data and process small chunks of output (which may not be equivalent in size). 
+ * + * Two subtests - fixed phase (3:2 down) and interpolated phase (147:320 up) + */ +TEST(audioflinger_resampler, bufferincrement_fixedphase) { + // all of these work + static const enum android::AudioResampler::src_quality kQualityArray[] = { + android::AudioResampler::LOW_QUALITY, + android::AudioResampler::MED_QUALITY, + android::AudioResampler::HIGH_QUALITY, + android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testBufferIncrement(2, false, 48000, 32000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, bufferincrement_interpolatedphase) { + // all of these work except low quality + static const enum android::AudioResampler::src_quality kQualityArray[] = { +// android::AudioResampler::LOW_QUALITY, + android::AudioResampler::MED_QUALITY, + android::AudioResampler::HIGH_QUALITY, + android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testBufferIncrement(2, false, 22050, 48000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, bufferincrement_fixedphase_multi) { + // only dynamic quality + static const enum android::AudioResampler::src_quality kQualityArray[] = { + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testBufferIncrement(4, false, 48000, 32000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, bufferincrement_interpolatedphase_multi_float) { + // only dynamic quality + static const enum android::AudioResampler::src_quality kQualityArray[] = { + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testBufferIncrement(8, true, 22050, 48000, kQualityArray[i]); + } +} + +/* Simple aliasing test + * + * This checks stopband response of the chirp signal to make sure frequencies + * are properly suppressed. It uses downsampling because the stopband can be + * clearly isolated by input frequencies exceeding the output sample rate (nyquist). + */ +TEST(audioflinger_resampler, stopbandresponse_integer) { + // not all of these may work (old resamplers fail on downsampling) + static const enum android::AudioResampler::src_quality kQualityArray[] = { + //android::AudioResampler::LOW_QUALITY, + //android::AudioResampler::MED_QUALITY, + //android::AudioResampler::HIGH_QUALITY, + //android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + // in this test we assume a maximum transition band between 12kHz and 20kHz. + // there must be at least 60dB relative attenuation between stopband and passband. + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion<int16_t, int32_t>( + 2, 48000, 32000, 12000, 20000, kQualityArray[i]); + } + + // in this test we assume a maximum transition band between 7kHz and 15kHz. + // there must be at least 60dB relative attenuation between stopband and passband. 
+ // (the weird ratio triggers interpolative resampling) + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion<int16_t, int32_t>( + 2, 48000, 22101, 7000, 15000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, stopbandresponse_integer_multichannel) { + // not all of these may work (old resamplers fail on downsampling) + static const enum android::AudioResampler::src_quality kQualityArray[] = { + //android::AudioResampler::LOW_QUALITY, + //android::AudioResampler::MED_QUALITY, + //android::AudioResampler::HIGH_QUALITY, + //android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + // in this test we assume a maximum transition band between 12kHz and 20kHz. + // there must be at least 60dB relative attenuation between stopband and passband. + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion<int16_t, int32_t>( + 8, 48000, 32000, 12000, 20000, kQualityArray[i]); + } + + // in this test we assume a maximum transition band between 7kHz and 15kHz. + // there must be at least 60dB relative attenuation between stopband and passband. + // (the weird ratio triggers interpolative resampling) + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion<int16_t, int32_t>( + 8, 48000, 22101, 7000, 15000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, stopbandresponse_float) { + // not all of these may work (old resamplers fail on downsampling) + static const enum android::AudioResampler::src_quality kQualityArray[] = { + //android::AudioResampler::LOW_QUALITY, + //android::AudioResampler::MED_QUALITY, + //android::AudioResampler::HIGH_QUALITY, + //android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + // in this test we assume a maximum transition band between 12kHz and 20kHz. + // there must be at least 60dB relative attenuation between stopband and passband. + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion<float, float>( + 2, 48000, 32000, 12000, 20000, kQualityArray[i]); + } + + // in this test we assume a maximum transition band between 7kHz and 15kHz. + // there must be at least 60dB relative attenuation between stopband and passband. + // (the weird ratio triggers interpolative resampling) + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion<float, float>( + 2, 48000, 22101, 7000, 15000, kQualityArray[i]); + } +} + +TEST(audioflinger_resampler, stopbandresponse_float_multichannel) { + // not all of these may work (old resamplers fail on downsampling) + static const enum android::AudioResampler::src_quality kQualityArray[] = { + //android::AudioResampler::LOW_QUALITY, + //android::AudioResampler::MED_QUALITY, + //android::AudioResampler::HIGH_QUALITY, + //android::AudioResampler::VERY_HIGH_QUALITY, + android::AudioResampler::DYN_LOW_QUALITY, + android::AudioResampler::DYN_MED_QUALITY, + android::AudioResampler::DYN_HIGH_QUALITY, + }; + + // in this test we assume a maximum transition band between 12kHz and 20kHz. + // there must be at least 60dB relative attenuation between stopband and passband. 
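// [Illustrative note; not from the patch itself] The 60 dB requirement maps to an energy
// ratio as follows: testStopbandDownconversion() computes
//     dbAtten = -10 * log10(stopbandEnergy / passbandEnergy),
// so dbAtten > 60 holds exactly when stopbandEnergy < 1e-6 * passbandEnergy, i.e. the
// mean-square level measured in the stopband of the chirp output must be at least a
// million times below the level measured in its passband.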
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion<float, float>( + 8, 48000, 32000, 12000, 20000, kQualityArray[i]); + } + + // in this test we assume a maximum transition band between 7kHz and 15kHz. + // there must be at least 60dB relative attenuation between stopband and passband. + // (the weird ratio triggers interpolative resampling) + for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) { + testStopbandDownconversion<float, float>( + 8, 48000, 22101, 7000, 15000, kQualityArray[i]); + } +} + diff --git a/services/audioflinger/tests/run_all_unit_tests.sh b/services/audioflinger/tests/run_all_unit_tests.sh new file mode 100755 index 0000000..ffae6ae --- /dev/null +++ b/services/audioflinger/tests/run_all_unit_tests.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [ -z "$ANDROID_BUILD_TOP" ]; then + echo "Android build environment not set" + exit -1 +fi + +echo "waiting for device" +adb root && adb wait-for-device remount + +adb shell /system/bin/resampler_tests diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp new file mode 100644 index 0000000..9a4fad6 --- /dev/null +++ b/services/audioflinger/tests/test-mixer.cpp @@ -0,0 +1,306 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <stdio.h> +#include <inttypes.h> +#include <math.h> +#include <vector> +#include <audio_utils/primitives.h> +#include <audio_utils/sndfile.h> +#include <media/AudioBufferProvider.h> +#include "AudioMixer.h" +#include "test_utils.h" + +/* Testing is typically through creation of an output WAV file from several + * source inputs, to be later analyzed by an audio program such as Audacity. + * + * Sine or chirp functions are typically more useful as input to the mixer + * as they show up as straight lines on a spectrogram if successfully mixed. 
+ * + * A sample shell script is provided: mixer_to_wave_tests.sh + */ + +using namespace android; + +static void usage(const char* name) { + fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]" + " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]" + " (<input-file> | <command>)+\n", name); + fprintf(stderr, " -f enable floating point input track\n"); + fprintf(stderr, " -m enable floating point mixer output\n"); + fprintf(stderr, " -c number of mixer output channels\n"); + fprintf(stderr, " -s mixer sample-rate\n"); + fprintf(stderr, " -o <output-file> WAV file, pcm16 (or float if -m specified)\n"); + fprintf(stderr, " -a <aux-buffer-file>\n"); + fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n"); + fprintf(stderr, " <input-file> is a WAV file\n"); + fprintf(stderr, " <command> can be 'sine:<channels>,<frequency>,<samplerate>'\n"); + fprintf(stderr, " 'chirp:<channels>,<samplerate>'\n"); +} + +static int writeFile(const char *filename, const void *buffer, + uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) { + if (filename == NULL) { + return 0; // ok to pass in NULL filename + } + // write output to file. + SF_INFO info; + info.frames = 0; + info.samplerate = sampleRate; + info.channels = channels; + info.format = SF_FORMAT_WAV | (isBufferFloat ? SF_FORMAT_FLOAT : SF_FORMAT_PCM_16); + printf("saving file:%s channels:%u samplerate:%u frames:%zu\n", + filename, info.channels, info.samplerate, frames); + SNDFILE *sf = sf_open(filename, SFM_WRITE, &info); + if (sf == NULL) { + perror(filename); + return EXIT_FAILURE; + } + if (isBufferFloat) { + (void) sf_writef_float(sf, (float*)buffer, frames); + } else { + (void) sf_writef_short(sf, (short*)buffer, frames); + } + sf_close(sf); + return EXIT_SUCCESS; +} + +int main(int argc, char* argv[]) { + const char* const progname = argv[0]; + bool useInputFloat = false; + bool useMixerFloat = false; + bool useRamp = true; + uint32_t outputSampleRate = 48000; + uint32_t outputChannels = 2; // stereo for now + std::vector<int> Pvalues; + const char* outputFilename = NULL; + const char* auxFilename = NULL; + std::vector<int32_t> Names; + std::vector<SignalProvider> Providers; + + for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) { + switch (ch) { + case 'f': + useInputFloat = true; + break; + case 'm': + useMixerFloat = true; + break; + case 'c': + outputChannels = atoi(optarg); + break; + case 's': + outputSampleRate = atoi(optarg); + break; + case 'o': + outputFilename = optarg; + break; + case 'a': + auxFilename = optarg; + break; + case 'P': + if (parseCSV(optarg, Pvalues) < 0) { + fprintf(stderr, "incorrect syntax for -P option\n"); + return EXIT_FAILURE; + } + break; + case '?': + default: + usage(progname); + return EXIT_FAILURE; + } + } + argc -= optind; + argv += optind; + + if (argc == 0) { + usage(progname); + return EXIT_FAILURE; + } + if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) { + fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS); + return EXIT_FAILURE; + } + + size_t outputFrames = 0; + + // create providers for each track + Providers.resize(argc); + for (int i = 0; i < argc; ++i) { + static const char chirp[] = "chirp:"; + static const char sine[] = "sine:"; + static const double kSeconds = 1; + + if (!strncmp(argv[i], chirp, strlen(chirp))) { + std::vector<int> v; + + parseCSV(argv[i] + strlen(chirp), v); + if (v.size() == 2) { + printf("creating chirp(%d %d)\n", v[0], v[1]); + if (useInputFloat) { + 
Providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds); + } else { + Providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds); + } + Providers[i].setIncr(Pvalues); + } else { + fprintf(stderr, "malformed input '%s'\n", argv[i]); + } + } else if (!strncmp(argv[i], sine, strlen(sine))) { + std::vector<int> v; + + parseCSV(argv[i] + strlen(sine), v); + if (v.size() == 3) { + printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]); + if (useInputFloat) { + Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds); + } else { + Providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds); + } + Providers[i].setIncr(Pvalues); + } else { + fprintf(stderr, "malformed input '%s'\n", argv[i]); + } + } else { + printf("creating filename(%s)\n", argv[i]); + if (useInputFloat) { + Providers[i].setFile<float>(argv[i]); + } else { + Providers[i].setFile<short>(argv[i]); + } + Providers[i].setIncr(Pvalues); + } + // calculate the number of output frames + size_t nframes = (int64_t) Providers[i].getNumFrames() * outputSampleRate + / Providers[i].getSampleRate(); + if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames + outputFrames = nframes; + } + } + + // create the output buffer. + const size_t outputFrameSize = outputChannels + * (useMixerFloat ? sizeof(float) : sizeof(int16_t)); + const size_t outputSize = outputFrames * outputFrameSize; + const audio_channel_mask_t outputChannelMask = + audio_channel_out_mask_from_count(outputChannels); + void *outputAddr = NULL; + (void) posix_memalign(&outputAddr, 32, outputSize); + memset(outputAddr, 0, outputSize); + + // create the aux buffer, if needed. + const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always + const size_t auxSize = outputFrames * auxFrameSize; + void *auxAddr = NULL; + if (auxFilename) { + (void) posix_memalign(&auxAddr, 32, auxSize); + memset(auxAddr, 0, auxSize); + } + + // create the mixer. + const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960 + AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate); + audio_format_t inputFormat = useInputFloat + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + audio_format_t mixerFormat = useMixerFloat + ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT; + float f = AudioMixer::UNITY_GAIN_FLOAT / Providers.size(); // normalize volume by # tracks + static float f0; // zero + + // set up the tracks. 
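// [Illustrative summary; not from the patch itself] Condensed, the per-track setup in the
// loop below follows this sequence:
//   name = mixer->getTrackName(channelMask, inputFormat, AUDIO_SESSION_OUTPUT_MIX);
//                                                    // allocate a mixer track slot
//   mixer->setBufferProvider(name, &Providers[i]);   // source of input frames
//   TRACK: MAIN_BUFFER, MIXER_FORMAT, FORMAT,        // output buffer, mixer/input formats,
//          MIXER_CHANNEL_MASK, CHANNEL_MASK          // output/input channel layouts
//   RESAMPLE: SAMPLE_RATE                            // per-track resampling to outputSampleRate
//   VOLUME or RAMP_VOLUME: VOLUME0/VOLUME1/AUXLEVEL  // gains; RAMP_VOLUME ramps from the
//                                                    // current value (here 0) toward f
//   mixer->enable(name);                             // include the track in process()
// The gain f = UNITY_GAIN_FLOAT / Providers.size() keeps the worst-case sum of all
// full-scale tracks at unity, so the mixed output cannot exceed full scale.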
+ for (size_t i = 0; i < Providers.size(); ++i) { + //printf("track %d out of %d\n", i, Providers.size()); + uint32_t channelMask = audio_channel_out_mask_from_count(Providers[i].getNumChannels()); + int32_t name = mixer->getTrackName(channelMask, + inputFormat, AUDIO_SESSION_OUTPUT_MIX); + ALOG_ASSERT(name >= 0); + Names.push_back(name); + mixer->setBufferProvider(name, &Providers[i]); + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (void *)outputAddr); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, + (void *)(uintptr_t)mixerFormat); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::FORMAT, + (void *)(uintptr_t)inputFormat); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_CHANNEL_MASK, + (void *)(uintptr_t)outputChannelMask); + mixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::CHANNEL_MASK, + (void *)(uintptr_t)channelMask); + mixer->setParameter( + name, + AudioMixer::RESAMPLE, + AudioMixer::SAMPLE_RATE, + (void *)(uintptr_t)Providers[i].getSampleRate()); + if (useRamp) { + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f); + } else { + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f); + } + if (auxFilename) { + mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER, + (void *) auxAddr); + mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, &f0); + mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f); + } + mixer->enable(name); + } + + // pump the mixer to process data. + size_t i; + for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) { + for (size_t j = 0; j < Names.size(); ++j) { + mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, + (char *) outputAddr + i * outputFrameSize); + if (auxFilename) { + mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER, + (char *) auxAddr + i * auxFrameSize); + } + } + mixer->process(AudioBufferProvider::kInvalidPTS); + } + outputFrames = i; // reset output frames to the data actually produced. + + // write to files + writeFile(outputFilename, outputAddr, + outputSampleRate, outputChannels, outputFrames, useMixerFloat); + if (auxFilename) { + // Aux buffer is always in q4_27 format for now. + // memcpy_to_i16_from_q4_27(), but with stereo frame count (not sample count) + ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1); + writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false); + } + + delete mixer; + free(outputAddr); + free(auxAddr); + return EXIT_SUCCESS; +} diff --git a/services/audioflinger/tests/test_utils.h b/services/audioflinger/tests/test_utils.h new file mode 100644 index 0000000..3d51cdc --- /dev/null +++ b/services/audioflinger/tests/test_utils.h @@ -0,0 +1,307 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_TEST_UTILS_H +#define ANDROID_AUDIO_TEST_UTILS_H + +#include <audio_utils/sndfile.h> + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) +#endif + +template<typename T, typename U> +struct is_same +{ + static const bool value = false; +}; + +template<typename T> +struct is_same<T, T> // partial specialization +{ + static const bool value = true; +}; + +template<typename T> +static inline T convertValue(double val) +{ + if (is_same<T, int16_t>::value) { + return floor(val * 32767.0 + 0.5); + } else if (is_same<T, int32_t>::value) { + return floor(val * (1UL<<31) + 0.5); + } + return val; // assume float or double +} + +// Convert a list of integers in CSV format to a Vector of those values. +// Returns the number of elements in the list, or -1 on error. +static inline int parseCSV(const char *string, std::vector<int>& values) +{ + // pass 1: count the number of values and do syntax check + size_t numValues = 0; + bool hadDigit = false; + for (const char *p = string; ; ) { + switch (*p++) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + hadDigit = true; + break; + case '\0': + if (hadDigit) { + // pass 2: allocate and initialize vector of values + values.resize(++numValues); + values[0] = atoi(p = string); + for (size_t i = 1; i < numValues; ) { + if (*p++ == ',') { + values[i++] = atoi(p); + } + } + return numValues; + } + // fall through + case ',': + if (hadDigit) { + hadDigit = false; + numValues++; + break; + } + // fall through + default: + return -1; + } + } +} + +/* Creates a type-independent audio buffer provider from + * a buffer base address, size, framesize, and input increment array. + * + * No allocation or deallocation of the provided buffer is done. 
+ */ +class TestProvider : public android::AudioBufferProvider { +public: + TestProvider(void* addr, size_t frames, size_t frameSize, + const std::vector<int>& inputIncr) + : mAddr(addr), + mNumFrames(frames), + mFrameSize(frameSize), + mNextFrame(0), mUnrel(0), mInputIncr(inputIncr), mNextIdx(0) + { + } + + TestProvider() + : mAddr(NULL), mNumFrames(0), mFrameSize(0), + mNextFrame(0), mUnrel(0), mNextIdx(0) + { + } + + void setIncr(const std::vector<int>& inputIncr) { + mInputIncr = inputIncr; + mNextIdx = 0; + } + + virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS) + { + size_t requestedFrames = buffer->frameCount; + if (requestedFrames > mNumFrames - mNextFrame) { + buffer->frameCount = mNumFrames - mNextFrame; + } + if (!mInputIncr.empty()) { + size_t provided = mInputIncr[mNextIdx++]; + ALOGV("getNextBuffer() mValue[%zu]=%zu not %zu", + mNextIdx-1, provided, buffer->frameCount); + if (provided < buffer->frameCount) { + buffer->frameCount = provided; + } + if (mNextIdx >= mInputIncr.size()) { + mNextIdx = 0; + } + } + ALOGV("getNextBuffer() requested %zu frames out of %zu frames available" + " and returned %zu frames", + requestedFrames, mNumFrames - mNextFrame, buffer->frameCount); + mUnrel = buffer->frameCount; + if (buffer->frameCount > 0) { + buffer->raw = (char *)mAddr + mFrameSize * mNextFrame; + return android::NO_ERROR; + } else { + buffer->raw = NULL; + return android::NOT_ENOUGH_DATA; + } + } + + virtual void releaseBuffer(Buffer* buffer) + { + if (buffer->frameCount > mUnrel) { + ALOGE("releaseBuffer() released %zu frames but only %zu available " + "to release", buffer->frameCount, mUnrel); + mNextFrame += mUnrel; + mUnrel = 0; + } else { + + ALOGV("releaseBuffer() released %zu frames out of %zu frames available " + "to release", buffer->frameCount, mUnrel); + mNextFrame += buffer->frameCount; + mUnrel -= buffer->frameCount; + } + buffer->frameCount = 0; + buffer->raw = NULL; + } + + void reset() + { + mNextFrame = 0; + } + + size_t getNumFrames() + { + return mNumFrames; + } + + +protected: + void* mAddr; // base address + size_t mNumFrames; // total frames + int mFrameSize; // frame size (# channels * bytes per sample) + size_t mNextFrame; // index of next frame to provide + size_t mUnrel; // number of frames not yet released + std::vector<int> mInputIncr; // number of frames provided per call + size_t mNextIdx; // index of next entry in mInputIncr to use +}; + +/* Creates a buffer filled with a sine wave. + */ +template<typename T> +static void createSine(void *vbuffer, size_t frames, + size_t channels, double sampleRate, double freq) +{ + double tscale = 1. / sampleRate; + T* buffer = reinterpret_cast<T*>(vbuffer); + for (size_t i = 0; i < frames; ++i) { + double t = i * tscale; + double y = sin(2. * M_PI * freq * t); + T yt = convertValue<T>(y); + + for (size_t j = 0; j < channels; ++j) { + buffer[i*channels + j] = yt / T(j + 1); + } + } +} + +/* Creates a buffer filled with a chirp signal (a sine wave sweep). + * + * When creating the Chirp, note that the frequency is the true sinusoidal + * frequency not the sampling rate. + * + * http://en.wikipedia.org/wiki/Chirp + */ +template<typename T> +static void createChirp(void *vbuffer, size_t frames, + size_t channels, double sampleRate, double minfreq, double maxfreq) +{ + double tscale = 1. / sampleRate; + T *buffer = reinterpret_cast<T*>(vbuffer); + // note the chirp constant k has a divide-by-two. + double k = (maxfreq - minfreq) / (2. 
* tscale * frames); + for (size_t i = 0; i < frames; ++i) { + double t = i * tscale; + double y = sin(2. * M_PI * (k * t + minfreq) * t); + T yt = convertValue<T>(y); + + for (size_t j = 0; j < channels; ++j) { + buffer[i*channels + j] = yt / T(j + 1); + } + } +} + +/* This derived class creates a buffer provider of datatype T, + * consisting of an input signal, e.g. from createChirp(). + * The number of frames can be obtained from the base class + * TestProvider::getNumFrames(). + */ + +class SignalProvider : public TestProvider { +public: + SignalProvider() + : mSampleRate(0), + mChannels(0) + { + } + + virtual ~SignalProvider() + { + free(mAddr); + mAddr = NULL; + } + + template <typename T> + void setChirp(size_t channels, double minfreq, double maxfreq, double sampleRate, double time) + { + createBufferByFrames<T>(channels, sampleRate, sampleRate*time); + createChirp<T>(mAddr, mNumFrames, mChannels, mSampleRate, minfreq, maxfreq); + } + + template <typename T> + void setSine(size_t channels, + double freq, double sampleRate, double time) + { + createBufferByFrames<T>(channels, sampleRate, sampleRate*time); + createSine<T>(mAddr, mNumFrames, mChannels, mSampleRate, freq); + } + + template <typename T> + void setFile(const char *file_in) + { + SF_INFO info; + info.format = 0; + SNDFILE *sf = sf_open(file_in, SFM_READ, &info); + if (sf == NULL) { + perror(file_in); + return; + } + createBufferByFrames<T>(info.channels, info.samplerate, info.frames); + if (is_same<T, float>::value) { + (void) sf_readf_float(sf, (float *) mAddr, mNumFrames); + } else if (is_same<T, short>::value) { + (void) sf_readf_short(sf, (short *) mAddr, mNumFrames); + } + sf_close(sf); + } + + template <typename T> + void createBufferByFrames(size_t channels, uint32_t sampleRate, size_t frames) + { + mNumFrames = frames; + mChannels = channels; + mFrameSize = mChannels * sizeof(T); + free(mAddr); + mAddr = malloc(mFrameSize * mNumFrames); + mSampleRate = sampleRate; + } + + uint32_t getSampleRate() const { + return mSampleRate; + } + + uint32_t getNumChannels() const { + return mChannels; + } + +protected: + uint32_t mSampleRate; + uint32_t mChannels; +}; + +#endif // ANDROID_AUDIO_TEST_UTILS_H |
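For reference, the divide-by-two in createChirp() follows directly from its phase function: with phase 2π·(k·t + minfreq)·t, the instantaneous frequency is minfreq + 2·k·t, so reaching maxfreq at the end of the buffer (t = tscale·frames) requires k = (maxfreq − minfreq) / (2·tscale·frames), exactly as computed above. The sketch below shows how these test_utils.h helpers are typically wired together; it mirrors the includes used by resampler_tests.cpp, and the chirp parameters and "-P"-style increments are arbitrary examples rather than values taken from this patch:

#include <math.h>
#include <stdlib.h>
#include <vector>
#include <media/AudioBufferProvider.h>
#include "test_utils.h"

int main()
{
    // 100 ms stereo chirp sweeping 0 Hz .. 24 kHz, generated as 16-bit PCM
    // frames at a 48 kHz sample rate.
    SignalProvider provider;
    provider.setChirp<int16_t>(2 /*channels*/, 0. /*minfreq*/, 24000. /*maxfreq*/,
            48000. /*sampleRate*/, 0.1 /*seconds*/);

    // Deliver the input in repeating chunks of 5 and 7 frames, the same way a
    // "-P 5,7" command line option is parsed by parseCSV() in these tests.
    std::vector<int> inputIncr;
    parseCSV("5,7", inputIncr);
    provider.setIncr(inputIncr);

    // provider is now ready to be handed to an AudioResampler or AudioMixer
    // as an android::AudioBufferProvider.
    return provider.getNumFrames() > 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}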