diff options
Diffstat (limited to 'services')
81 files changed, 13479 insertions, 2483 deletions
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk index b895027..27e38a3 100644 --- a/services/audioflinger/Android.mk +++ b/services/audioflinger/Android.mk @@ -13,18 +13,27 @@ include $(BUILD_STATIC_LIBRARY) include $(CLEAR_VARS) +LOCAL_SRC_FILES := \ + ServiceUtilities.cpp + +# FIXME Move this library to frameworks/native +LOCAL_MODULE := libserviceutility + +include $(BUILD_STATIC_LIBRARY) + +include $(CLEAR_VARS) + LOCAL_SRC_FILES:= \ AudioFlinger.cpp \ Threads.cpp \ Tracks.cpp \ Effects.cpp \ AudioMixer.cpp.arm \ - AudioPolicyService.cpp \ - ServiceUtilities.cpp \ LOCAL_SRC_FILES += StateQueue.cpp LOCAL_C_INCLUDES := \ + $(TOPDIR)frameworks/av/services/audiopolicy \ $(call include-path-for, audio-effects) \ $(call include-path-for, audio-utils) @@ -46,12 +55,13 @@ LOCAL_SHARED_LIBRARIES := \ LOCAL_STATIC_LIBRARIES := \ libscheduling_policy \ libcpustats \ - libmedia_helper + libmedia_helper \ + libserviceutility LOCAL_MODULE:= libaudioflinger LOCAL_32_BIT_ONLY := true -LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp AudioWatchdog.cpp +LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp AudioWatchdog.cpp FastThreadState.cpp LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"' @@ -72,10 +82,21 @@ include $(BUILD_SHARED_LIBRARY) include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ - test-resample.cpp \ + test-resample.cpp \ + +LOCAL_C_INCLUDES := \ + $(call include-path-for, audio-utils) + +LOCAL_STATIC_LIBRARIES := \ + libsndfile LOCAL_SHARED_LIBRARIES := \ libaudioresampler \ + libaudioutils \ + libdl \ + libcutils \ + libutils \ + liblog LOCAL_MODULE:= test-resample @@ -88,7 +109,8 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ AudioResampler.cpp.arm \ AudioResamplerCubic.cpp.arm \ - AudioResamplerSinc.cpp.arm + AudioResamplerSinc.cpp.arm \ + AudioResamplerDyn.cpp.arm LOCAL_SHARED_LIBRARIES := \ libcutils \ diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp index 
c0c34f7..755d480 100644 --- a/services/audioflinger/AudioFlinger.cpp +++ b/services/audioflinger/AudioFlinger.cpp @@ -104,6 +104,27 @@ static const nsecs_t kMinGlobalEffectEnabletimeNs = seconds(7200); // ---------------------------------------------------------------------------- +const char *formatToString(audio_format_t format) { + switch(format) { + case AUDIO_FORMAT_PCM_SUB_8_BIT: return "pcm8"; + case AUDIO_FORMAT_PCM_SUB_16_BIT: return "pcm16"; + case AUDIO_FORMAT_PCM_SUB_32_BIT: return "pcm32"; + case AUDIO_FORMAT_PCM_SUB_8_24_BIT: return "pcm8.24"; + case AUDIO_FORMAT_PCM_SUB_24_BIT_PACKED: return "pcm24"; + case AUDIO_FORMAT_PCM_SUB_FLOAT: return "pcmfloat"; + case AUDIO_FORMAT_MP3: return "mp3"; + case AUDIO_FORMAT_AMR_NB: return "amr-nb"; + case AUDIO_FORMAT_AMR_WB: return "amr-wb"; + case AUDIO_FORMAT_AAC: return "aac"; + case AUDIO_FORMAT_HE_AAC_V1: return "he-aac-v1"; + case AUDIO_FORMAT_HE_AAC_V2: return "he-aac-v2"; + case AUDIO_FORMAT_VORBIS: return "vorbis"; + default: + break; + } + return "unknown"; +} + static int load_audio_interface(const char *if_name, audio_hw_device_t **dev) { const hw_module_t *mod; @@ -138,6 +159,7 @@ out: AudioFlinger::AudioFlinger() : BnAudioFlinger(), mPrimaryHardwareDev(NULL), + mAudioHwDevs(NULL), mHardwareStatus(AUDIO_HW_IDLE), mMasterVolume(1.0f), mMasterMute(false), @@ -152,7 +174,7 @@ AudioFlinger::AudioFlinger() char value[PROPERTY_VALUE_MAX]; bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1); if (doLog) { - mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters"); + mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters", MemoryHeapBase::READ_ONLY); } #ifdef TEE_SINK (void) property_get("ro.debuggable", value, "0"); @@ -162,12 +184,16 @@ AudioFlinger::AudioFlinger() (void) property_get("af.tee", value, "0"); teeEnabled = atoi(value); } - if (teeEnabled & 1) + // FIXME symbolic constants here + if (teeEnabled & 1) { mTeeSinkInputEnabled = true; - if 
(teeEnabled & 2) + } + if (teeEnabled & 2) { mTeeSinkOutputEnabled = true; - if (teeEnabled & 4) + } + if (teeEnabled & 4) { mTeeSinkTrackEnabled = true; + } #endif } @@ -210,6 +236,18 @@ AudioFlinger::~AudioFlinger() audio_hw_device_close(mAudioHwDevs.valueAt(i)->hwDevice()); delete mAudioHwDevs.valueAt(i); } + + // Tell media.log service about any old writers that still need to be unregistered + sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); + if (binder != 0) { + sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder)); + for (size_t count = mUnregisteredWriters.size(); count > 0; count--) { + sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory()); + mUnregisteredWriters.pop(); + mediaLogService->unregisterWriter(iMemory); + } + } + } static const char * const audio_interfaces[] = { @@ -249,7 +287,7 @@ AudioFlinger::AudioHwDevice* AudioFlinger::findSuitableHwDev_l( return NULL; } -void AudioFlinger::dumpClients(int fd, const Vector<String16>& args) +void AudioFlinger::dumpClients(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -271,17 +309,17 @@ void AudioFlinger::dumpClients(int fd, const Vector<String16>& args) } result.append("Global session refs:\n"); - result.append(" session pid count\n"); + result.append(" session pid count\n"); for (size_t i = 0; i < mAudioSessionRefs.size(); i++) { AudioSessionRef *r = mAudioSessionRefs[i]; - snprintf(buffer, SIZE, " %7d %3d %3d\n", r->mSessionid, r->mPid, r->mCnt); + snprintf(buffer, SIZE, " %7d %5d %5d\n", r->mSessionid, r->mPid, r->mCnt); result.append(buffer); } write(fd, result.string(), result.size()); } -void AudioFlinger::dumpInternals(int fd, const Vector<String16>& args) +void AudioFlinger::dumpInternals(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -296,7 +334,7 @@ void AudioFlinger::dumpInternals(int fd, const Vector<String16>& args) write(fd, 
result.string(), result.size()); } -void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args) +void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -403,16 +441,44 @@ sp<AudioFlinger::Client> AudioFlinger::registerPid_l(pid_t pid) sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name) { + // If there is no memory allocated for logs, return a dummy writer that does nothing if (mLogMemoryDealer == 0) { return new NBLog::Writer(); } - sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); - sp<NBLog::Writer> writer = new NBLog::Writer(size, shared); sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); - if (binder != 0) { - interface_cast<IMediaLogService>(binder)->registerWriter(shared, size, name); + // Similarly if we can't contact the media.log service, also return a dummy writer + if (binder == 0) { + return new NBLog::Writer(); + } + sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder)); + sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); + // If allocation fails, consult the vector of previously unregistered writers + // and garbage-collect one or more them until an allocation succeeds + if (shared == 0) { + Mutex::Autolock _l(mUnregisteredWritersLock); + for (size_t count = mUnregisteredWriters.size(); count > 0; count--) { + { + // Pick the oldest stale writer to garbage-collect + sp<IMemory> iMemory(mUnregisteredWriters[0]->getIMemory()); + mUnregisteredWriters.removeAt(0); + mediaLogService->unregisterWriter(iMemory); + // Now the media.log remote reference to IMemory is gone. When our last local + // reference to IMemory also drops to zero at end of this block, + // the IMemory destructor will deallocate the region from mLogMemoryDealer. 
+ } + // Re-attempt the allocation + shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size)); + if (shared != 0) { + goto success; + } + } + // Even after garbage-collecting all old writers, there is still not enough memory, + // so return a dummy writer + return new NBLog::Writer(); } - return writer; +success: + mediaLogService->registerWriter(shared, size, name); + return new NBLog::Writer(size, shared); } void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer) @@ -424,13 +490,10 @@ void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer) if (iMemory == 0) { return; } - sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log")); - if (binder != 0) { - interface_cast<IMediaLogService>(binder)->unregisterWriter(iMemory); - // Now the media.log remote reference to IMemory is gone. - // When our last local reference to IMemory also drops to zero, - // the IMemory destructor will deallocate the region from mMemoryDealer. - } + // Rather than removing the writer immediately, append it to a queue of old writers to + // be garbage-collected later. This allows us to continue to view old logs for a while. 
+ Mutex::Autolock _l(mUnregisteredWritersLock); + mUnregisteredWriters.push(writer); } // IAudioFlinger interface @@ -441,13 +504,12 @@ sp<IAudioTrack> AudioFlinger::createTrack( uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *frameCount, IAudioFlinger::track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, int *sessionId, - String8& name, int clientUid, status_t *status) { @@ -465,10 +527,31 @@ sp<IAudioTrack> AudioFlinger::createTrack( goto Exit; } + // further sample rate checks are performed by createTrack_l() depending on the thread type + if (sampleRate == 0) { + ALOGE("createTrack() invalid sample rate %u", sampleRate); + lStatus = BAD_VALUE; + goto Exit; + } + + // further channel mask checks are performed by createTrack_l() depending on the thread type + if (!audio_is_output_channel(channelMask)) { + ALOGE("createTrack() invalid channel mask %#x", channelMask); + lStatus = BAD_VALUE; + goto Exit; + } + // client is responsible for conversion of 8-bit PCM to 16-bit PCM, // and we don't yet support 8.24 or 32-bit PCM - if (audio_is_linear_pcm(format) && format != AUDIO_FORMAT_PCM_16_BIT) { - ALOGE("createTrack() invalid format %d", format); + if (!audio_is_valid_format(format) || + (audio_is_linear_pcm(format) && format != AUDIO_FORMAT_PCM_16_BIT)) { + ALOGE("createTrack() invalid format %#x", format); + lStatus = BAD_VALUE; + goto Exit; + } + + if (sharedBuffer != 0 && sharedBuffer->pointer() == NULL) { + ALOGE("createTrack() sharedBuffer is non-0 but has NULL pointer()"); lStatus = BAD_VALUE; goto Exit; } @@ -476,7 +559,6 @@ sp<IAudioTrack> AudioFlinger::createTrack( { Mutex::Autolock _l(mLock); PlaybackThread *thread = checkPlaybackThread_l(output); - PlaybackThread *effectThread = NULL; if (thread == NULL) { ALOGE("no playback thread found for output handle %d", output); lStatus = BAD_VALUE; @@ -484,24 +566,23 @@ sp<IAudioTrack> AudioFlinger::createTrack( } 
pid_t pid = IPCThreadState::self()->getCallingPid(); - client = registerPid_l(pid); - ALOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId); - if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) { + PlaybackThread *effectThread = NULL; + if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) { + lSessionId = *sessionId; // check if an effect chain with the same session ID is present on another // output thread and move it here. for (size_t i = 0; i < mPlaybackThreads.size(); i++) { sp<PlaybackThread> t = mPlaybackThreads.valueAt(i); if (mPlaybackThreads.keyAt(i) != output) { - uint32_t sessions = t->hasAudioSession(*sessionId); + uint32_t sessions = t->hasAudioSession(lSessionId); if (sessions & PlaybackThread::EFFECT_SESSION) { effectThread = t.get(); break; } } } - lSessionId = *sessionId; } else { // if no audio session id is provided, create one here lSessionId = nextUniqueId(); @@ -519,6 +600,7 @@ sp<IAudioTrack> AudioFlinger::createTrack( // move effect chain to this output thread if an effect on same session was waiting // for a track to be created if (lStatus == NO_ERROR && effectThread != NULL) { + // no risk of deadlock because AudioFlinger::mLock is held Mutex::Autolock _dl(thread->mLock); Mutex::Autolock _sl(effectThread->mLock); moveEffectChain_l(lSessionId, effectThread, thread, true); @@ -538,23 +620,22 @@ sp<IAudioTrack> AudioFlinger::createTrack( } } } + } - if (lStatus == NO_ERROR) { - // s for server's pid, n for normal mixer name, f for fast index - name = String8::format("s:%d;n:%d;f:%d", getpid_cached, track->name() - AudioMixer::TRACK0, - track->fastIndex()); - trackHandle = new TrackHandle(track); - } else { - // remove local strong reference to Client before deleting the Track so that the Client - // destructor is called by the TrackBase destructor with mLock held + + if (lStatus != NO_ERROR) { + // remove local strong reference to Client before deleting the Track so that the + // Client destructor 
is called by the TrackBase destructor with mLock held client.clear(); track.clear(); + goto Exit; } + // return handle to client + trackHandle = new TrackHandle(track); + Exit: - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return trackHandle; } @@ -796,7 +877,7 @@ status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value, AutoMutex lock(mLock); PlaybackThread *thread = NULL; - if (output) { + if (output != AUDIO_IO_HANDLE_NONE) { thread = checkPlaybackThread_l(output); if (thread == NULL) { return BAD_VALUE; @@ -845,7 +926,7 @@ float AudioFlinger::streamVolume(audio_stream_type_t stream, audio_io_handle_t o AutoMutex lock(mLock); float volume; - if (output) { + if (output != AUDIO_IO_HANDLE_NONE) { PlaybackThread *thread = checkPlaybackThread_l(output); if (thread == NULL) { return 0.0f; @@ -878,8 +959,8 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& return PERMISSION_DENIED; } - // ioHandle == 0 means the parameters are global to the audio hardware interface - if (ioHandle == 0) { + // AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface + if (ioHandle == AUDIO_IO_HANDLE_NONE) { Mutex::Autolock _l(mLock); status_t final_result = NO_ERROR; { @@ -961,7 +1042,7 @@ String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& k Mutex::Autolock _l(mLock); - if (ioHandle == 0) { + if (ioHandle == AUDIO_IO_HANDLE_NONE) { String8 out_s8; for (size_t i = 0; i < mAudioHwDevs.size(); i++) { @@ -1212,7 +1293,7 @@ AudioFlinger::NotificationClient::~NotificationClient() { } -void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who) +void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who __unused) { sp<NotificationClient> keep(this); mAudioFlinger->removeNotificationClient(mPid); @@ -1230,7 +1311,7 @@ sp<IAudioRecord> AudioFlinger::openRecord( uint32_t sampleRate, audio_format_t format, audio_channel_mask_t 
channelMask, - size_t frameCount, + size_t *frameCount, IAudioFlinger::track_flags_t *flags, pid_t tid, int *sessionId, @@ -1240,8 +1321,6 @@ sp<IAudioRecord> AudioFlinger::openRecord( sp<RecordHandle> recordHandle; sp<Client> client; status_t lStatus; - RecordThread *thread; - size_t inFrameCount; int lSessionId; // check calling permissions @@ -1251,16 +1330,31 @@ sp<IAudioRecord> AudioFlinger::openRecord( goto Exit; } - if (format != AUDIO_FORMAT_PCM_16_BIT) { - ALOGE("openRecord() invalid format %d", format); + // further sample rate checks are performed by createRecordTrack_l() + if (sampleRate == 0) { + ALOGE("openRecord() invalid sample rate %u", sampleRate); + lStatus = BAD_VALUE; + goto Exit; + } + + // we don't yet support anything other than 16-bit PCM + if (!(audio_is_valid_format(format) && + audio_is_linear_pcm(format) && format == AUDIO_FORMAT_PCM_16_BIT)) { + ALOGE("openRecord() invalid format %#x", format); + lStatus = BAD_VALUE; + goto Exit; + } + + // further channel mask checks are performed by createRecordTrack_l() + if (!audio_is_input_channel(channelMask)) { + ALOGE("openRecord() invalid channel mask %#x", channelMask); lStatus = BAD_VALUE; goto Exit; } - // add client to list - { // scope for mLock + { Mutex::Autolock _l(mLock); - thread = checkRecordThread_l(input); + RecordThread *thread = checkRecordThread_l(input); if (thread == NULL) { ALOGE("openRecord() checkRecordThread_l failed"); lStatus = BAD_VALUE; @@ -1277,17 +1371,17 @@ sp<IAudioRecord> AudioFlinger::openRecord( pid_t pid = IPCThreadState::self()->getCallingPid(); client = registerPid_l(pid); - // If no audio session id is provided, create one here - if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) { + if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) { lSessionId = *sessionId; } else { + // if no audio session id is provided, create one here lSessionId = nextUniqueId(); if (sessionId != NULL) { *sessionId = lSessionId; } } - // create new record 
track. - // The record track uses one track in mHardwareMixerThread by convention. + ALOGV("openRecord() lSessionId: %d", lSessionId); + // TODO: the uid should be passed in as a parameter to openRecord recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask, frameCount, lSessionId, @@ -1295,6 +1389,7 @@ sp<IAudioRecord> AudioFlinger::openRecord( flags, tid, &lStatus); LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0)); } + if (lStatus != NO_ERROR) { // remove local strong reference to Client before deleting the RecordTrack so that the // Client destructor is called by the TrackBase destructor with mLock held @@ -1303,14 +1398,11 @@ sp<IAudioRecord> AudioFlinger::openRecord( goto Exit; } - // return to handle to client + // return handle to client recordHandle = new RecordHandle(recordTrack); - lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return recordHandle; } @@ -1451,18 +1543,15 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, audio_output_flags_t flags, const audio_offload_info_t *offloadInfo) { - PlaybackThread *thread = NULL; struct audio_config config; + memset(&config, 0, sizeof(config)); config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0; config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0; config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT; - if (offloadInfo) { + if (offloadInfo != NULL) { config.offload_info = *offloadInfo; } - audio_stream_out_t *outStream = NULL; - AudioHwDevice *outHwDev; - ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x", module, (pDevices != NULL) ? *pDevices : 0, @@ -1471,23 +1560,25 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, config.channel_mask, flags); ALOGV("openOutput(), offloadInfo %p version 0x%04x", - offloadInfo, offloadInfo == NULL ? 
-1 : offloadInfo->version ); + offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version); - if (pDevices == NULL || *pDevices == 0) { - return 0; + if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) { + return AUDIO_IO_HANDLE_NONE; } Mutex::Autolock _l(mLock); - outHwDev = findSuitableHwDev_l(module, *pDevices); - if (outHwDev == NULL) - return 0; + AudioHwDevice *outHwDev = findSuitableHwDev_l(module, *pDevices); + if (outHwDev == NULL) { + return AUDIO_IO_HANDLE_NONE; + } audio_hw_device_t *hwDevHal = outHwDev->hwDevice(); audio_io_handle_t id = nextUniqueId(); mHardwareStatus = AUDIO_HW_OUTPUT_OPEN; + audio_stream_out_t *outStream = NULL; status_t status = hwDevHal->open_output_stream(hwDevHal, id, *pDevices, @@ -1507,6 +1598,7 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, if (status == NO_ERROR && outStream != NULL) { AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags); + PlaybackThread *thread; if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { thread = new OffloadThread(this, output, id, *pDevices); ALOGV("openOutput() created offload output: ID %d thread %p", id, thread); @@ -1550,7 +1642,7 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module, return id; } - return 0; + return AUDIO_IO_HANDLE_NONE; } audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1, @@ -1563,7 +1655,7 @@ audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1, if (thread1 == NULL || thread2 == NULL) { ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1, output2); - return 0; + return AUDIO_IO_HANDLE_NONE; } audio_io_handle_t id = nextUniqueId(); @@ -1674,35 +1766,34 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, audio_format_t *pFormat, audio_channel_mask_t *pChannelMask) { - status_t status; - RecordThread *thread = NULL; struct audio_config config; + memset(&config, 0, sizeof(config)); 
config.sample_rate = (pSamplingRate != NULL) ? *pSamplingRate : 0; config.channel_mask = (pChannelMask != NULL) ? *pChannelMask : 0; config.format = (pFormat != NULL) ? *pFormat : AUDIO_FORMAT_DEFAULT; uint32_t reqSamplingRate = config.sample_rate; audio_format_t reqFormat = config.format; - audio_channel_mask_t reqChannels = config.channel_mask; - audio_stream_in_t *inStream = NULL; - AudioHwDevice *inHwDev; + audio_channel_mask_t reqChannelMask = config.channel_mask; - if (pDevices == NULL || *pDevices == 0) { + if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) { return 0; } Mutex::Autolock _l(mLock); - inHwDev = findSuitableHwDev_l(module, *pDevices); - if (inHwDev == NULL) + AudioHwDevice *inHwDev = findSuitableHwDev_l(module, *pDevices); + if (inHwDev == NULL) { return 0; + } audio_hw_device_t *inHwHal = inHwDev->hwDevice(); audio_io_handle_t id = nextUniqueId(); - status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, + audio_stream_in_t *inStream = NULL; + status_t status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream); - ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, " + ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %#x, Channels %x, " "status %d", inStream, config.sample_rate, @@ -1716,10 +1807,12 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, if (status == BAD_VALUE && reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT && (config.sample_rate <= 2 * reqSamplingRate) && - (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) { + (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannelMask) <= FCC_2)) { + // FIXME describe the change proposed by HAL (save old values so we can log them here) ALOGV("openInput() reopening with proposed sampling rate and channel mask"); inStream = NULL; status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, 
&inStream); + // FIXME log this new status; HAL should not propose any further changes } if (status == NO_ERROR && inStream != NULL) { @@ -1737,13 +1830,13 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, popcount(inStream->common.get_channels(&inStream->common))); if (!mTeeSinkInputEnabled) { kind = TEE_SINK_NO; - } else if (format == Format_Invalid) { + } else if (!Format_isValid(format)) { kind = TEE_SINK_NO; } else if (mRecordTeeSink == 0) { kind = TEE_SINK_NEW; } else if (mRecordTeeSink->getStrongCount() != 1) { kind = TEE_SINK_NO; - } else if (format == mRecordTeeSink->format()) { + } else if (Format_isEqual(format, mRecordTeeSink->format())) { kind = TEE_SINK_OLD; } else { kind = TEE_SINK_NEW; @@ -1778,10 +1871,8 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, // Start record thread // RecordThread requires both input and output device indication to forward to audio // pre processing modules - thread = new RecordThread(this, + RecordThread *thread = new RecordThread(this, input, - reqSamplingRate, - reqChannels, id, primaryOutputDevice_l(), *pDevices @@ -1798,7 +1889,7 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module, *pFormat = config.format; } if (pChannelMask != NULL) { - *pChannelMask = reqChannels; + *pChannelMask = reqChannelMask; } // notify client processes of the new input creation @@ -1843,10 +1934,10 @@ status_t AudioFlinger::closeInput_nonvirtual(audio_io_handle_t input) return NO_ERROR; } -status_t AudioFlinger::setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output) +status_t AudioFlinger::invalidateStream(audio_stream_type_t stream) { Mutex::Autolock _l(mLock); - ALOGV("setStreamOutput() stream %d to output %d", stream, output); + ALOGV("invalidateStream() stream %d", stream); for (size_t i = 0; i < mPlaybackThreads.size(); i++) { PlaybackThread *thread = mPlaybackThreads.valueAt(i).get(); @@ -1862,18 +1953,21 @@ int AudioFlinger::newAudioSessionId() 
return nextUniqueId(); } -void AudioFlinger::acquireAudioSessionId(int audioSession) +void AudioFlinger::acquireAudioSessionId(int audioSession, pid_t pid) { Mutex::Autolock _l(mLock); pid_t caller = IPCThreadState::self()->getCallingPid(); - ALOGV("acquiring %d from %d", audioSession, caller); + ALOGV("acquiring %d from %d, for %d", audioSession, caller, pid); + if (pid != -1 && (caller == getpid_cached)) { + caller = pid; + } // Ignore requests received from processes not known as notification client. The request // is likely proxied by mediaserver (e.g CameraService) and releaseAudioSessionId() can be // called from a different pid leaving a stale session reference. Also we don't know how // to clear this reference if the client process dies. if (mNotificationClients.indexOfKey(caller) < 0) { - ALOGV("acquireAudioSessionId() unknown client %d for session %d", caller, audioSession); + ALOGW("acquireAudioSessionId() unknown client %d for session %d", caller, audioSession); return; } @@ -1890,11 +1984,14 @@ void AudioFlinger::acquireAudioSessionId(int audioSession) ALOGV(" added new entry for %d", audioSession); } -void AudioFlinger::releaseAudioSessionId(int audioSession) +void AudioFlinger::releaseAudioSessionId(int audioSession, pid_t pid) { Mutex::Autolock _l(mLock); pid_t caller = IPCThreadState::self()->getCallingPid(); - ALOGV("releasing %d from %d", audioSession, caller); + ALOGV("releasing %d from %d for %d", audioSession, caller, pid); + if (pid != -1 && (caller == getpid_cached)) { + caller = pid; + } size_t num = mAudioSessionRefs.size(); for (size_t i = 0; i< num; i++) { AudioSessionRef *ref = mAudioSessionRefs.itemAt(i); @@ -1956,7 +2053,7 @@ void AudioFlinger::purgeStaleEffects_l() { } } if (!found) { - Mutex::Autolock _l (t->mLock); + Mutex::Autolock _l(t->mLock); // remove all effects from the chain while (ec->mEffects.size()) { sp<EffectModule> effect = ec->mEffects[0]; @@ -1993,7 +2090,7 @@ AudioFlinger::RecordThread 
*AudioFlinger::checkRecordThread_l(audio_io_handle_t uint32_t AudioFlinger::nextUniqueId() { - return android_atomic_inc(&mNextUniqueId); + return (uint32_t) android_atomic_inc(&mNextUniqueId); } AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l() const @@ -2023,7 +2120,7 @@ sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_even int triggerSession, int listenerSession, sync_event_callback_t callBack, - void *cookie) + wp<RefBase> cookie) { Mutex::Autolock _l(mLock); @@ -2185,7 +2282,7 @@ sp<IEffect> AudioFlinger::createEffect( // return effect descriptor *pDesc = desc; - if (io == 0 && sessionId == AUDIO_SESSION_OUTPUT_MIX) { + if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) { // if the output returned by getOutputForEffect() is removed before we lock the // mutex below, the call to checkPlaybackThread_l(io) below will detect it // and we will exit safely @@ -2200,7 +2297,7 @@ sp<IEffect> AudioFlinger::createEffect( // If output is 0 here, sessionId is neither SESSION_OUTPUT_STAGE nor SESSION_OUTPUT_MIX // because of code checking output when entering the function. // Note: io is never 0 when creating an effect on an input - if (io == 0) { + if (io == AUDIO_IO_HANDLE_NONE) { if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) { // output must be specified by AudioPolicyManager when using session // AUDIO_SESSION_OUTPUT_STAGE @@ -2225,7 +2322,7 @@ sp<IEffect> AudioFlinger::createEffect( // If no output thread contains the requested session ID, default to // first output. 
The effect chain will be moved to the correct output // thread when a track with the same session ID is created - if (io == 0 && mPlaybackThreads.size()) { + if (io == AUDIO_IO_HANDLE_NONE && mPlaybackThreads.size() > 0) { io = mPlaybackThreads.keyAt(0); } ALOGV("createEffect() got io %d for effect %s", io, desc.name); @@ -2251,9 +2348,7 @@ sp<IEffect> AudioFlinger::createEffect( } Exit: - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return handle; } diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h index 7320144..ec32edd 100644 --- a/services/audioflinger/AudioFlinger.h +++ b/services/audioflinger/AudioFlinger.h @@ -60,8 +60,8 @@ namespace android { -class audio_track_cblk_t; -class effect_param_cblk_t; +struct audio_track_cblk_t; +struct effect_param_cblk_t; class AudioMixer; class AudioBuffer; class AudioResampler; @@ -102,26 +102,25 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, IAudioFlinger::track_flags_t *flags, const sp<IMemory>& sharedBuffer, audio_io_handle_t output, pid_t tid, int *sessionId, - String8& name, int clientUid, - status_t *status); + status_t *status /*non-NULL*/); virtual sp<IAudioRecord> openRecord( audio_io_handle_t input, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, IAudioFlinger::track_flags_t *flags, pid_t tid, int *sessionId, - status_t *status); + status_t *status /*non-NULL*/); virtual uint32_t sampleRate(audio_io_handle_t output) const; virtual int channelCount(audio_io_handle_t output) const; @@ -182,7 +181,7 @@ public: virtual status_t closeInput(audio_io_handle_t input); - virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output); + virtual status_t invalidateStream(audio_stream_type_t stream); virtual status_t setVoiceVolume(float volume); @@ -193,9 +192,9 @@ public: virtual int 
newAudioSessionId(); - virtual void acquireAudioSessionId(int audioSession); + virtual void acquireAudioSessionId(int audioSession, pid_t pid); - virtual void releaseAudioSessionId(int audioSession); + virtual void releaseAudioSessionId(int audioSession, pid_t pid); virtual status_t queryNumberEffects(uint32_t *numEffects) const; @@ -210,7 +209,7 @@ public: int32_t priority, audio_io_handle_t io, int sessionId, - status_t *status, + status_t *status /*non-NULL*/, int *id, int *enabled); @@ -235,8 +234,12 @@ public: sp<NBLog::Writer> newWriter_l(size_t size, const char *name); void unregisterWriter(const sp<NBLog::Writer>& writer); private: - static const size_t kLogMemorySize = 10 * 1024; + static const size_t kLogMemorySize = 40 * 1024; sp<MemoryDealer> mLogMemoryDealer; // == 0 when NBLog is disabled + // When a log writer is unregistered, it is done lazily so that media.log can continue to see it + // for as long as possible. The memory is only freed when it is needed for another log writer. 
+ Vector< sp<NBLog::Writer> > mUnregisteredWriters; + Mutex mUnregisteredWritersLock; public: class SyncEvent; @@ -249,7 +252,7 @@ public: int triggerSession, int listenerSession, sync_event_callback_t callBack, - void *cookie) + wp<RefBase> cookie) : mType(type), mTriggerSession(triggerSession), mListenerSession(listenerSession), mCallback(callBack), mCookie(cookie) {} @@ -262,14 +265,14 @@ public: AudioSystem::sync_event_t type() const { return mType; } int triggerSession() const { return mTriggerSession; } int listenerSession() const { return mListenerSession; } - void *cookie() const { return mCookie; } + wp<RefBase> cookie() const { return mCookie; } private: const AudioSystem::sync_event_t mType; const int mTriggerSession; const int mListenerSession; sync_event_callback_t mCallback; - void * const mCookie; + const wp<RefBase> mCookie; mutable Mutex mLock; }; @@ -277,7 +280,7 @@ public: int triggerSession, int listenerSession, sync_event_callback_t callBack, - void *cookie); + wp<RefBase> cookie); private: class AudioHwDevice; // fwd declaration for findSuitableHwDev_l @@ -451,7 +454,14 @@ private: { return mStreamTypes[stream].volume; } void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2); - // allocate an audio_io_handle_t, session ID, or effect ID + // Allocate an audio_io_handle_t, session ID, effect ID, or audio_module_handle_t. + // They all share the same ID space, but the namespaces are actually independent + // because there are separate KeyedVectors for each kind of ID. + // The return value is uint32_t, but is cast to signed for some IDs. + // FIXME This API does not handle rollover to zero (for unsigned IDs), + // or from positive to negative (for signed IDs). + // Thus it may fail by returning an ID of the wrong sign, + // or by returning a non-unique ID. 
uint32_t nextUniqueId(); status_t moveEffectChain_l(int sessionId, @@ -499,7 +509,7 @@ private: private: const char * const mModuleName; audio_hw_device_t * const mHwDevice; - Flags mFlags; + const Flags mFlags; }; // AudioStreamOut and AudioStreamIn are immutable, so their fields are const. @@ -509,7 +519,7 @@ private: struct AudioStreamOut { AudioHwDevice* const audioHwDev; audio_stream_out_t* const stream; - audio_output_flags_t flags; + const audio_output_flags_t flags; audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); } @@ -587,7 +597,11 @@ private: DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> > mRecordThreads; DefaultKeyedVector< pid_t, sp<NotificationClient> > mNotificationClients; + volatile int32_t mNextUniqueId; // updated by android_atomic_inc + // nextUniqueId() returns uint32_t, but this is declared int32_t + // because the atomic operations require an int32_t + audio_mode_t mMode; bool mBtNrecIsOff; @@ -634,7 +648,7 @@ public: // 0x200000 stereo 16-bit PCM frames = 47.5 seconds at 44.1 kHz, 8 megabytes static const size_t kTeeSinkInputFramesDefault = 0x200000; static const size_t kTeeSinkOutputFramesDefault = 0x200000; - static const size_t kTeeSinkTrackFramesDefault = 0x1000; + static const size_t kTeeSinkTrackFramesDefault = 0x200000; #endif // This method reads from a variable without mLock, but the variable is updated under mLock. 
So @@ -651,6 +665,8 @@ private: #undef INCLUDING_FROM_AUDIOFLINGER_H +const char *formatToString(audio_format_t format); + // ---------------------------------------------------------------------------- }; // namespace android diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp index f92421e..2d67efb 100644 --- a/services/audioflinger/AudioMixer.cpp +++ b/services/audioflinger/AudioMixer.cpp @@ -58,7 +58,7 @@ AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider() status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, int64_t pts) { //ALOGV("DownmixerBufferProvider::getNextBuffer()"); - if (this->mTrackBufferProvider != NULL) { + if (mTrackBufferProvider != NULL) { status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts); if (res == OK) { mDownmixConfig.inputCfg.buffer.frameCount = pBuffer->frameCount; @@ -81,7 +81,7 @@ status_t AudioMixer::DownmixerBufferProvider::getNextBuffer(AudioBufferProvider: void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer) { //ALOGV("DownmixerBufferProvider::releaseBuffer()"); - if (this->mTrackBufferProvider != NULL) { + if (mTrackBufferProvider != NULL) { mTrackBufferProvider->releaseBuffer(pBuffer); } else { ALOGE("DownmixerBufferProvider::releaseBuffer() error: NULL track buffer provider"); @@ -90,9 +90,9 @@ void AudioMixer::DownmixerBufferProvider::releaseBuffer(AudioBufferProvider::Buf // ---------------------------------------------------------------------------- -bool AudioMixer::isMultichannelCapable = false; +bool AudioMixer::sIsMultichannelCapable = false; -effect_descriptor_t AudioMixer::dwnmFxDesc; +effect_descriptor_t AudioMixer::sDwnmFxDesc; // Ensure mConfiguredNames bitmask is initialized properly on all architectures. // The value of 1 << x is undefined in C when x >= 32. 
@@ -113,8 +113,6 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr // AudioMixer is not yet capable of multi-channel output beyond stereo ALOG_ASSERT(2 == MAX_NUM_CHANNELS, "bad MAX_NUM_CHANNELS %d", MAX_NUM_CHANNELS); - LocalClock lc; - pthread_once(&sOnceControl, &sInitRoutine); mState.enabledTracks= 0; @@ -136,27 +134,6 @@ AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTr t++; } - // find multichannel downmix effect if we have to play multichannel content - uint32_t numEffects = 0; - int ret = EffectQueryNumberEffects(&numEffects); - if (ret != 0) { - ALOGE("AudioMixer() error %d querying number of effects", ret); - return; - } - ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); - - for (uint32_t i = 0 ; i < numEffects ; i++) { - if (EffectQueryEffect(i, &dwnmFxDesc) == 0) { - ALOGV("effect %d is called %s", i, dwnmFxDesc.name); - if (memcmp(&dwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { - ALOGI("found effect \"%s\" from %s", - dwnmFxDesc.name, dwnmFxDesc.implementor); - isMultichannelCapable = true; - break; - } - } - } - ALOGE_IF(!isMultichannelCapable, "unable to find downmix effect"); } AudioMixer::~AudioMixer() @@ -216,6 +193,7 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId) t->mainBuffer = NULL; t->auxBuffer = NULL; t->downmixerBufferProvider = NULL; + t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT; status_t status = initTrackDownmix(&mState.tracks[n], n, channelMask); if (status == OK) { @@ -229,7 +207,7 @@ int AudioMixer::getTrackName(audio_channel_mask_t channelMask, int sessionId) void AudioMixer::invalidateState(uint32_t mask) { - if (mask) { + if (mask != 0) { mState.needsChanged |= mask; mState.hook = process__validate; } @@ -252,7 +230,7 @@ status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackNum, audio_chann return status; } -void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName) { +void 
AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) { ALOGV("AudioMixer::unprepareTrackForDownmix(%d)", trackName); if (pTrack->downmixerBufferProvider != NULL) { @@ -276,13 +254,13 @@ status_t AudioMixer::prepareTrackForDownmix(track_t* pTrack, int trackName) DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(); int32_t status; - if (!isMultichannelCapable) { + if (!sIsMultichannelCapable) { ALOGE("prepareTrackForDownmix(%d) fails: mixer doesn't support multichannel content", trackName); goto noDownmixForActiveTrack; } - if (EffectCreate(&dwnmFxDesc.uuid, + if (EffectCreate(&sDwnmFxDesc.uuid, pTrack->sessionId /*sessionId*/, -2 /*ioId not relevant here, using random value*/, &pDbp->mDownmixHandle/*pHandle*/) != 0) { ALOGE("prepareTrackForDownmix(%d) fails: error creating downmixer effect", trackName); @@ -463,8 +441,15 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) // for a specific track? or per mixer? /* case DOWNMIX_TYPE: break */ + case MIXER_FORMAT: { + audio_format_t format = static_cast<audio_format_t>(valueInt); + if (track.mMixerFormat != format) { + track.mMixerFormat = format; + ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format); + } + } break; default: - LOG_FATAL("bad param"); + LOG_ALWAYS_FATAL("setParameter track: bad param %d", param); } break; @@ -489,7 +474,7 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) invalidateState(1 << name); break; default: - LOG_FATAL("bad param"); + LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param); } break; @@ -537,12 +522,12 @@ void AudioMixer::setParameter(int name, int target, int param, void *value) } break; default: - LOG_FATAL("bad param"); + LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param); } break; default: - LOG_FATAL("bad target"); + LOG_ALWAYS_FATAL("setParameter: bad target %d", target); } } @@ -560,14 +545,14 @@ bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t 
devSampleRate) // Should have a way to distinguish tracks with static ratios vs. dynamic ratios. if (!((value == 44100 && devSampleRate == 48000) || (value == 48000 && devSampleRate == 44100))) { - quality = AudioResampler::LOW_QUALITY; + quality = AudioResampler::DYN_LOW_QUALITY; } else { quality = AudioResampler::DEFAULT_QUALITY; } resampler = AudioResampler::create( format, // the resampler sees the number of channels after the downmixer, if any - downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount, + (int) (downmixerBufferProvider != NULL ? MAX_NUM_CHANNELS : channelCount), devSampleRate, quality); resampler->setLocalTimeFreq(sLocalTimeFreq); } @@ -668,27 +653,29 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) countActiveTracks++; track_t& t = state->tracks[i]; uint32_t n = 0; + // FIXME can overflow (mask is only 3 bits) n |= NEEDS_CHANNEL_1 + t.channelCount - 1; - n |= NEEDS_FORMAT_16; - n |= t.doesResample() ? NEEDS_RESAMPLE_ENABLED : NEEDS_RESAMPLE_DISABLED; + if (t.doesResample()) { + n |= NEEDS_RESAMPLE; + } if (t.auxLevel != 0 && t.auxBuffer != NULL) { - n |= NEEDS_AUX_ENABLED; + n |= NEEDS_AUX; } if (t.volumeInc[0]|t.volumeInc[1]) { volumeRamp = true; } else if (!t.doesResample() && t.volumeRL == 0) { - n |= NEEDS_MUTE_ENABLED; + n |= NEEDS_MUTE; } t.needs = n; - if ((n & NEEDS_MUTE__MASK) == NEEDS_MUTE_ENABLED) { + if (n & NEEDS_MUTE) { t.hook = track__nop; } else { - if ((n & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) { + if (n & NEEDS_AUX) { all16BitsStereoNoResample = false; } - if ((n & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) { + if (n & NEEDS_RESAMPLE) { all16BitsStereoNoResample = false; resampling = true; t.hook = track__genericResample; @@ -710,7 +697,7 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) // select the processing hooks state->hook = process__nop; - if (countActiveTracks) { + if (countActiveTracks > 0) { if (resampling) { if (!state->outputTemp) { state->outputTemp = new 
int32_t[MAX_NUM_CHANNELS * state->frameCount]; @@ -746,16 +733,15 @@ void AudioMixer::process__validate(state_t* state, int64_t pts) // Now that the volume ramp has been done, set optimal state and // track hooks for subsequent mixer process - if (countActiveTracks) { + if (countActiveTracks > 0) { bool allMuted = true; uint32_t en = state->enabledTracks; while (en) { const int i = 31 - __builtin_clz(en); en &= ~(1<<i); track_t& t = state->tracks[i]; - if (!t.doesResample() && t.volumeRL == 0) - { - t.needs |= NEEDS_MUTE_ENABLED; + if (!t.doesResample() && t.volumeRL == 0) { + t.needs |= NEEDS_MUTE; t.hook = track__nop; } else { allMuted = false; @@ -806,8 +792,8 @@ void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFram } } -void AudioMixer::track__nop(track_t* t, int32_t* out, size_t outFrameCount, int32_t* temp, - int32_t* aux) +void AudioMixer::track__nop(track_t* t __unused, int32_t* out __unused, + size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused) { } @@ -883,8 +869,8 @@ void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32 } } -void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, - int32_t* aux) +void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, + int32_t* temp __unused, int32_t* aux) { const int16_t *in = static_cast<const int16_t *>(t->in); @@ -974,8 +960,8 @@ void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount t->in = in; } -void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, - int32_t* aux) +void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, + int32_t* temp __unused, int32_t* aux) { const int16_t *in = static_cast<int16_t const *>(t->in); @@ -1065,7 +1051,7 @@ void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, void AudioMixer::process__nop(state_t* state, int64_t pts) { 
uint32_t e0 = state->enabledTracks; - size_t bufSize = state->frameCount * sizeof(int16_t) * MAX_NUM_CHANNELS; + size_t sampleCount = state->frameCount * MAX_NUM_CHANNELS; while (e0) { // process by group of tracks with same output buffer to // avoid multiple memset() on same buffer @@ -1084,7 +1070,8 @@ void AudioMixer::process__nop(state_t* state, int64_t pts) } e0 &= ~(e1); - memset(t1.mainBuffer, 0, bufSize); + memset(t1.mainBuffer, 0, sampleCount + * audio_bytes_per_sample(t1.mMixerFormat)); } while (e1) { @@ -1154,7 +1141,7 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) track_t& t = state->tracks[i]; size_t outFrames = BLOCKSIZE; int32_t *aux = NULL; - if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) { + if (CC_UNLIKELY(t.needs & NEEDS_AUX)) { aux = t.auxBuffer + numFrames; } while (outFrames) { @@ -1166,7 +1153,7 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) break; } size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount; - if (inFrames) { + if (inFrames > 0) { t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux); t.frameCount -= inFrames; @@ -1192,8 +1179,18 @@ void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts) } } } - ditherAndClamp(out, outTemp, BLOCKSIZE); - out += BLOCKSIZE; + switch (t1.mMixerFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + memcpy_to_float_from_q4_27(reinterpret_cast<float *>(out), outTemp, BLOCKSIZE * 2); + out += BLOCKSIZE * 2; // output is 2 floats/frame. 
+ break; + case AUDIO_FORMAT_PCM_16_BIT: + ditherAndClamp(out, outTemp, BLOCKSIZE); + out += BLOCKSIZE; // output is 1 int32_t (2 int16_t samples)/frame + break; + default: + LOG_ALWAYS_FATAL("bad mixer format: %d", t1.mMixerFormat); + } numFrames += BLOCKSIZE; } while (numFrames < state->frameCount); } @@ -1242,14 +1239,14 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) e1 &= ~(1<<i); track_t& t = state->tracks[i]; int32_t *aux = NULL; - if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) { + if (CC_UNLIKELY(t.needs & NEEDS_AUX)) { aux = t.auxBuffer; } // this is a little goofy, on the resampling case we don't // acquire/release the buffers because it's done by // the resampler. - if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) { + if (t.needs & NEEDS_RESAMPLE) { t.resampler->setPTS(pts); t.hook(&t, outTemp, numFrames, state->resampleTemp, aux); } else { @@ -1275,7 +1272,16 @@ void AudioMixer::process__genericResampling(state_t* state, int64_t pts) } } } - ditherAndClamp(out, outTemp, numFrames); + switch (t1.mMixerFormat) { + case AUDIO_FORMAT_PCM_FLOAT: + memcpy_to_float_from_q4_27(reinterpret_cast<float*>(out), outTemp, numFrames*2); + break; + case AUDIO_FORMAT_PCM_16_BIT: + ditherAndClamp(out, outTemp, numFrames); + break; + default: + LOG_ALWAYS_FATAL("bad mixer format: %d", t1.mMixerFormat); + } } } @@ -1316,27 +1322,46 @@ void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, } size_t outFrames = b.frameCount; - if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) { - // volume is boosted, so we might need to clamp even though - // we process only one track. - do { - uint32_t rl = *reinterpret_cast<const uint32_t *>(in); - in += 2; - int32_t l = mulRL(1, rl, vrl) >> 12; - int32_t r = mulRL(0, rl, vrl) >> 12; - // clamping... 
- l = clamp16(l); - r = clamp16(r); - *out++ = (r<<16) | (l & 0xFFFF); - } while (--outFrames); - } else { + switch (t.mMixerFormat) { + case AUDIO_FORMAT_PCM_FLOAT: { + float *fout = reinterpret_cast<float*>(out); do { uint32_t rl = *reinterpret_cast<const uint32_t *>(in); in += 2; - int32_t l = mulRL(1, rl, vrl) >> 12; - int32_t r = mulRL(0, rl, vrl) >> 12; - *out++ = (r<<16) | (l & 0xFFFF); + int32_t l = mulRL(1, rl, vrl); + int32_t r = mulRL(0, rl, vrl); + *fout++ = float_from_q4_27(l); + *fout++ = float_from_q4_27(r); + // Note: In case of later int16_t sink output, + // conversion and clamping is done by memcpy_to_i16_from_float(). } while (--outFrames); + } break; + case AUDIO_FORMAT_PCM_16_BIT: + if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) { + // volume is boosted, so we might need to clamp even though + // we process only one track. + do { + uint32_t rl = *reinterpret_cast<const uint32_t *>(in); + in += 2; + int32_t l = mulRL(1, rl, vrl) >> 12; + int32_t r = mulRL(0, rl, vrl) >> 12; + // clamping... 
+ l = clamp16(l); + r = clamp16(r); + *out++ = (r<<16) | (l & 0xFFFF); + } while (--outFrames); + } else { + do { + uint32_t rl = *reinterpret_cast<const uint32_t *>(in); + in += 2; + int32_t l = mulRL(1, rl, vrl) >> 12; + int32_t r = mulRL(0, rl, vrl) >> 12; + *out++ = (r<<16) | (l & 0xFFFF); + } while (--outFrames); + } + break; + default: + LOG_ALWAYS_FATAL("bad mixer format: %d", t.mMixerFormat); } numFrames -= b.frameCount; t.bufferProvider->releaseBuffer(&b); @@ -1449,8 +1474,9 @@ void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state, int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, int outputFrameIndex) { - if (AudioBufferProvider::kInvalidPTS == basePTS) + if (AudioBufferProvider::kInvalidPTS == basePTS) { return AudioBufferProvider::kInvalidPTS; + } return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate); } @@ -1462,6 +1488,28 @@ int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS, { LocalClock lc; sLocalTimeFreq = lc.getLocalFreq(); + + // find multichannel downmix effect if we have to play multichannel content + uint32_t numEffects = 0; + int ret = EffectQueryNumberEffects(&numEffects); + if (ret != 0) { + ALOGE("AudioMixer() error %d querying number of effects", ret); + return; + } + ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects); + + for (uint32_t i = 0 ; i < numEffects ; i++) { + if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) { + ALOGV("effect %d is called %s", i, sDwnmFxDesc.name); + if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) { + ALOGI("found effect \"%s\" from %s", + sDwnmFxDesc.name, sDwnmFxDesc.implementor); + sIsMultichannelCapable = true; + break; + } + } + } + ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect"); } // ---------------------------------------------------------------------------- diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h index 43aeb86..e5e120c 
100644 --- a/services/audioflinger/AudioMixer.h +++ b/services/audioflinger/AudioMixer.h @@ -77,6 +77,7 @@ public: MAIN_BUFFER = 0x4002, AUX_BUFFER = 0x4003, DOWNMIX_TYPE = 0X4004, + MIXER_FORMAT = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT) // for target RESAMPLE SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name; // parameter 'value' is the new sample rate in Hz. @@ -120,27 +121,19 @@ public: private: enum { + // FIXME this representation permits up to 8 channels NEEDS_CHANNEL_COUNT__MASK = 0x00000007, - NEEDS_FORMAT__MASK = 0x000000F0, - NEEDS_MUTE__MASK = 0x00000100, - NEEDS_RESAMPLE__MASK = 0x00001000, - NEEDS_AUX__MASK = 0x00010000, }; enum { - NEEDS_CHANNEL_1 = 0x00000000, - NEEDS_CHANNEL_2 = 0x00000001, + NEEDS_CHANNEL_1 = 0x00000000, // mono + NEEDS_CHANNEL_2 = 0x00000001, // stereo - NEEDS_FORMAT_16 = 0x00000010, + // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT - NEEDS_MUTE_DISABLED = 0x00000000, - NEEDS_MUTE_ENABLED = 0x00000100, - - NEEDS_RESAMPLE_DISABLED = 0x00000000, - NEEDS_RESAMPLE_ENABLED = 0x00001000, - - NEEDS_AUX_DISABLED = 0x00000000, - NEEDS_AUX_ENABLED = 0x00010000, + NEEDS_MUTE = 0x00000100, + NEEDS_RESAMPLE = 0x00001000, + NEEDS_AUX = 0x00010000, }; struct state_t; @@ -201,7 +194,9 @@ private: int32_t sessionId; - int32_t padding[2]; + audio_format_t mMixerFormat; // at this time: AUDIO_FORMAT_PCM_(FLOAT|16_BIT) + + int32_t padding[1]; // 16-byte boundary @@ -224,7 +219,7 @@ private: NBLog::Writer* mLog; int32_t reserved[1]; // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS - track_t tracks[MAX_NUM_TRACKS]; __attribute__((aligned(32))); + track_t tracks[MAX_NUM_TRACKS] __attribute__((aligned(32))); }; // AudioBufferProvider that wraps a track AudioBufferProvider by a call to a downmix effect @@ -256,9 +251,9 @@ private: state_t mState __attribute__((aligned(32))); // effect descriptor for the downmixer used by the mixer - static 
effect_descriptor_t dwnmFxDesc; + static effect_descriptor_t sDwnmFxDesc; // indicates whether a downmix effect has been found and is usable by this mixer - static bool isMultichannelCapable; + static bool sIsMultichannelCapable; // Call after changing either the enabled status of a track, or parameters of an enabled track. // OK to call more often than that, but unnecessary. diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp index e5cceb1..562c4ea 100644 --- a/services/audioflinger/AudioResampler.cpp +++ b/services/audioflinger/AudioResampler.cpp @@ -25,6 +25,7 @@ #include "AudioResampler.h" #include "AudioResamplerSinc.h" #include "AudioResamplerCubic.h" +#include "AudioResamplerDyn.h" #ifdef __arm__ #include <machine/cpu-features.h> @@ -77,6 +78,9 @@ private: int mX0R; }; +/*static*/ +const double AudioResampler::kPhaseMultiplier = 1L << AudioResampler::kNumPhaseBits; + bool AudioResampler::qualityIsSupported(src_quality quality) { switch (quality) { @@ -85,6 +89,9 @@ bool AudioResampler::qualityIsSupported(src_quality quality) case MED_QUALITY: case HIGH_QUALITY: case VERY_HIGH_QUALITY: + case DYN_LOW_QUALITY: + case DYN_MED_QUALITY: + case DYN_HIGH_QUALITY: return true; default: return false; @@ -105,7 +112,7 @@ void AudioResampler::init_routine() if (*endptr == '\0') { defaultQuality = (src_quality) l; ALOGD("forcing AudioResampler quality to %d", defaultQuality); - if (defaultQuality < DEFAULT_QUALITY || defaultQuality > VERY_HIGH_QUALITY) { + if (defaultQuality < DEFAULT_QUALITY || defaultQuality > DYN_HIGH_QUALITY) { defaultQuality = DEFAULT_QUALITY; } } @@ -125,6 +132,12 @@ uint32_t AudioResampler::qualityMHz(src_quality quality) return 20; case VERY_HIGH_QUALITY: return 34; + case DYN_LOW_QUALITY: + return 4; + case DYN_MED_QUALITY: + return 6; + case DYN_HIGH_QUALITY: + return 12; } } @@ -148,6 +161,16 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, atFinalQuality = true; } + /* 
if the caller requests DEFAULT_QUALITY and af.resampler.property + * has not been set, the target resampler quality is set to DYN_MED_QUALITY, + * and allowed to "throttle" down to DYN_LOW_QUALITY if necessary + * due to estimated CPU load of having too many active resamplers + * (the code below the if). + */ + if (quality == DEFAULT_QUALITY) { + quality = DYN_MED_QUALITY; + } + // naive implementation of CPU load throttling doesn't account for whether resampler is active pthread_mutex_lock(&mutex); for (;;) { @@ -162,7 +185,6 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, // not enough CPU available for proposed quality level, so try next lowest level switch (quality) { default: - case DEFAULT_QUALITY: case LOW_QUALITY: atFinalQuality = true; break; @@ -175,6 +197,15 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, case VERY_HIGH_QUALITY: quality = HIGH_QUALITY; break; + case DYN_LOW_QUALITY: + atFinalQuality = true; + break; + case DYN_MED_QUALITY: + quality = DYN_LOW_QUALITY; + break; + case DYN_HIGH_QUALITY: + quality = DYN_MED_QUALITY; + break; } } pthread_mutex_unlock(&mutex); @@ -183,7 +214,6 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, switch (quality) { default: - case DEFAULT_QUALITY: case LOW_QUALITY: ALOGV("Create linear Resampler"); resampler = new AudioResamplerOrder1(bitDepth, inChannelCount, sampleRate); @@ -200,6 +230,21 @@ AudioResampler* AudioResampler::create(int bitDepth, int inChannelCount, ALOGV("Create VERY_HIGH_QUALITY sinc Resampler = %d", quality); resampler = new AudioResamplerSinc(bitDepth, inChannelCount, sampleRate, quality); break; + case DYN_LOW_QUALITY: + case DYN_MED_QUALITY: + case DYN_HIGH_QUALITY: + ALOGV("Create dynamic Resampler = %d", quality); + if (bitDepth == 32) { /* bitDepth == 32 signals float precision */ + resampler = new AudioResamplerDyn<float, float, float>(bitDepth, inChannelCount, + sampleRate, quality); + } else if (quality 
== DYN_HIGH_QUALITY) { + resampler = new AudioResamplerDyn<int32_t, int16_t, int32_t>(bitDepth, inChannelCount, + sampleRate, quality); + } else { + resampler = new AudioResamplerDyn<int16_t, int16_t, int32_t>(bitDepth, inChannelCount, + sampleRate, quality); + } + break; } // initialize resampler @@ -305,7 +350,7 @@ void AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d", // outFrameCount, inputIndex, phaseFraction, phaseIncrement); @@ -339,8 +384,9 @@ void AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount, out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction); out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction); Advance(&inputIndex, &phaseFraction, phaseIncrement); - if (outputIndex == outputSampleCount) + if (outputIndex == outputSampleCount) { break; + } } // process input samples @@ -402,7 +448,7 @@ void AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d", // outFrameCount, inputIndex, phaseFraction, phaseIncrement); @@ -434,8 +480,9 @@ void AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount, out[outputIndex++] += vl * sample; out[outputIndex++] += vr * sample; Advance(&inputIndex, &phaseFraction, phaseIncrement); - if (outputIndex == outputSampleCount) + if (outputIndex == outputSampleCount) { 
break; + } } // process input samples @@ -514,6 +561,16 @@ void AudioResamplerOrder1::AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr, uint32_t &phaseFraction, uint32_t phaseIncrement) { + (void)maxOutPt; // remove unused parameter warnings + (void)maxInIdx; + (void)outputIndex; + (void)out; + (void)inputIndex; + (void)vl; + (void)vr; + (void)phaseFraction; + (void)phaseIncrement; + (void)in; #define MO_PARAM5 "36" // offset of parameter 5 (outputIndex) asm( @@ -625,6 +682,16 @@ void AudioResamplerOrder1::AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32 size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr, uint32_t &phaseFraction, uint32_t phaseIncrement) { + (void)maxOutPt; // remove unused parameter warnings + (void)maxInIdx; + (void)outputIndex; + (void)out; + (void)inputIndex; + (void)vl; + (void)vr; + (void)phaseFraction; + (void)phaseIncrement; + (void)in; #define ST_PARAM5 "40" // offset of parameter 5 (outputIndex) asm( "stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}\n" diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h index 33e64ce..b84567e 100644 --- a/services/audioflinger/AudioResampler.h +++ b/services/audioflinger/AudioResampler.h @@ -41,6 +41,9 @@ public: MED_QUALITY=2, HIGH_QUALITY=3, VERY_HIGH_QUALITY=4, + DYN_LOW_QUALITY=5, + DYN_MED_QUALITY=6, + DYN_HIGH_QUALITY=7, }; static AudioResampler* create(int bitDepth, int inChannelCount, @@ -60,7 +63,7 @@ public: // A mono provider delivers a sequence of samples. // A stereo provider delivers a sequence of interleaved pairs of samples. // Multi-channel providers are not supported. - // In either case, 'out' holds interleaved pairs of fixed-point signed Q19.12. + // In either case, 'out' holds interleaved pairs of fixed-point Q4.27. // That is, for a mono provider, there is an implicit up-channeling. 
// Since this method accumulates, the caller is responsible for clearing 'out' initially. // FIXME assumes provider is always successful; it should return the actual frame count. @@ -81,7 +84,7 @@ protected: static const uint32_t kPhaseMask = (1LU<<kNumPhaseBits)-1; // multiplier to calculate fixed point phase increment - static const double kPhaseMultiplier = 1L << kNumPhaseBits; + static const double kPhaseMultiplier; AudioResampler(int bitDepth, int inChannelCount, int32_t sampleRate, src_quality quality); @@ -107,6 +110,38 @@ protected: uint64_t mLocalTimeFreq; int64_t mPTS; + // returns the inFrameCount required to generate outFrameCount frames. + // + // Placed here to be a consistent for all resamplers. + // + // Right now, we use the upper bound without regards to the current state of the + // input buffer using integer arithmetic, as follows: + // + // (static_cast<uint64_t>(outFrameCount)*mInSampleRate + (mSampleRate - 1))/mSampleRate; + // + // The double precision equivalent (float may not be precise enough): + // ceil(static_cast<double>(outFrameCount) * mInSampleRate / mSampleRate); + // + // this relies on the fact that the mPhaseIncrement is rounded down from + // #phases * mInSampleRate/mSampleRate and the fact that Sum(Floor(x)) <= Floor(Sum(x)). + // http://www.proofwiki.org/wiki/Sum_of_Floors_Not_Greater_Than_Floor_of_Sums + // + // (so long as double precision is computed accurately enough to be considered + // greater than or equal to the Floor(x) value in int32_t arithmetic; thus this + // will not necessarily hold for floats). + // + // TODO: + // Greater accuracy and a tight bound is obtained by: + // 1) subtract and adjust for the current state of the AudioBufferProvider buffer. + // 2) using the exact integer formula where (ignoring 64b casting) + // inFrameCount = (mPhaseIncrement * (outFrameCount - 1) + mPhaseFraction) / phaseWrapLimit; + // phaseWrapLimit is the wraparound (1 << kNumPhaseBits), if not specified explicitly. 
+ // + inline size_t getInFrameCountRequired(size_t outFrameCount) { + return (static_cast<uint64_t>(outFrameCount)*mInSampleRate + + (mSampleRate - 1))/mSampleRate; + } + private: const src_quality mQuality; diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp index 18e59e9..8f14ff9 100644 --- a/services/audioflinger/AudioResamplerCubic.cpp +++ b/services/audioflinger/AudioResamplerCubic.cpp @@ -60,14 +60,15 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); // fetch first buffer if (mBuffer.frameCount == 0) { mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, mPTS); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { return; + } // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount); } int16_t *in = mBuffer.i16; @@ -97,8 +98,9 @@ void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount, mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, calculateOutputPTS(outputIndex / 2)); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { goto save_state; // ugly, but efficient + } in = mBuffer.i16; // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount); } @@ -126,14 +128,15 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); // fetch first buffer if (mBuffer.frameCount == 0) { mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, mPTS); - if (mBuffer.raw == 
NULL) + if (mBuffer.raw == NULL) { return; + } // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount); } int16_t *in = mBuffer.i16; @@ -163,8 +166,9 @@ void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount, mBuffer.frameCount = inFrameCount; provider->getNextBuffer(&mBuffer, calculateOutputPTS(outputIndex / 2)); - if (mBuffer.raw == NULL) + if (mBuffer.raw == NULL) { goto save_state; // ugly, but efficient + } // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount); in = mBuffer.i16; } diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp new file mode 100644 index 0000000..3abe8fd --- /dev/null +++ b/services/audioflinger/AudioResamplerDyn.cpp @@ -0,0 +1,556 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioResamplerDyn" +//#define LOG_NDEBUG 0 + +#include <malloc.h> +#include <string.h> +#include <stdlib.h> +#include <dlfcn.h> +#include <math.h> + +#include <cutils/compiler.h> +#include <cutils/properties.h> +#include <utils/Debug.h> +#include <utils/Log.h> + +#include "AudioResamplerFirOps.h" // USE_NEON and USE_INLINE_ASSEMBLY defined here +#include "AudioResamplerFirProcess.h" +#include "AudioResamplerFirProcessNeon.h" +#include "AudioResamplerFirGen.h" // requires math.h +#include "AudioResamplerDyn.h" + +//#define DEBUG_RESAMPLER + +namespace android { + +// generate a unique resample type compile-time constant (constexpr) +#define RESAMPLETYPE(CHANNELS, LOCKED, STRIDE) \ + ((((CHANNELS)-1)&1) | !!(LOCKED)<<1 \ + | ((STRIDE)==8 ? 1 : (STRIDE)==16 ? 2 : 0)<<2) + +/* + * InBuffer is a type agnostic input buffer. + * + * Layout of the state buffer for halfNumCoefs=8. + * + * [rrrrrrppppppppnnnnnnnnrrrrrrrrrrrrrrrrrrr.... rrrrrrr] + * S I R + * + * S = mState + * I = mImpulse + * R = mRingFull + * p = past samples, convoluted with the (p)ositive side of sinc() + * n = future samples, convoluted with the (n)egative side of sinc() + * r = extra space for implementing the ring buffer + */ + +template<typename TC, typename TI, typename TO> +AudioResamplerDyn<TC, TI, TO>::InBuffer::InBuffer() + : mState(NULL), mImpulse(NULL), mRingFull(NULL), mStateCount(0) +{ +} + +template<typename TC, typename TI, typename TO> +AudioResamplerDyn<TC, TI, TO>::InBuffer::~InBuffer() +{ + init(); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::InBuffer::init() +{ + free(mState); + mState = NULL; + mImpulse = NULL; + mRingFull = NULL; + mStateCount = 0; +} + +// resizes the state buffer to accommodate the appropriate filter length +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::InBuffer::resize(int CHANNELS, int halfNumCoefs) +{ + // calculate desired state size + int stateCount = 
halfNumCoefs * CHANNELS * 2 * kStateSizeMultipleOfFilterLength; + + // check if buffer needs resizing + if (mState + && stateCount == mStateCount + && mRingFull-mState == mStateCount-halfNumCoefs*CHANNELS) { + return; + } + + // create new buffer + TI* state; + (void)posix_memalign(reinterpret_cast<void**>(&state), 32, stateCount*sizeof(*state)); + memset(state, 0, stateCount*sizeof(*state)); + + // attempt to preserve state + if (mState) { + TI* srcLo = mImpulse - halfNumCoefs*CHANNELS; + TI* srcHi = mImpulse + halfNumCoefs*CHANNELS; + TI* dst = state; + + if (srcLo < mState) { + dst += mState-srcLo; + srcLo = mState; + } + if (srcHi > mState + mStateCount) { + srcHi = mState + mStateCount; + } + memcpy(dst, srcLo, (srcHi - srcLo) * sizeof(*srcLo)); + free(mState); + } + + // set class member vars + mState = state; + mStateCount = stateCount; + mImpulse = state + halfNumCoefs*CHANNELS; // actually one sample greater than needed + mRingFull = state + mStateCount - halfNumCoefs*CHANNELS; +} + +// copy in the input data into the head (impulse+halfNumCoefs) of the buffer. 
+template<typename TC, typename TI, typename TO> +template<int CHANNELS> +void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAgain(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex) +{ + TI* head = impulse + halfNumCoefs*CHANNELS; + for (size_t i=0 ; i<CHANNELS ; i++) { + head[i] = in[inputIndex*CHANNELS + i]; + } +} + +// advance the impulse pointer, and load in data into the head (impulse+halfNumCoefs) +template<typename TC, typename TI, typename TO> +template<int CHANNELS> +void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAdvance(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex) +{ + impulse += CHANNELS; + + if (CC_UNLIKELY(impulse >= mRingFull)) { + const size_t shiftDown = mRingFull - mState - halfNumCoefs*CHANNELS; + memcpy(mState, mState+shiftDown, halfNumCoefs*CHANNELS*2*sizeof(TI)); + impulse -= shiftDown; + } + readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::Constants::set( + int L, int halfNumCoefs, int inSampleRate, int outSampleRate) +{ + int bits = 0; + int lscale = inSampleRate/outSampleRate < 2 ? L - 1 : + static_cast<int>(static_cast<uint64_t>(L)*inSampleRate/outSampleRate); + for (int i=lscale; i; ++bits, i>>=1) + ; + mL = L; + mShift = kNumPhaseBits - bits; + mHalfNumCoefs = halfNumCoefs; +} + +template<typename TC, typename TI, typename TO> +AudioResamplerDyn<TC, TI, TO>::AudioResamplerDyn(int bitDepth, + int inChannelCount, int32_t sampleRate, src_quality quality) + : AudioResampler(bitDepth, inChannelCount, sampleRate, quality), + mResampleFunc(0), mFilterSampleRate(0), mFilterQuality(DEFAULT_QUALITY), + mCoefBuffer(NULL) +{ + mVolumeSimd[0] = mVolumeSimd[1] = 0; + // The AudioResampler base class assumes we are always ready for 1:1 resampling. + // We reset mInSampleRate to 0, so setSampleRate() will calculate filters for + // setSampleRate() for 1:1. 
(May be removed if precalculated filters are used.) + mInSampleRate = 0; + mConstants.set(128, 8, mSampleRate, mSampleRate); // TODO: set better +} + +template<typename TC, typename TI, typename TO> +AudioResamplerDyn<TC, TI, TO>::~AudioResamplerDyn() +{ + free(mCoefBuffer); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::init() +{ + mFilterSampleRate = 0; // always trigger new filter generation + mInBuffer.init(); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::setVolume(int16_t left, int16_t right) +{ + AudioResampler::setVolume(left, right); + // volume is applied on the output type. + if (is_same<TO, float>::value || is_same<TO, double>::value) { + const TO scale = 1. / (1UL << 12); + mVolumeSimd[0] = static_cast<TO>(left) * scale; + mVolumeSimd[1] = static_cast<TO>(right) * scale; + } else { + mVolumeSimd[0] = static_cast<int32_t>(left) << 16; + mVolumeSimd[1] = static_cast<int32_t>(right) << 16; + } +} + +template<typename T> T max(T a, T b) {return a > b ? a : b;} + +template<typename T> T absdiff(T a, T b) {return a > b ? 
a - b : b - a;} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::createKaiserFir(Constants &c, + double stopBandAtten, int inSampleRate, int outSampleRate, double tbwCheat) +{ + TC* buf; + static const double atten = 0.9998; // to avoid ripple overflow + double fcr; + double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten); + + (void)posix_memalign(reinterpret_cast<void**>(&buf), 32, (c.mL+1)*c.mHalfNumCoefs*sizeof(TC)); + if (inSampleRate < outSampleRate) { // upsample + fcr = max(0.5*tbwCheat - tbw/2, tbw/2); + } else { // downsample + fcr = max(0.5*tbwCheat*outSampleRate/inSampleRate - tbw/2, tbw/2); + } + // create and set filter + firKaiserGen(buf, c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten); + c.mFirCoefs = buf; + if (mCoefBuffer) { + free(mCoefBuffer); + } + mCoefBuffer = buf; +#ifdef DEBUG_RESAMPLER + // print basic filter stats + printf("L:%d hnc:%d stopBandAtten:%lf fcr:%lf atten:%lf tbw:%lf\n", + c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten, tbw); + // test the filter and report results + double fp = (fcr - tbw/2)/c.mL; + double fs = (fcr + tbw/2)/c.mL; + double passMin, passMax, passRipple; + double stopMax, stopRipple; + testFir(buf, c.mL, c.mHalfNumCoefs, fp, fs, /*passSteps*/ 1000, /*stopSteps*/ 100000, + passMin, passMax, passRipple, stopMax, stopRipple); + printf("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple); + printf("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple); +#endif +} + +// recursive gcd. Using objdump, it appears the tail recursion is converted to a while loop. +static int gcd(int n, int m) +{ + if (m == 0) { + return n; + } + return gcd(m, n % m); +} + +static bool isClose(int32_t newSampleRate, int32_t prevSampleRate, + int32_t filterSampleRate, int32_t outSampleRate) +{ + + // different upsampling ratios do not need a filter change. 
+ if (filterSampleRate != 0 + && filterSampleRate < outSampleRate + && newSampleRate < outSampleRate) + return true; + + // check design criteria again if downsampling is detected. + int pdiff = absdiff(newSampleRate, prevSampleRate); + int adiff = absdiff(newSampleRate, filterSampleRate); + + // allow up to 6% relative change increments. + // allow up to 12% absolute change increments (from filter design) + return pdiff < prevSampleRate>>4 && adiff < filterSampleRate>>3; +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::setSampleRate(int32_t inSampleRate) +{ + if (mInSampleRate == inSampleRate) { + return; + } + int32_t oldSampleRate = mInSampleRate; + int32_t oldHalfNumCoefs = mConstants.mHalfNumCoefs; + uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift; + bool useS32 = false; + + mInSampleRate = inSampleRate; + + // TODO: Add precalculated Equiripple filters + + if (mFilterQuality != getQuality() || + !isClose(inSampleRate, oldSampleRate, mFilterSampleRate, mSampleRate)) { + mFilterSampleRate = inSampleRate; + mFilterQuality = getQuality(); + + // Begin Kaiser Filter computation + // + // The quantization floor for S16 is about 96db - 10*log_10(#length) + 3dB. 
+ // Keep the stop band attenuation no greater than 84-85dB for 32 length S16 filters + // + // For s32 we keep the stop band attenuation at the same as 16b resolution, about + // 96-98dB + // + + double stopBandAtten; + double tbwCheat = 1.; // how much we "cheat" into aliasing + int halfLength; + if (mFilterQuality == DYN_HIGH_QUALITY) { + // 32b coefficients, 64 length + useS32 = true; + stopBandAtten = 98.; + if (inSampleRate >= mSampleRate * 4) { + halfLength = 48; + } else if (inSampleRate >= mSampleRate * 2) { + halfLength = 40; + } else { + halfLength = 32; + } + } else if (mFilterQuality == DYN_LOW_QUALITY) { + // 16b coefficients, 16-32 length + useS32 = false; + stopBandAtten = 80.; + if (inSampleRate >= mSampleRate * 4) { + halfLength = 24; + } else if (inSampleRate >= mSampleRate * 2) { + halfLength = 16; + } else { + halfLength = 8; + } + if (inSampleRate <= mSampleRate) { + tbwCheat = 1.05; + } else { + tbwCheat = 1.03; + } + } else { // DYN_MED_QUALITY + // 16b coefficients, 32-64 length + // note: > 64 length filters with 16b coefs can have quantization noise problems + useS32 = false; + stopBandAtten = 84.; + if (inSampleRate >= mSampleRate * 4) { + halfLength = 32; + } else if (inSampleRate >= mSampleRate * 2) { + halfLength = 24; + } else { + halfLength = 16; + } + if (inSampleRate <= mSampleRate) { + tbwCheat = 1.03; + } else { + tbwCheat = 1.01; + } + } + + // determine the number of polyphases in the filterbank. + // for 16b, it is desirable to have 2^(16/2) = 256 phases. + // https://ccrma.stanford.edu/~jos/resample/Relation_Interpolation_Error_Quantization.html + // + // We are a bit more lax on this. + + int phases = mSampleRate / gcd(mSampleRate, inSampleRate); + + // TODO: Once dynamic sample rate change is an option, the code below + // should be modified to execute only when dynamic sample rate change is enabled. + // + // as above, #phases less than 63 is too few phases for accurate linear interpolation. 
+ // we increase the phases to compensate, but more phases means more memory per + // filter and more time to compute the filter. + // + // if we know that the filter will be used for dynamic sample rate changes, + // that would allow us skip this part for fixed sample rate resamplers. + // + while (phases<63) { + phases *= 2; // this code only needed to support dynamic rate changes + } + + if (phases>=256) { // too many phases, always interpolate + phases = 127; + } + + // create the filter + mConstants.set(phases, halfLength, inSampleRate, mSampleRate); + createKaiserFir(mConstants, stopBandAtten, + inSampleRate, mSampleRate, tbwCheat); + } // End Kaiser filter + + // update phase and state based on the new filter. + const Constants& c(mConstants); + mInBuffer.resize(mChannelCount, c.mHalfNumCoefs); + const uint32_t phaseWrapLimit = c.mL << c.mShift; + // try to preserve as much of the phase fraction as possible for on-the-fly changes + mPhaseFraction = static_cast<unsigned long long>(mPhaseFraction) + * phaseWrapLimit / oldPhaseWrapLimit; + mPhaseFraction %= phaseWrapLimit; // should not do anything, but just in case. + mPhaseIncrement = static_cast<uint32_t>(static_cast<double>(phaseWrapLimit) + * inSampleRate / mSampleRate); + + // determine which resampler to use + // check if locked phase (works only if mPhaseIncrement has no "fractional phase bits") + int locked = (mPhaseIncrement << (sizeof(mPhaseIncrement)*8 - c.mShift)) == 0; + int stride = (c.mHalfNumCoefs&7)==0 ? 16 : (c.mHalfNumCoefs&3)==0 ? 8 : 2; + if (locked) { + mPhaseFraction = mPhaseFraction >> c.mShift << c.mShift; // remove fractional phase + } + + setResampler(RESAMPLETYPE(mChannelCount, locked, stride)); +#ifdef DEBUG_RESAMPLER + printf("channels:%d %s stride:%d %s coef:%d shift:%d\n", + mChannelCount, locked ? "locked" : "interpolated", + stride, useS32 ? 
"S32" : "S16", 2*c.mHalfNumCoefs, c.mShift); +#endif +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount, + AudioBufferProvider* provider) +{ + (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider); +} + +template<typename TC, typename TI, typename TO> +void AudioResamplerDyn<TC, TI, TO>::setResampler(unsigned resampleType) +{ + // stride 16 (falls back to stride 2 for machines that do not support NEON) + switch (resampleType) { + case RESAMPLETYPE(1, true, 16): + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>; + return; + case RESAMPLETYPE(2, true, 16): + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>; + return; + case RESAMPLETYPE(1, false, 16): + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>; + return; + case RESAMPLETYPE(2, false, 16): + mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>; + return; + default: + LOG_ALWAYS_FATAL("Invalid resampler type: %u", resampleType); + mResampleFunc = NULL; + return; + } +} + +template<typename TC, typename TI, typename TO> +template<int CHANNELS, bool LOCKED, int STRIDE> +void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount, + AudioBufferProvider* provider) +{ + const Constants& c(mConstants); + const TC* const coefs = mConstants.mFirCoefs; + TI* impulse = mInBuffer.getImpulse(); + size_t inputIndex = mInputIndex; + uint32_t phaseFraction = mPhaseFraction; + const uint32_t phaseIncrement = mPhaseIncrement; + size_t outputIndex = 0; + size_t outputSampleCount = outFrameCount * 2; // stereo output + size_t inFrameCount = getInFrameCountRequired(outFrameCount); + const uint32_t phaseWrapLimit = c.mL << c.mShift; + + // NOTE: be very careful when modifying the code here. register + // pressure is very high and a small change might cause the compiler + // to generate far less efficient code. 
+ // Always sanity check the result with objdump or test-resample. + + // the following logic is a bit convoluted to keep the main processing loop + // as tight as possible with register allocation. + while (outputIndex < outputSampleCount) { + // buffer is empty, fetch a new one + while (mBuffer.frameCount == 0) { + mBuffer.frameCount = inFrameCount; + provider->getNextBuffer(&mBuffer, + calculateOutputPTS(outputIndex / 2)); + if (mBuffer.raw == NULL) { + goto resample_exit; + } + if (phaseFraction >= phaseWrapLimit) { // read in data + mInBuffer.template readAdvance<CHANNELS>( + impulse, c.mHalfNumCoefs, + reinterpret_cast<TI*>(mBuffer.raw), inputIndex); + phaseFraction -= phaseWrapLimit; + while (phaseFraction >= phaseWrapLimit) { + inputIndex++; + if (inputIndex >= mBuffer.frameCount) { + inputIndex -= mBuffer.frameCount; + provider->releaseBuffer(&mBuffer); + break; + } + mInBuffer.template readAdvance<CHANNELS>( + impulse, c.mHalfNumCoefs, + reinterpret_cast<TI*>(mBuffer.raw), inputIndex); + phaseFraction -= phaseWrapLimit; + } + } + } + const TI* const in = reinterpret_cast<const TI*>(mBuffer.raw); + const size_t frameCount = mBuffer.frameCount; + const int coefShift = c.mShift; + const int halfNumCoefs = c.mHalfNumCoefs; + const TO* const volumeSimd = mVolumeSimd; + + // reread the last input in. + mInBuffer.template readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); + + // main processing loop + while (CC_LIKELY(outputIndex < outputSampleCount)) { + // caution: fir() is inlined and may be large. + // output will be loaded with the appropriate values + // + // from the input samples in impulse[-halfNumCoefs+1]... impulse[halfNumCoefs] + // from the polyphase filter of (phaseFraction / phaseWrapLimit) in coefs. 
+ // + fir<CHANNELS, LOCKED, STRIDE>( + &out[outputIndex], + phaseFraction, phaseWrapLimit, + coefShift, halfNumCoefs, coefs, + impulse, volumeSimd); + outputIndex += 2; + + phaseFraction += phaseIncrement; + while (phaseFraction >= phaseWrapLimit) { + inputIndex++; + if (inputIndex >= frameCount) { + goto done; // need a new buffer + } + mInBuffer.template readAdvance<CHANNELS>(impulse, halfNumCoefs, in, inputIndex); + phaseFraction -= phaseWrapLimit; + } + } +done: + // often arrives here when input buffer runs out + if (inputIndex >= frameCount) { + inputIndex -= frameCount; + provider->releaseBuffer(&mBuffer); + // mBuffer.frameCount MUST be zero here. + } + } + +resample_exit: + mInBuffer.setImpulse(impulse); + mInputIndex = inputIndex; + mPhaseFraction = phaseFraction; +} + +/* instantiate templates used by AudioResampler::create */ +template class AudioResamplerDyn<float, float, float>; +template class AudioResamplerDyn<int16_t, int16_t, int32_t>; +template class AudioResamplerDyn<int32_t, int16_t, int32_t>; + +// ---------------------------------------------------------------------------- +}; // namespace android diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h new file mode 100644 index 0000000..8c56319 --- /dev/null +++ b/services/audioflinger/AudioResamplerDyn.h @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_RESAMPLER_DYN_H +#define ANDROID_AUDIO_RESAMPLER_DYN_H + +#include <stdint.h> +#include <sys/types.h> +#include <cutils/log.h> + +#include "AudioResampler.h" + +namespace android { + +/* AudioResamplerDyn + * + * This class template is used for floating point and integer resamplers. + * + * Type variables: + * TC = filter coefficient type (one of int16_t, int32_t, or float) + * TI = input data type (one of int16_t or float) + * TO = output data type (one of int32_t or float) + * + * For integer input data types TI, the coefficient type TC is either int16_t or int32_t. + * For float input data types TI, the coefficient type TC is float. + */ + +template<typename TC, typename TI, typename TO> +class AudioResamplerDyn: public AudioResampler { +public: + AudioResamplerDyn(int bitDepth, int inChannelCount, + int32_t sampleRate, src_quality quality); + + virtual ~AudioResamplerDyn(); + + virtual void init(); + + virtual void setSampleRate(int32_t inSampleRate); + + virtual void setVolume(int16_t left, int16_t right); + + virtual void resample(int32_t* out, size_t outFrameCount, + AudioBufferProvider* provider); + +private: + + class Constants { // stores the filter constants. + public: + Constants() : + mL(0), mShift(0), mHalfNumCoefs(0), mFirCoefs(NULL) + {} + void set(int L, int halfNumCoefs, + int inSampleRate, int outSampleRate); + + int mL; // interpolation phases in the filter. 
+ int mShift; // right shift to get polyphase index + unsigned int mHalfNumCoefs; // filter half #coefs + const TC* mFirCoefs; // polyphase filter bank + }; + + class InBuffer { // buffer management for input type TI + public: + InBuffer(); + ~InBuffer(); + void init(); + + void resize(int CHANNELS, int halfNumCoefs); + + // used for direct management of the mImpulse pointer + inline TI* getImpulse() { + return mImpulse; + } + + inline void setImpulse(TI *impulse) { + mImpulse = impulse; + } + + template<int CHANNELS> + inline void readAgain(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex); + + template<int CHANNELS> + inline void readAdvance(TI*& impulse, const int halfNumCoefs, + const TI* const in, const size_t inputIndex); + + private: + // tuning parameter guidelines: 2 <= multiple <= 8 + static const int kStateSizeMultipleOfFilterLength = 4; + + // in general, mRingFull = mState + mStateSize - halfNumCoefs*CHANNELS. + TI* mState; // base pointer for the input buffer storage + TI* mImpulse; // current location of the impulse response (centered) + TI* mRingFull; // mState <= mImpulse < mRingFull + size_t mStateCount; // size of state in units of TI. + }; + + void createKaiserFir(Constants &c, double stopBandAtten, + int inSampleRate, int outSampleRate, double tbwCheat); + + void setResampler(unsigned resampleType); + + template<int CHANNELS, bool LOCKED, int STRIDE> + void resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider); + + // declare a pointer to member function for resample + typedef void (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out, + size_t outFrameCount, AudioBufferProvider* provider); + + // data - the contiguous storage and layout of these is important. 
+ InBuffer mInBuffer; + Constants mConstants; // current set of coefficient parameters + TO __attribute__ ((aligned (8))) mVolumeSimd[2]; // must be aligned or NEON may crash + resample_ABP_t mResampleFunc; // called function for resampling + int32_t mFilterSampleRate; // designed filter sample rate. + src_quality mFilterQuality; // designed filter quality. + void* mCoefBuffer; // if a filter is created, this is not null +}; + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_DYN_H*/ diff --git a/services/audioflinger/AudioResamplerFirGen.h b/services/audioflinger/AudioResamplerFirGen.h new file mode 100644 index 0000000..d024b2f --- /dev/null +++ b/services/audioflinger/AudioResamplerFirGen.h @@ -0,0 +1,709 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_GEN_H +#define ANDROID_AUDIO_RESAMPLER_FIR_GEN_H + +namespace android { + +/* + * generates a sine wave at equal steps. + * + * As most of our functions use sine or cosine at equal steps, + * it is very efficient to compute them that way (single multiply and subtract), + * rather than invoking the math library sin() or cos() each time. + * + * SineGen uses Goertzel's Algorithm (as a generator not a filter) + * to calculate sine(wstart + n * wstep) or cosine(wstart + n * wstep) + * by stepping through 0, 1, ... n. 
+ * + * e^i(wstart+wstep) = 2cos(wstep) * e^i(wstart) - e^i(wstart-wstep) + * + * or looking at just the imaginary sine term, as the cosine follows identically: + * + * sin(wstart+wstep) = 2cos(wstep) * sin(wstart) - sin(wstart-wstep) + * + * Goertzel's algorithm is more efficient than the angle addition formula, + * e^i(wstart+wstep) = e^i(wstart) * e^i(wstep), which takes up to + * 4 multiplies and 2 adds (or 3* and 3+) and requires both sine and + * cosine generation due to the complex * complex multiply (full rotation). + * + * See: http://en.wikipedia.org/wiki/Goertzel_algorithm + * + */ + +class SineGen { +public: + SineGen(double wstart, double wstep, bool cosine = false) { + if (cosine) { + mCurrent = cos(wstart); + mPrevious = cos(wstart - wstep); + } else { + mCurrent = sin(wstart); + mPrevious = sin(wstart - wstep); + } + mTwoCos = 2.*cos(wstep); + } + SineGen(double expNow, double expPrev, double twoCosStep) { + mCurrent = expNow; + mPrevious = expPrev; + mTwoCos = twoCosStep; + } + inline double value() const { + return mCurrent; + } + inline void advance() { + double tmp = mCurrent; + mCurrent = mCurrent*mTwoCos - mPrevious; + mPrevious = tmp; + } + inline double valueAdvance() { + double tmp = mCurrent; + mCurrent = mCurrent*mTwoCos - mPrevious; + mPrevious = tmp; + return tmp; + } + +private: + double mCurrent; // current value of sine/cosine + double mPrevious; // previous value of sine/cosine + double mTwoCos; // stepping factor +}; + +/* + * generates a series of sine generators, phase offset by fixed steps. + * + * This is used to generate polyphase sine generators, one per polyphase + * in the filter code below. + * + * The SineGen returned by value() starts at innerStart = outerStart + n*outerStep; + * increments by innerStep. 
+ * + */ + +class SineGenGen { +public: + SineGenGen(double outerStart, double outerStep, double innerStep, bool cosine = false) + : mSineInnerCur(outerStart, outerStep, cosine), + mSineInnerPrev(outerStart-innerStep, outerStep, cosine) + { + mTwoCos = 2.*cos(innerStep); + } + inline SineGen value() { + return SineGen(mSineInnerCur.value(), mSineInnerPrev.value(), mTwoCos); + } + inline void advance() { + mSineInnerCur.advance(); + mSineInnerPrev.advance(); + } + inline SineGen valueAdvance() { + return SineGen(mSineInnerCur.valueAdvance(), mSineInnerPrev.valueAdvance(), mTwoCos); + } + +private: + SineGen mSineInnerCur; // generate the inner sine values (stepped by outerStep). + SineGen mSineInnerPrev; // generate the inner sine previous values + // (behind by innerStep, stepped by outerStep). + double mTwoCos; // the inner stepping factor for the returned SineGen. +}; + +static inline double sqr(double x) { + return x * x; +} + +/* + * rounds a double to the nearest integer for FIR coefficients. + * + * One variant uses noise shaping, which must keep error history + * to work (the err parameter, initialized to 0). + * The other variant is a non-noise shaped version for + * S32 coefficients (noise shaping doesn't gain much). + * + * Caution: No bounds saturation is applied, but isn't needed in this case. + * + * @param x is the value to round. + * + * @param maxval is the maximum integer scale factor expressed as an int64 (for headroom). + * Typically this may be the maximum positive integer+1 (using the fact that double precision + * FIR coefficients generated here are never that close to 1.0 to pose an overflow condition). + * + * @param err is the previous error (actual - rounded) for the previous rounding op. + * For 16b coefficients this can improve stopband dB performance by up to 2dB. 
// (continued) Many variants exist for the noise shaping:
// http://en.wikipedia.org/wiki/Noise_shaping

// Round x*maxval to nearest with first-order noise shaping; err carries the
// previous quantization error forward (0.2 is a mild shaping coefficient).
static inline int64_t toint(double x, int64_t maxval, double& err) {
    double val = x * maxval;
    double ival = floor(val + 0.5 + err*0.2);
    err = val - ival;
    return static_cast<int64_t>(ival);
}

// Round x*maxval to nearest (no noise shaping; used for S32 coefficients).
static inline int64_t toint(double x, int64_t maxval) {
    return static_cast<int64_t>(floor(x * maxval + 0.5));
}

/*
 * Modified Bessel function of the first kind
 * http://en.wikipedia.org/wiki/Bessel_function
 *
 * The formulas are taken from Abramowitz and Stegun,
 * _Handbook of Mathematical Functions_ (links below):
 *
 * http://people.math.sfu.ca/~cbm/aands/page_375.htm
 * http://people.math.sfu.ca/~cbm/aands/page_378.htm
 *
 * http://dlmf.nist.gov/10.25
 * http://dlmf.nist.gov/10.40
 *
 * Note we assume x is nonnegative (the function is symmetric,
 * pass in the absolute value as needed).
 *
 * Constants are compile time derived with templates I0Term<> and
 * I0ATerm<> to the precision of the compiler. The series can be expanded
 * to any precision needed, but currently set around 24b precision.
 *
 * FIX: the members below were "static const double" with in-class
 * initializers, which is ill-formed standard C++ for non-integral types
 * (a GCC extension); constexpr is the conforming spelling and makes the
 * members implicitly inline in C++17.
 *
 * For the intermediate range 3.75 < x < 15, we use a minimax polynomial fit
 * (see I0() below).
 */

template <int N>
struct I0Term {
    static constexpr double value = I0Term<N-1>::value / (4. * N * N);
};

template <>
struct I0Term<0> {
    static constexpr double value = 1.;
};

template <int N>
struct I0ATerm {
    static constexpr double value = I0ATerm<N-1>::value * (2.*N-1.) * (2.*N-1.) / (8. * N);
};

template <>
struct I0ATerm<0> { // 1/sqrt(2*PI);
    static constexpr double value = 0.398942280401432677939946059934381868475858631164934657665925;
};

#if USE_HORNERS_METHOD
/* Polynomial evaluation of A + Bx + Cx^2 + Dx^3 + ...
 * using Horner's Method: http://en.wikipedia.org/wiki/Horner's_method
 *
 * This has fewer multiplications than Estrin's method below, but has back
 * to back floating point dependencies. On ARM this appears to work slower,
 * so USE_HORNERS_METHOD is not default enabled.
 */

inline double Poly2(double A, double B, double x) {
    return A + x * B;
}

inline double Poly4(double A, double B, double C, double D, double x) {
    return A + x * (B + x * (C + x * (D)));
}

inline double Poly7(double A, double B, double C, double D, double E, double F, double G,
        double x) {
    return A + x * (B + x * (C + x * (D + x * (E + x * (F + x * (G))))));
}

inline double Poly9(double A, double B, double C, double D, double E, double F, double G,
        double H, double I, double x) {
    return A + x * (B + x * (C + x * (D + x * (E + x * (F + x * (G + x * (H + x * (I))))))));
}

#else
/* Polynomial evaluation of A + Bx + Cx^2 + Dx^3 + ...
 * using Estrin's Method: http://en.wikipedia.org/wiki/Estrin's_scheme
 *
 * This is typically faster, perhaps gains about 5-10% overall on ARM
 * processors over Horner's method above.
 */

inline double Poly2(double A, double B, double x) {
    return A + B * x;
}

inline double Poly3(double A, double B, double C, double x, double x2) {
    return Poly2(A, B, x) + C * x2;
}

inline double Poly3(double A, double B, double C, double x) {
    return Poly2(A, B, x) + C * x * x;
}

inline double Poly4(double A, double B, double C, double D, double x, double x2) {
    return Poly2(A, B, x) + Poly2(C, D, x) * x2; // same as poly2(poly2, poly2, x2);
}

inline double Poly4(double A, double B, double C, double D, double x) {
    return Poly4(A, B, C, D, x, x * x);
}

inline double Poly7(double A, double B, double C, double D, double E, double F, double G,
        double x) {
    double x2 = x * x;
    return Poly4(A, B, C, D, x, x2) + Poly3(E, F, G, x, x2) * (x2 * x2);
}

inline double Poly8(double A, double B, double C, double D, double E, double F, double G,
        double H, double x, double x2, double x4) {
    return Poly4(A, B, C, D, x, x2) + Poly4(E, F, G, H, x, x2) * x4;
}

inline double Poly9(double A, double B, double C, double D, double E, double F, double G,
        double H, double I, double x) {
    // It does not seem faster to explicitly decompose Poly8 into Poly4, but
    // could depend on compiler floating point scheduling.
    double x2 = x * x;
    double x4 = x2 * x2;
    return Poly8(A, B, C, D, E, F, G, H, x, x2, x4) + I * (x4 * x4);
}
#endif

static inline double I0(double x) {
    if (x < 3.75) {
        // Series expansion around 0: e < 1.6e-7 on this interval. The error
        // is unequal over 0 < x < 3.75, being significantly better near 0.
        x *= x;
        return Poly7(I0Term<0>::value, I0Term<1>::value,
                I0Term<2>::value, I0Term<3>::value,
                I0Term<4>::value, I0Term<5>::value,
                I0Term<6>::value, x); // e < 1.6e-7
    }
    if (1) {
        /*
         * Minimax rational fit for 3.75 < x < 15, based on the tables in
         * Blair and Edwards, "Stable Rational Minimax Approximations
         * to the Modified Bessel Functions I0(x) and I1(x)", Chalk River
         * Nuclear Laboratory, AECL-4928.
         *
         * http://www.iaea.org/inis/collection/NCLCollectionStore/_Public/06/178/6178667.pdf
         *
         * See Table 11 for 0 < x < 15; e < 10^(-7.13).
         *
         * Note: Beta cannot exceed 15 (hence Stopband cannot exceed 144dB = 24b).
         *
         * This speeds up overall computation by about 40% over using the else
         * clause below, which requires sqrt and exp.
         */
        x *= x;
        double num = Poly9(-0.13544938430e9, -0.33153754512e8,
                -0.19406631946e7, -0.48058318783e5,
                -0.63269783360e3, -0.49520779070e1,
                -0.24970910370e-1, -0.74741159550e-4,
                -0.18257612460e-6, x);
        double y = x - 225.; // reflection around 15 (squared)
        double den = Poly4(-0.34598737196e8, 0.23852643181e6,
                -0.70699387620e3, 0.10000000000e1, y);
        return num / den;

#if IO_EXTENDED_BETA
        /* Table 42 for x > 15; e < 10^(-8.11).
         * This is used for Beta>15, but is disabled here as
         * we never use Beta that high.
         *
         * NOTE: This should be enabled only for x > 15.
         */
        double y = 1./x;
        double z = y - (1./15);
        double num = Poly2(0.415079861746e1, -0.5149092496e1, z);
        double den = Poly3(0.103150763823e2, -0.14181687413e2,
                0.1000000000e1, z);
        return exp(x) * sqrt(y) * num / den;
#endif
    } else {
        /*
         * NOT USED, but kept as reference for large Beta:
         * Abramowitz and Stegun asymptotic formula, works for x > 3.75.
         */
        double y = 1./x;
        return exp(x) * sqrt(y) *
                // note: reciprocal squareroot may be easier!
                // http://en.wikipedia.org/wiki/Fast_inverse_square_root
                Poly9(I0ATerm<0>::value, I0ATerm<1>::value,
                        I0ATerm<2>::value, I0ATerm<3>::value,
                        I0ATerm<4>::value, I0ATerm<5>::value,
                        I0ATerm<6>::value, I0ATerm<7>::value,
                        I0ATerm<8>::value, y); // (... e) < 1.9e-7
    }
}

/* A speed optimized version of the Modified Bessel I0() which incorporates
 * the sqrt and numerator multiply and denominator divide into the computation.
 * This speeds up filter computation by about 10-15%.
 */
static inline double I0SqrRat(double x2, double num, double den) {
    if (x2 < (3.75 * 3.75)) {
        return Poly7(I0Term<0>::value, I0Term<1>::value,
                I0Term<2>::value, I0Term<3>::value,
                I0Term<4>::value, I0Term<5>::value,
                I0Term<6>::value, x2) * num / den; // e < 1.6e-7
    }
    num *= Poly9(-0.13544938430e9, -0.33153754512e8,
            -0.19406631946e7, -0.48058318783e5,
            -0.63269783360e3, -0.49520779070e1,
            -0.24970910370e-1, -0.74741159550e-4,
            -0.18257612460e-6, x2); // e < 10^(-7.13).
    double y = x2 - 225.; // reflection around 15 (squared)
    den *= Poly4(-0.34598737196e8, 0.23852643181e6,
            -0.70699387620e3, 0.10000000000e1, y);
    return num / den;
}

/*
 * calculates the transition bandwidth for a Kaiser filter
 *
 * Formula 3.2.8, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48
 * Formula 7.76, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542
 *
 * @param halfNumCoef is half the number of coefficients per filter phase.
 *
 * @param stopBandAtten is the stop band attenuation desired.
 *
 * @return the transition bandwidth in normalized frequency (0 <= f <= 0.5)
 */
static inline double firKaiserTbw(int halfNumCoef, double stopBandAtten) {
    return (stopBandAtten - 7.95)/((2.*14.36)*halfNumCoef);
}

// firTransfer() (below) calculates the fir transfer response of the overall
// polyphase filter at w.
+ * + * Calculates the DTFT transfer coefficient H(w) for 0 <= w <= PI, utilizing the + * fact that h[n] is symmetric (cosines only, no complex arithmetic). + * + * We use Goertzel's algorithm to accelerate the computation to essentially + * a single multiply and 2 adds per filter coefficient h[]. + * + * Be careful be careful to consider that h[n] is the overall polyphase filter, + * with L phases, so rescaling H(w)/L is probably what you expect for "unity gain", + * as you only use one of the polyphases at a time. + */ +template <typename T> +static inline double firTransfer(const T* coef, int L, int halfNumCoef, double w) { + double accum = static_cast<double>(coef[0])*0.5; // "center coefficient" from first bank + coef += halfNumCoef; // skip first filterbank (picked up by the last filterbank). +#if SLOW_FIRTRANSFER + /* Original code for reference. This is equivalent to the code below, but slower. */ + for (int i=1 ; i<=L ; ++i) { + for (int j=0, ix=i ; j<halfNumCoef ; ++j, ix+=L) { + accum += cos(ix*w)*static_cast<double>(*coef++); + } + } +#else + /* + * Our overall filter is stored striped by polyphases, not a contiguous h[n]. + * We could fetch coefficients in a non-contiguous fashion + * but that will not scale to vector processing. + * + * We apply Goertzel's algorithm directly to each polyphase filter bank instead of + * using cosine generation/multiplication, thereby saving one multiply per inner loop. + * + * See: http://en.wikipedia.org/wiki/Goertzel_algorithm + * Also: Oppenheim and Schafer, _Discrete Time Signal Processing, 3e_, p. 720. + * + * We use the basic recursion to incorporate the cosine steps into real sequence x[n]: + * s[n] = x[n] + (2cosw)*s[n-1] + s[n-2] + * + * y[n] = s[n] - e^(iw)s[n-1] + * = sum_{k=-\infty}^{n} x[k]e^(-iw(n-k)) + * = e^(-iwn) sum_{k=0}^{n} x[k]e^(iwk) + * + * The summation contains the frequency steps we want multiplied by the source + * (similar to a DTFT). 
+ * + * Using symmetry, and just the real part (be careful, this must happen + * after any internal complex multiplications), the polyphase filterbank + * transfer function is: + * + * Hpp[n, w, w_0] = sum_{k=0}^{n} x[k] * cos(wk + w_0) + * = Re{ e^(iwn + iw_0) y[n]} + * = cos(wn+w_0) * s[n] - cos(w(n+1)+w_0) * s[n-1] + * + * using the fact that s[n] of real x[n] is real. + * + */ + double dcos = 2. * cos(L*w); + int start = ((halfNumCoef)*L + 1); + SineGen cc((start - L) * w, w, true); // cosine + SineGen cp(start * w, w, true); // cosine + for (int i=1 ; i<=L ; ++i) { + double sc = 0; + double sp = 0; + for (int j=0 ; j<halfNumCoef ; ++j) { + double tmp = sc; + sc = static_cast<double>(*coef++) + dcos*sc - sp; + sp = tmp; + } + // If we are awfully clever, we can apply Goertzel's algorithm + // again on the sc and sp sequences returned here. + accum += cc.valueAdvance() * sc - cp.valueAdvance() * sp; + } +#endif + return accum*2.; +} + +/* + * evaluates the minimum and maximum |H(f)| bound in a band region. + * + * This is usually done with equally spaced increments in the target band in question. + * The passband is often very small, and sampled that way. The stopband is often much + * larger. + * + * We use the fact that the overall polyphase filter has an additional bank at the end + * for interpolation; hence it is overspecified for the H(f) computation. Thus the + * first polyphase is never actually checked, excepting its first term. + * + * In this code we use the firTransfer() evaluator above, which uses Goertzel's + * algorithm to calculate the transfer function at each point. + * + * TODO: An alternative with equal spacing is the FFT/DFT. An alternative with unequal + * spacing is a chirp transform. + * + * @param coef is the designed polyphase filter banks + * + * @param L is the number of phases (for interpolation) + * + * @param halfNumCoef should be half the number of coefficients for a single + * polyphase. 
/*
 * (banded |H(f)| scan, continued from the note above)
 *
 * @param coef is the designed polyphase filter banks
 * @param L is the number of phases (for interpolation)
 * @param halfNumCoef should be half the number of coefficients for a single polyphase.
 * @param fstart is the normalized frequency start.
 * @param fend is the normalized frequency end.
 * @param steps is the number of steps to take (sampling) between fstart and fend
 * @param firMin returns the minimum transfer |H(f)| found
 * @param firMax returns the maximum transfer |H(f)| found
 *
 * 0 <= f <= 0.5. This is used to test passband and stopband performance.
 */
template <typename T>
static void testFir(const T* coef, int L, int halfNumCoef,
        double fstart, double fend, int steps, double &firMin, double &firMax) {
    const double wstart = fstart*(2.*M_PI);
    const double wend = fend*(2.*M_PI);
    const double wstep = (wend - wstart)/steps;
    // seed the running min/max from the first sampled point
    double w = wstart;
    double mag = fabs(firTransfer(coef, L, halfNumCoef, w));
    double fmin = mag;
    double fmax = mag;
    w += wstep;
    for (int i=1; i<steps; ++i) {
        mag = fabs(firTransfer(coef, L, halfNumCoef, w));
        if (mag > fmax) {
            fmax = mag;
        } else if (mag < fmin) {
            fmin = mag;
        }
        w += wstep;
    }
    // renormalize - this is only needed for integer filter types
    const double norm = 1./((1ULL<<(sizeof(T)*8-1))*L);

    firMin = fmin * norm;
    firMax = fmax * norm;
}

/*
 * evaluates the |H(f)| lowpass band characteristics.
 *
 * This tests the lowpass characteristics of the overall polyphase filter and
 * is used to verify the design. fp should be set to the passband normalized
 * frequency from 0 to 0.5 for the overall filter (the designed polyphase bank
 * value / L); likewise for fs.
 *
 * @param coef is the designed polyphase filter banks
 * @param L is the number of phases (for interpolation)
 * @param halfNumCoef should be half the number of coefficients for a single polyphase.
 * @param fp is the passband normalized frequency, 0 < fp < fs < 0.5.
 * @param fs is the stopband normalized frequency, 0 < fp < fs < 0.5.
 * @param passSteps is the number of passband sampling steps.
 * @param stopSteps is the number of stopband sampling steps.
 * @param passMin is the minimum value in the passband
 * @param passMax is the maximum value in the passband (useful for scaling).
 *        This should be less than 1., to avoid sine wave test overflow.
 * @param passRipple is the passband ripple. Typically this should be less than
 *        0.1 for an audio filter; generally speaker/headphone device
 *        characteristics will dominate the passband term.
 * @param stopMax is the maximum value in the stopband.
 * @param stopRipple is the stopband ripple, also known as stopband attenuation.
 *        Typically this should be greater than ~80dB for low quality, and
 *        greater than ~100dB for full 16b quality, otherwise aliasing may
 *        become noticeable.
 */
template <typename T>
static void testFir(const T* coef, int L, int halfNumCoef,
        double fp, double fs, int passSteps, int stopSteps,
        double &passMin, double &passMax, double &passRipple,
        double &stopMax, double &stopRipple) {
    double fmin, fmax;
    testFir(coef, L, halfNumCoef, 0., fp, passSteps, fmin, fmax);
    const double d1 = (fmax - fmin)/2.;
    passMin = fmin;
    passMax = fmax;
    passRipple = -20.*log10(1. - d1); // passband ripple
    testFir(coef, L, halfNumCoef, fs, 0.5, stopSteps, fmin, fmax);
    stopMax = fmax;                   // fmin is really not important for the stopband.
    stopRipple = -20.*log10(fmax);    // stopband ripple/attenuation
}

// firKaiserGen() (below) calculates the overall polyphase filter based on a
// windowed sinc function. The windowed sinc is an odd length symmetric filter
// of exactly L*halfNumCoef*2+1 taps for the entire kernel, decomposed into
// L+1 polyphase filterbanks. The last filterbank is used for interpolation
// purposes (and is mostly composed of the first bank shifted by one sample),
// and is unnecessary if one does not do interpolation.
+ * + * We use the last filterbank for some transfer function calculation purposes, + * so it needs to be generated anyways. + * + * @param coef is the caller allocated space for coefficients. This should be + * exactly (L+1)*halfNumCoef in size. + * + * @param L is the number of phases (for interpolation) + * + * @param halfNumCoef should be half the number of coefficients for a single + * polyphase. + * + * @param stopBandAtten is the stopband value, should be >50dB. + * + * @param fcr is cutoff frequency/sampling rate (<0.5). At this point, the energy + * should be 6dB less. (fcr is where the amplitude drops by half). Use the + * firKaiserTbw() to calculate the transition bandwidth. fcr is the midpoint + * between the stop band and the pass band (fstop+fpass)/2. + * + * @param atten is the attenuation (generally slightly less than 1). + */ + +template <typename T> +static inline void firKaiserGen(T* coef, int L, int halfNumCoef, + double stopBandAtten, double fcr, double atten) { + // + // Formula 3.2.5, 3.2.7, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48 + // Formula 7.75, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542 + // + // See also: http://melodi.ee.washington.edu/courses/ee518/notes/lec17.pdf + // + // Kaiser window and beta parameter + // + // | 0.1102*(A - 8.7) A > 50 + // beta = | 0.5842*(A - 21)^0.4 + 0.07886*(A - 21) 21 <= A <= 50 + // | 0. A < 21 + // + // with A is the desired stop-band attenuation in dBFS + // + // 30 dB 2.210 + // 40 dB 3.384 + // 50 dB 4.538 + // 60 dB 5.658 + // 70 dB 6.764 + // 80 dB 7.865 + // 90 dB 8.960 + // 100 dB 10.056 + + const int N = L * halfNumCoef; // non-negative half + const double beta = 0.1102 * (stopBandAtten - 8.7); // >= 50dB always + const double xstep = (2. * M_PI) * fcr / L; + const double xfrac = 1. 
/ N; + const double yscale = atten * L / (I0(beta) * M_PI); + const double sqrbeta = sqr(beta); + + // We use sine generators, which computes sines on regular step intervals. + // This speeds up overall computation about 40% from computing the sine directly. + + SineGenGen sgg(0., xstep, L*xstep); // generates sine generators (one per polyphase) + + for (int i=0 ; i<=L ; ++i) { // generate an extra set of coefs for interpolation + + // computation for a single polyphase of the overall filter. + SineGen sg = sgg.valueAdvance(); // current sine generator for "j" inner loop. + double err = 0; // for noise shaping on int16_t coefficients (over each polyphase) + + for (int j=0, ix=i ; j<halfNumCoef ; ++j, ix+=L) { + double y; + if (CC_LIKELY(ix)) { + double x = static_cast<double>(ix); + + // sine generator: sg.valueAdvance() returns sin(ix*xstep); + // y = I0(beta * sqrt(1.0 - sqr(x * xfrac))) * yscale * sg.valueAdvance() / x; + y = I0SqrRat(sqrbeta * (1.0 - sqr(x * xfrac)), yscale * sg.valueAdvance(), x); + } else { + y = 2. * atten * fcr; // center of filter, sinc(0) = 1. + sg.advance(); + } + + if (is_same<T, int16_t>::value) { // int16_t needs noise shaping + *coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1), err)); + } else if (is_same<T, int32_t>::value) { + *coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1))); + } else { // assumed float or double + *coef++ = static_cast<T>(y); + } + } + } +} + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_GEN_H*/ diff --git a/services/audioflinger/AudioResamplerFirOps.h b/services/audioflinger/AudioResamplerFirOps.h new file mode 100644 index 0000000..bf2163f --- /dev/null +++ b/services/audioflinger/AudioResamplerFirOps.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_OPS_H +#define ANDROID_AUDIO_RESAMPLER_FIR_OPS_H + +namespace android { + +#if defined(__arm__) && !defined(__thumb__) +#define USE_INLINE_ASSEMBLY (true) +#else +#define USE_INLINE_ASSEMBLY (false) +#endif + +#if USE_INLINE_ASSEMBLY && defined(__ARM_NEON__) +#define USE_NEON (true) +#include <arm_neon.h> +#else +#define USE_NEON (false) +#endif + +template<typename T, typename U> +struct is_same +{ + static const bool value = false; +}; + +template<typename T> +struct is_same<T, T> // partial specialization +{ + static const bool value = true; +}; + +static inline +int32_t mulRL(int left, int32_t in, uint32_t vRL) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smultb %[out], %[in], %[vRL] \n" + : [out]"=r"(out) + : [in]"%r"(in), [vRL]"r"(vRL) + : ); + } else { + asm( "smultt %[out], %[in], %[vRL] \n" + : [out]"=r"(out) + : [in]"%r"(in), [vRL]"r"(vRL) + : ); + } + return out; +#else + int16_t v = left ? 
static_cast<int16_t>(vRL) : static_cast<int16_t>(vRL>>16); + return static_cast<int32_t>((static_cast<int64_t>(in) * v) >> 16); +#endif +} + +static inline +int32_t mulAdd(int16_t in, int16_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smlabb %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + v * in; +#endif +} + +static inline +int32_t mulAdd(int16_t in, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smlawb %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 16); +#endif +} + +static inline +int32_t mulAdd(int32_t in, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + asm( "smmla %[out], %[v], %[in], %[a] \n" + : [out]"=r"(out) + : [in]"%r"(in), [v]"r"(v), [a]"r"(a) + : ); + return out; +#else + return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 32); +#endif +} + +static inline +int32_t mulAddRL(int left, uint32_t inRL, int16_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smlabb %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } else { + asm( "smlabt %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } + return out; +#else + int16_t s = left ? 
static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16); + return a + v * s; +#endif +} + +static inline +int32_t mulAddRL(int left, uint32_t inRL, int32_t v, int32_t a) +{ +#if USE_INLINE_ASSEMBLY + int32_t out; + if (left) { + asm( "smlawb %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } else { + asm( "smlawt %[out], %[v], %[inRL], %[a] \n" + : [out]"=r"(out) + : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a) + : ); + } + return out; +#else + int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16); + return a + static_cast<int32_t>((static_cast<int64_t>(v) * s) >> 16); +#endif +} + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_OPS_H*/ diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h new file mode 100644 index 0000000..76d2d66 --- /dev/null +++ b/services/audioflinger/AudioResamplerFirProcess.h @@ -0,0 +1,333 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H +#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H + +namespace android { + +// depends on AudioResamplerFirOps.h + +/* variant for input type TI = int16_t input samples */ +template<typename TC> +static inline +void mac(int32_t& l, int32_t& r, TC coef, const int16_t* samples) +{ + uint32_t rl = *reinterpret_cast<const uint32_t*>(samples); + l = mulAddRL(1, rl, coef, l); + r = mulAddRL(0, rl, coef, r); +} + +template<typename TC> +static inline +void mac(int32_t& l, TC coef, const int16_t* samples) +{ + l = mulAdd(samples[0], coef, l); +} + +/* variant for input type TI = float input samples */ +template<typename TC> +static inline +void mac(float& l, float& r, TC coef, const float* samples) +{ + l += *samples++ * coef; + r += *samples++ * coef; +} + +template<typename TC> +static inline +void mac(float& l, TC coef, const float* samples) +{ + l += *samples++ * coef; +} + +/* variant for output type TO = int32_t output samples */ +static inline +int32_t volumeAdjust(int32_t value, int32_t volume) +{ + return 2 * mulRL(0, value, volume); // Note: only use top 16b +} + +/* variant for output type TO = float output samples */ +static inline +float volumeAdjust(float value, float volume) +{ + return value * volume; +} + +/* + * Calculates a single output frame (two samples). + * + * This function computes both the positive half FIR dot product and + * the negative half FIR dot product, accumulates, and then applies the volume. + * + * This is a locked phase filter (it does not compute the interpolation). + * + * Use fir() to compute the proper coefficient pointers for a polyphase + * filter bank. 
+ */ + +template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO> +static inline +void ProcessL(TO* const out, + int count, + const TC* coefsP, + const TC* coefsN, + const TI* sP, + const TI* sN, + const TO* const volumeLR) +{ + COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS >= 1 && CHANNELS <= 2) + if (CHANNELS == 2) { + TO l = 0; + TO r = 0; + do { + mac(l, r, *coefsP++, sP); + sP -= CHANNELS; + mac(l, r, *coefsN++, sN); + sN += CHANNELS; + } while (--count > 0); + out[0] += volumeAdjust(l, volumeLR[0]); + out[1] += volumeAdjust(r, volumeLR[1]); + } else { /* CHANNELS == 1 */ + TO l = 0; + do { + mac(l, *coefsP++, sP); + sP -= CHANNELS; + mac(l, *coefsN++, sN); + sN += CHANNELS; + } while (--count > 0); + out[0] += volumeAdjust(l, volumeLR[0]); + out[1] += volumeAdjust(l, volumeLR[1]); + } +} + +/* + * Calculates a single output frame (two samples) interpolating phase. + * + * This function computes both the positive half FIR dot product and + * the negative half FIR dot product, accumulates, and then applies the volume. + * + * This is an interpolated phase filter. + * + * Use fir() to compute the proper coefficient pointers for a polyphase + * filter bank. 
+ */ + +template<typename TC, typename T> +void adjustLerp(T& lerpP __unused) +{ +} + +template<int32_t, typename T> +void adjustLerp(T& lerpP) +{ + lerpP >>= 16; // lerpP is 32bit for NEON int32_t, but always 16 bit for non-NEON path +} + +template<typename TC, typename TINTERP> +static inline +TC interpolate(TC coef_0, TC coef_1, TINTERP lerp) +{ + return lerp * (coef_1 - coef_0) + coef_0; +} + +template<int16_t, uint32_t> +static inline +int16_t interpolate(int16_t coef_0, int16_t coef_1, uint32_t lerp) +{ + return (static_cast<int16_t>(lerp) * ((coef_1-coef_0)<<1)>>16) + coef_0; +} + +template<int32_t, uint32_t> +static inline +int32_t interpolate(int32_t coef_0, int32_t coef_1, uint32_t lerp) +{ + return mulAdd(static_cast<int16_t>(lerp), (coef_1-coef_0)<<1, coef_0); +} + +template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO, typename TINTERP> +static inline +void Process(TO* const out, + int count, + const TC* coefsP, + const TC* coefsN, + const TC* coefsP1 __unused, + const TC* coefsN1 __unused, + const TI* sP, + const TI* sN, + TINTERP lerpP, + const TO* const volumeLR) +{ + COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS >= 1 && CHANNELS <= 2) + adjustLerp<TC, TINTERP>(lerpP); // coefficient type adjustment for interpolation + + if (CHANNELS == 2) { + TO l = 0; + TO r = 0; + for (size_t i = 0; i < count; ++i) { + mac(l, r, interpolate(coefsP[0], coefsP[count], lerpP), sP); + coefsP++; + sP -= CHANNELS; + mac(l, r, interpolate(coefsN[count], coefsN[0], lerpP), sN); + coefsN++; + sN += CHANNELS; + } + out[0] += volumeAdjust(l, volumeLR[0]); + out[1] += volumeAdjust(r, volumeLR[1]); + } else { /* CHANNELS == 1 */ + TO l = 0; + for (size_t i = 0; i < count; ++i) { + mac(l, interpolate(coefsP[0], coefsP[count], lerpP), sP); + coefsP++; + sP -= CHANNELS; + mac(l, interpolate(coefsN[count], coefsN[0], lerpP), sN); + coefsN++; + sN += CHANNELS; + } + out[0] += volumeAdjust(l, volumeLR[0]); + out[1] += volumeAdjust(l, volumeLR[1]); + } +} + +/* + * 
Calculates a single output frame (two samples) from input sample pointer. + * + * This sets up the params for the accelerated Process() and ProcessL() + * functions to do the appropriate dot products. + * + * @param out should point to the output buffer with space for at least one output frame. + * + * @param phase is the fractional distance between input frames for interpolation: + * phase >= 0 && phase < phaseWrapLimit. It can be thought of as a rational fraction + * of phase/phaseWrapLimit. + * + * @param phaseWrapLimit is #polyphases<<coefShift, where #polyphases is the number of polyphases + * in the polyphase filter. Likewise, #polyphases can be obtained as (phaseWrapLimit>>coefShift). + * + * @param coefShift gives the bit alignment of the polyphase index in the phase parameter. + * + * @param halfNumCoefs is the half the number of coefficients per polyphase filter. Since the + * overall filterbank is odd-length symmetric, only halfNumCoefs need be stored. + * + * @param coefs is the polyphase filter bank, starting at from polyphase index 0, and ranging to + * and including the #polyphases. Each polyphase of the filter has half-length halfNumCoefs + * (due to symmetry). The total size of the filter bank in coefficients is + * (#polyphases+1)*halfNumCoefs. + * + * The filter bank coefs should be aligned to a minimum of 16 bytes (preferrably to cache line). + * + * The coefs should be attenuated (to compensate for passband ripple) + * if storing back into the native format. + * + * @param samples are unaligned input samples. The position is in the "middle" of the + * sample array with respect to the FIR filter: + * the negative half of the filter is dot product from samples+1 to samples+halfNumCoefs; + * the positive half of the filter is dot product from samples to samples-halfNumCoefs+1. + * + * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel, + * expressed as a S32 integer. 
A negative value inverts the channel 180 degrees. + * The pointer volumeLR should be aligned to a minimum of 8 bytes. + * A typical value for volume is 0x1000 to align to a unity gain output of 20.12. + * + * In between calls to filterCoefficient, the phase is incremented by phaseIncrement, where + * phaseIncrement is calculated as inputSampling * phaseWrapLimit / outputSampling. + * + * The filter polyphase index is given by indexP = phase >> coefShift. Due to + * odd length symmetric filter, the polyphase index of the negative half depends on + * whether interpolation is used. + * + * The fractional siting between the polyphase indices is given by the bits below coefShift: + * + * lerpP = phase << 32 - coefShift >> 1; // for 32 bit unsigned phase multiply + * lerpP = phase << 32 - coefShift >> 17; // for 16 bit unsigned phase multiply + * + * For integer types, this is expressed as: + * + * lerpP = phase << sizeof(phase)*8 - coefShift + * >> (sizeof(phase)-sizeof(*coefs))*8 + 1; + * + * For floating point, lerpP is the fractional phase scaled to [0.0, 1.0): + * + * lerpP = (phase << 32 - coefShift) / (1 << 32); // floating point equivalent + */ + +template<int CHANNELS, bool LOCKED, int STRIDE, typename TC, typename TI, typename TO> +static inline +void fir(TO* const out, + const uint32_t phase, const uint32_t phaseWrapLimit, + const int coefShift, const int halfNumCoefs, const TC* const coefs, + const TI* const samples, const TO* const volumeLR) +{ + // NOTE: be very careful when modifying the code here. register + // pressure is very high and a small change might cause the compiler + // to generate far less efficient code. + // Always sanity check the result with objdump or test-resample. + + if (LOCKED) { + // locked polyphase (no interpolation) + // Compute the polyphase filter index on the positive and negative side. 
+ uint32_t indexP = phase >> coefShift; + uint32_t indexN = (phaseWrapLimit - phase) >> coefShift; + const TC* coefsP = coefs + indexP*halfNumCoefs; + const TC* coefsN = coefs + indexN*halfNumCoefs; + const TI* sP = samples; + const TI* sN = samples + CHANNELS; + + // dot product filter. + ProcessL<CHANNELS, STRIDE>(out, + halfNumCoefs, coefsP, coefsN, sP, sN, volumeLR); + } else { + // interpolated polyphase + // Compute the polyphase filter index on the positive and negative side. + uint32_t indexP = phase >> coefShift; + uint32_t indexN = (phaseWrapLimit - phase - 1) >> coefShift; // one's complement. + const TC* coefsP = coefs + indexP*halfNumCoefs; + const TC* coefsN = coefs + indexN*halfNumCoefs; + const TC* coefsP1 = coefsP + halfNumCoefs; + const TC* coefsN1 = coefsN + halfNumCoefs; + const TI* sP = samples; + const TI* sN = samples + CHANNELS; + + // Interpolation fraction lerpP derived by shifting all the way up and down + // to clear the appropriate bits and align to the appropriate level + // for the integer multiply. The constants should resolve in compile time. + // + // The interpolated filter coefficient is derived as follows for the pos/neg half: + // + // interpolated[P] = index[P]*lerpP + index[P+1]*(1-lerpP) + // interpolated[N] = index[N+1]*lerpP + index[N]*(1-lerpP) + + // on-the-fly interpolated dot product filter + if (is_same<TC, float>::value || is_same<TC, double>::value) { + static const TC scale = 1. / (65536. 
* 65536.); // scale phase bits to [0.0, 1.0) + TC lerpP = TC(phase << (sizeof(phase)*8 - coefShift)) * scale; + + Process<CHANNELS, STRIDE>(out, + halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR); + } else { + uint32_t lerpP = phase << (sizeof(phase)*8 - coefShift) + >> ((sizeof(phase)-sizeof(*coefs))*8 + 1); + + Process<CHANNELS, STRIDE>(out, + halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR); + } + } +} + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H*/ diff --git a/services/audioflinger/AudioResamplerFirProcessNeon.h b/services/audioflinger/AudioResamplerFirProcessNeon.h new file mode 100644 index 0000000..f311cef --- /dev/null +++ b/services/audioflinger/AudioResamplerFirProcessNeon.h @@ -0,0 +1,1149 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H +#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H + +namespace android { + +// depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h + +#if USE_NEON +// +// NEON specializations are enabled for Process() and ProcessL() +// +// TODO: Stride 16 and Stride 8 can be combined with one pass stride 8 (if necessary) +// and looping stride 16 (or vice versa). This has some polyphase coef data alignment +// issues with S16 coefs. Consider this later. 
+ +// Macros to save a mono/stereo accumulator sample in q0 (and q4) as stereo out. +#define ASSEMBLY_ACCUMULATE_MONO \ + "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes */\ + "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output */\ + "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums */\ + "vpadd.s32 d0, d0, d0 \n"/* (1+4d) and replicate L/R */\ + "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume */\ + "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating) */\ + "vst1.s32 {d3}, %[out] \n"/* (2+2d) store result */ + +#define ASSEMBLY_ACCUMULATE_STEREO \ + "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes*/\ + "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output*/\ + "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums from q0*/\ + "vpadd.s32 d8, d8, d9 \n"/* (1) add all 4 partial sums from q4*/\ + "vpadd.s32 d0, d0, d8 \n"/* (1+4d) combine into L/R*/\ + "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume*/\ + "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating)*/\ + "vst1.s32 {d3}, %[out] \n"/* (2+2d)store result*/ + +template <> +inline void ProcessL<1, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) 
+ "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply (reversed)samples by coef + "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed)samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q10" + ); +} + +template <> +inline void ProcessL<2, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q10}, [%[coefsN0]:128]! 
\n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + "vrev64.16 q3, q3 \n"// (0 combines+) reverse right positive + + "vmlal.s16 q0, d4, d17 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d6, d17 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q4, d7, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left + "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left + "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right + "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q10" + ); +} + +template <> +inline void Process<1, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase S32 Q15 + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q9}, [%[coefsP1]:128]! 
\n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + "vadd.s16 q8, q8, q9 \n"// (1+2d) interpolate (step3) 1st set + "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply reversed samples by coef + "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void Process<2, 16>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params 
+ const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs + "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + "vrev64.16 q3, q3 \n"// (1) reverse 8 frames of the right positive + + "vadd.s16 q8, q8, q9 \n"// (1+1d) interpolate (step3) 1st set + "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set + + "vmlal.s16 q0, d4, d17 \n"// (1) multiply reversed samples left + "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples left + "vmlal.s16 q4, d6, d17 \n"// (1) multiply reversed samples right + "vmlal.s16 q4, d7, d16 \n"// (1) multiply reversed samples right + "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left + "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left + "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right + "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #8 \n"// (1) update loop counter + "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + 
ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void ProcessL<1, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN0]:128]! 
\n"// load 8 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// accumulate result + "vadd.s32 q0, q0, q13 \n"// accumulate result + + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + "subs %[count], %[count], #8 \n"// update loop counter + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void ProcessL<2, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN0]:128]! 
\n"// load 4 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + "vrev64.16 q3, q3 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// (+1) accumulate result + "vadd.s32 q0, q0, q13 \n"// (+1) accumulate result + + "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q4, q4, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// (+1) accumulate result + "vadd.s32 q4, q4, q13 \n"// (+1) accumulate result + + "subs %[count], %[count], #8 \n"// update loop counter + "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", 
"memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void Process<1, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples + "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q14, q15}, [%[coefsN0]:128]! 
\n"// load 8 32-bits coefs + + "vsub.s32 q12, q12, q8 \n"// interpolate (step1) + "vsub.s32 q13, q13, q9 \n"// interpolate (step1) + "vsub.s32 q14, q14, q10 \n"// interpolate (step1) + "vsub.s32 q15, q15, q11 \n"// interpolate (step1) + + "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2) + + "vadd.s32 q8, q8, q12 \n"// interpolate (step3) + "vadd.s32 q9, q9, q13 \n"// interpolate (step3) + "vadd.s32 q10, q10, q14 \n"// interpolate (step3) + "vadd.s32 q11, q11, q15 \n"// interpolate (step3) + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// accumulate result + "vadd.s32 q0, q0, q13 \n"// accumulate result + + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + "subs %[count], %[count], #8 \n"// update loop counter + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); 
+} + +template <> +inline void Process<2, 16>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 16; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {q2, q3}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {q5, q6}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs + "vld1.32 {q14, q15}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs + + "vsub.s32 q12, q12, q8 \n"// interpolate (step1) + "vsub.s32 q13, q13, q9 \n"// interpolate (step1) + "vsub.s32 q14, q14, q10 \n"// interpolate (step1) + "vsub.s32 q15, q15, q11 \n"// interpolate (step1) + + "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2) + "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2) + + "vadd.s32 q8, q8, q12 \n"// interpolate (step3) + "vadd.s32 q9, q9, q13 \n"// interpolate (step3) + "vadd.s32 q10, q10, q14 \n"// interpolate (step3) + "vadd.s32 q11, q11, q15 \n"// interpolate (step3) + + "vrev64.16 q2, q2 \n"// reverse 8 frames of the positive side + "vrev64.16 q3, q3 \n"// reverse 8 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d11, #15 \n"// extend samples to 
31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q0, q0, q15 \n"// (+1) accumulate result + "vadd.s32 q0, q0, q13 \n"// (+1) accumulate result + + "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef + + "vadd.s32 q4, q4, q12 \n"// accumulate result + "vadd.s32 q13, q13, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// (+1) accumulate result + "vadd.s32 q4, q4, q13 \n"// (+1) accumulate result + + "subs %[count], %[count], #8 \n"// update loop counter + "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void ProcessL<1, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const 
volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// (2+0d) load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// (2) load 4 16-bits mono samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d20}, [%[coefsN0]:64]! \n"// (1) load 4 16-bits coefs + + "vrev64.16 d4, d4 \n"// (1) reversed s3, s2, s1, s0, s7, s6, s5, s4 + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed)samples by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #8 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q10" + ); +} + +template <> +inline void ProcessL<2, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// (2+0d) load 8 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// (2) load 8 16-bits stereo samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d20}, [%[coefsN0]:64]! 
\n"// (1) load 8 16-bits coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d5, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d6, d20 \n"// (1) multiply samples left + "vmlal.s16 q4, d7, d20 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q10" + ); +} + +template <> +inline void Process<1, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase S32 Q15 + "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// (2+0d) load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// (2) load 4 16-bits mono samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d17}, [%[coefsP1]:64]! \n"// (1) load 4 16-bits coefs for interpolation + "vld1.16 {d20}, [%[coefsN1]:64]! \n"// (1) load 4 16-bits coefs + "vld1.16 {d21}, [%[coefsN0]:64]! 
\n"// (1) load 4 16-bits coefs for interpolation + + "vsub.s16 d17, d17, d16 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 d21, d21, d20 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 d17, d17, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 d21, d21, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 d4, d4 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4 + + "vadd.s16 d16, d16, d17 \n"// (1+2d) interpolate (step3) 1st set + "vadd.s16 d20, d20, d21 \n"// (1+1d) interpolate (step3) 2nd set + + // reordering the vmal to do d6, d7 before d4, d5 is slower(?) + "vmlal.s16 q0, d4, d16 \n"// (1+0d) multiply (reversed)by coef + "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples + + // moving these ARM instructions before neon above seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void Process<2, 8>(int32_t* const out, + int count, + const int16_t* coefsP, + const int16_t* coefsN, + const int16_t* coefsP1, + const int16_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// (1) acc_L = 0 + "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// (3+0d) load 8 16-bits 
stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// (3) load 8 16-bits stereo samples + "vld1.16 {d16}, [%[coefsP0]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d17}, [%[coefsP1]:64]! \n"// (1) load 8 16-bits coefs for interpolation + "vld1.16 {d20}, [%[coefsN1]:64]! \n"// (1) load 8 16-bits coefs + "vld1.16 {d21}, [%[coefsN0]:64]! \n"// (1) load 8 16-bits coefs for interpolation + + "vsub.s16 d17, d17, d16 \n"// (1) interpolate (step1) 1st set of coefs + "vsub.s16 d21, d21, d20 \n"// (1) interpolate (step1) 2nd set of coets + + "vqrdmulh.s16 d17, d17, d2[0] \n"// (2) interpolate (step2) 1st set of coefs + "vqrdmulh.s16 d21, d21, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs + + "vrev64.16 q2, q2 \n"// (1) reverse 8 frames of the left positive + + "vadd.s16 d16, d16, d17 \n"// (1+1d) interpolate (step3) 1st set + "vadd.s16 d20, d20, d21 \n"// (1+1d) interpolate (step3) 2nd set + + "vmlal.s16 q0, d4, d16 \n"// (1) multiply (reversed) samples left + "vmlal.s16 q4, d5, d16 \n"// (1) multiply (reversed) samples right + "vmlal.s16 q0, d6, d20 \n"// (1) multiply samples left + "vmlal.s16 q4, d7, d20 \n"// (1) multiply samples right + + // moving these ARM before neon seems to be slower + "subs %[count], %[count], #4 \n"// (1) update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + // sP used after branch (warning) + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [coefsP1] "+r" (coefsP1), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q4", "q5", "q6", + "q8", "q9", "q10", "q11" + ); +} + +template <> +inline void ProcessL<1, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 
1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// load 4 16-bits mono samples + "vld1.32 {q8}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs + + "vrev64.16 d4, d4 \n"// reverse 2 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// (stall) extend samples to 31 bits + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// (stall) accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q14" + ); +} + +template <> +inline void ProcessL<2, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int16_t* sP, + const int16_t* sN, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + + "vld2.16 {d4, d5}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8}, [%[coefsP0]:128]! 
\n"// load 4 32-bits coefs + "vld1.32 {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs + + "vrev64.16 q2, q2 \n"// reverse 2 frames of the positive side + + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef + "vqrdmulh.s32 q15, q15, q10 \n"// multiply samples by coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q4, q4, q13 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsN0] "+r" (coefsN), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", "q4", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +template <> +inline void Process<1, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 1; // template specialization does not preserve params + const int STRIDE = 8; + sP -= CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + + "1: \n" + + "vld1.16 {d4}, [%[sP]] \n"// load 4 16-bits mono samples + "vld1.16 {d6}, [%[sN]]! \n"// load 4 16-bits mono samples + "vld1.32 {q8}, [%[coefsP0]:128]! 
\n"// load 4 32-bits coefs + "vld1.32 {q9}, [%[coefsP1]:128]! \n"// load 4 32-bits coefs for interpolation + "vld1.32 {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs for interpolation + + "vrev64.16 d4, d4 \n"// reverse 2 frames of the positive side + + "vsub.s32 q9, q9, q8 \n"// interpolate (step1) 1st set of coefs + "vsub.s32 q11, q11, q10 \n"// interpolate (step1) 2nd set of coets + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q9, q9, d2[0] \n"// interpolate (step2) 1st set of coefs + "vqrdmulh.s32 q11, q11, d2[0] \n"// interpolate (step2) 2nd set of coefs + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + + "vadd.s32 q8, q8, q9 \n"// interpolate (step3) 1st set + "vadd.s32 q10, q10, q11 \n"// interpolate (step4) 2nd set + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #8 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_MONO + + : [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsP1] "+r" (coefsP1), + [coefsN0] "+r" (coefsN), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", + "q8", "q9", "q10", "q11", + "q12", "q14" + ); +} + +template <> +inline +void Process<2, 8>(int32_t* const out, + int count, + const int32_t* coefsP, + const int32_t* coefsN, + const int32_t* coefsP1, + const int32_t* coefsN1, + const int16_t* sP, + const int16_t* sN, + uint32_t lerpP, + const int32_t* const volumeLR) +{ + const int CHANNELS = 2; // template specialization does not preserve params + const int STRIDE = 8; + sP -= 
CHANNELS*((STRIDE>>1)-1); + asm ( + "vmov.32 d2[0], %[lerpP] \n"// load the positive phase + "veor q0, q0, q0 \n"// result, initialize to 0 + "veor q4, q4, q4 \n"// result, initialize to 0 + + "1: \n" + "vld2.16 {d4, d5}, [%[sP]] \n"// load 4 16-bits stereo samples + "vld2.16 {d6, d7}, [%[sN]]! \n"// load 4 16-bits stereo samples + "vld1.32 {q8}, [%[coefsP0]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q9}, [%[coefsP1]:128]! \n"// load 4 32-bits coefs for interpolation + "vld1.32 {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs + "vld1.32 {q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs for interpolation + + "vrev64.16 q2, q2 \n"// (reversed) 2 frames of the positive side + + "vsub.s32 q9, q9, q8 \n"// interpolate (step1) 1st set of coefs + "vsub.s32 q11, q11, q10 \n"// interpolate (step1) 2nd set of coets + "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits + "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits + + "vqrdmulh.s32 q9, q9, d2[0] \n"// interpolate (step2) 1st set of coefs + "vqrdmulh.s32 q11, q11, d2[1] \n"// interpolate (step3) 2nd set of coefs + "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits + "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits + + "vadd.s32 q8, q8, q9 \n"// interpolate (step3) 1st set + "vadd.s32 q10, q10, q11 \n"// interpolate (step4) 2nd set + + "vqrdmulh.s32 q12, q12, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef + "vqrdmulh.s32 q15, q15, q10 \n"// multiply samples by interpolated coef + + "vadd.s32 q0, q0, q12 \n"// accumulate result + "vadd.s32 q4, q4, q13 \n"// accumulate result + "vadd.s32 q0, q0, q14 \n"// accumulate result + "vadd.s32 q4, q4, q15 \n"// accumulate result + + "subs %[count], %[count], #4 \n"// update loop counter + "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples + + "bne 1b \n"// loop + + ASSEMBLY_ACCUMULATE_STEREO + + 
: [out] "=Uv" (out[0]), + [count] "+r" (count), + [coefsP0] "+r" (coefsP), + [coefsP1] "+r" (coefsP1), + [coefsN0] "+r" (coefsN), + [coefsN1] "+r" (coefsN1), + [sP] "+r" (sP), + [sN] "+r" (sN) + : [lerpP] "r" (lerpP), + [vLR] "r" (volumeLR) + : "cc", "memory", + "q0", "q1", "q2", "q3", "q4", + "q8", "q9", "q10", "q11", + "q12", "q13", "q14", "q15" + ); +} + +#endif //USE_NEON + +}; // namespace android + +#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H*/ diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp index 207f26b..d0a7a58 100644 --- a/services/audioflinger/AudioResamplerSinc.cpp +++ b/services/audioflinger/AudioResamplerSinc.cpp @@ -540,7 +540,7 @@ void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount, uint32_t phaseIncrement = mPhaseIncrement; size_t outputIndex = 0; size_t outputSampleCount = outFrameCount * 2; - size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate; + size_t inFrameCount = getInFrameCountRequired(outFrameCount); while (outputIndex < outputSampleCount) { // buffer is empty, fetch a new one diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp index 010e233..29b56db 100644 --- a/services/audioflinger/Effects.cpp +++ b/services/audioflinger/Effects.cpp @@ -116,8 +116,9 @@ status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle) continue; } // first non destroyed handle is considered in control - if (controlHandle == NULL) + if (controlHandle == NULL) { controlHandle = h; + } if (h->priority() <= priority) { break; } @@ -804,7 +805,112 @@ bool AudioFlinger::EffectModule::isOffloaded() const return mOffloaded; } -void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args) +String8 effectFlagsToString(uint32_t flags) { + String8 s; + + s.append("conn. 
mode: "); + switch (flags & EFFECT_FLAG_TYPE_MASK) { + case EFFECT_FLAG_TYPE_INSERT: s.append("insert"); break; + case EFFECT_FLAG_TYPE_AUXILIARY: s.append("auxiliary"); break; + case EFFECT_FLAG_TYPE_REPLACE: s.append("replace"); break; + case EFFECT_FLAG_TYPE_PRE_PROC: s.append("preproc"); break; + case EFFECT_FLAG_TYPE_POST_PROC: s.append("postproc"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + + s.append("insert pref: "); + switch (flags & EFFECT_FLAG_INSERT_MASK) { + case EFFECT_FLAG_INSERT_ANY: s.append("any"); break; + case EFFECT_FLAG_INSERT_FIRST: s.append("first"); break; + case EFFECT_FLAG_INSERT_LAST: s.append("last"); break; + case EFFECT_FLAG_INSERT_EXCLUSIVE: s.append("exclusive"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + + s.append("volume mgmt: "); + switch (flags & EFFECT_FLAG_VOLUME_MASK) { + case EFFECT_FLAG_VOLUME_NONE: s.append("none"); break; + case EFFECT_FLAG_VOLUME_CTRL: s.append("implements control"); break; + case EFFECT_FLAG_VOLUME_IND: s.append("requires indication"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + + uint32_t devind = flags & EFFECT_FLAG_DEVICE_MASK; + if (devind) { + s.append("device indication: "); + switch (devind) { + case EFFECT_FLAG_DEVICE_IND: s.append("requires updates"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + } + + s.append("input mode: "); + switch (flags & EFFECT_FLAG_INPUT_MASK) { + case EFFECT_FLAG_INPUT_DIRECT: s.append("direct"); break; + case EFFECT_FLAG_INPUT_PROVIDER: s.append("provider"); break; + case EFFECT_FLAG_INPUT_BOTH: s.append("direct+provider"); break; + default: s.append("not set"); break; + } + s.append(", "); + + s.append("output mode: "); + switch (flags & EFFECT_FLAG_OUTPUT_MASK) { + case EFFECT_FLAG_OUTPUT_DIRECT: s.append("direct"); break; + case EFFECT_FLAG_OUTPUT_PROVIDER: s.append("provider"); break; + case EFFECT_FLAG_OUTPUT_BOTH: 
s.append("direct+provider"); break; + default: s.append("not set"); break; + } + s.append(", "); + + uint32_t accel = flags & EFFECT_FLAG_HW_ACC_MASK; + if (accel) { + s.append("hardware acceleration: "); + switch (accel) { + case EFFECT_FLAG_HW_ACC_SIMPLE: s.append("non-tunneled"); break; + case EFFECT_FLAG_HW_ACC_TUNNEL: s.append("tunneled"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + } + + uint32_t modeind = flags & EFFECT_FLAG_AUDIO_MODE_MASK; + if (modeind) { + s.append("mode indication: "); + switch (modeind) { + case EFFECT_FLAG_AUDIO_MODE_IND: s.append("required"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + } + + uint32_t srcind = flags & EFFECT_FLAG_AUDIO_SOURCE_MASK; + if (srcind) { + s.append("source indication: "); + switch (srcind) { + case EFFECT_FLAG_AUDIO_SOURCE_IND: s.append("required"); break; + default: s.append("unknown/reserved"); break; + } + s.append(", "); + } + + if (flags & EFFECT_FLAG_OFFLOAD_MASK) { + s.append("offloadable, "); + } + + int len = s.length(); + if (s.length() > 2) { + char *str = s.lockBuffer(len); + s.unlockBuffer(len - 2); + } + return s; +} + + +void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -838,9 +944,10 @@ void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args) mDescriptor.type.node[2], mDescriptor.type.node[3],mDescriptor.type.node[4],mDescriptor.type.node[5]); result.append(buffer); - snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X\n", + snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X (%s)\n", mDescriptor.apiVersion, - mDescriptor.flags); + mDescriptor.flags, + effectFlagsToString(mDescriptor.flags).string()); result.append(buffer); snprintf(buffer, SIZE, "\t\t- name: %s\n", mDescriptor.name); @@ -851,37 +958,37 @@ void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args) 
result.append("\t\t- Input configuration:\n"); result.append("\t\t\tFrames Smp rate Channels Format Buffer\n"); - snprintf(buffer, SIZE, "\t\t\t%05zu %05d %08x %6d %p\n", + snprintf(buffer, SIZE, "\t\t\t%05zu %05d %08x %6d (%s) %p\n", mConfig.inputCfg.buffer.frameCount, mConfig.inputCfg.samplingRate, mConfig.inputCfg.channels, mConfig.inputCfg.format, + formatToString((audio_format_t)mConfig.inputCfg.format), mConfig.inputCfg.buffer.raw); result.append(buffer); result.append("\t\t- Output configuration:\n"); result.append("\t\t\tBuffer Frames Smp rate Channels Format\n"); - snprintf(buffer, SIZE, "\t\t\t%p %05zu %05d %08x %d\n", + snprintf(buffer, SIZE, "\t\t\t%p %05zu %05d %08x %d (%s)\n", mConfig.outputCfg.buffer.raw, mConfig.outputCfg.buffer.frameCount, mConfig.outputCfg.samplingRate, mConfig.outputCfg.channels, - mConfig.outputCfg.format); + mConfig.outputCfg.format, + formatToString((audio_format_t)mConfig.outputCfg.format)); result.append(buffer); snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size()); result.append(buffer); - result.append("\t\t\tPid Priority Ctrl Locked client server\n"); + result.append("\t\t\t Pid Priority Ctrl Locked client server\n"); for (size_t i = 0; i < mHandles.size(); ++i) { EffectHandle *handle = mHandles[i]; if (handle != NULL && !handle->destroyed_l()) { - handle->dump(buffer, SIZE); + handle->dumpToBuffer(buffer, SIZE); result.append(buffer); } } - result.append("\n"); - write(fd, result.string(), result.length()); if (locked) { @@ -911,18 +1018,15 @@ AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect, } int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int); mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset); - if (mCblkMemory != 0) { - mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer()); - - if (mCblk != NULL) { - new(mCblk) effect_param_cblk_t(); - mBuffer = (uint8_t *)mCblk + bufOffset; - } - } else { + if (mCblkMemory == 0 || 
+ (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) { ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + sizeof(effect_param_cblk_t)); + mCblkMemory.clear(); return; } + new(mCblk) effect_param_cblk_t(); + mBuffer = (uint8_t *)mCblk + bufOffset; } AudioFlinger::EffectHandle::~EffectHandle() @@ -939,6 +1043,11 @@ AudioFlinger::EffectHandle::~EffectHandle() disconnect(false); } +status_t AudioFlinger::EffectHandle::initCheck() +{ + return mClient == 0 || mCblkMemory != 0 ? OK : NO_MEMORY; +} + status_t AudioFlinger::EffectHandle::enable() { ALOGV("enable %p", this); @@ -1179,15 +1288,15 @@ status_t AudioFlinger::EffectHandle::onTransact( } -void AudioFlinger::EffectHandle::dump(char* buffer, size_t size) +void AudioFlinger::EffectHandle::dumpToBuffer(char* buffer, size_t size) { bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock); - snprintf(buffer, size, "\t\t\t%05d %05d %01u %01u %05u %05u\n", + snprintf(buffer, size, "\t\t\t%5d %5d %3s %3s %5u %5u\n", (mClient == 0) ? getpid_cached : mClient->pid(), mPriority, - mHasControl, - !locked, + mHasControl ? "yes" : "no", + locked ? "yes" : "no", mCblk ? mCblk->clientIndex : 0, mCblk ? 
mCblk->serverIndex : 0 ); @@ -1568,33 +1677,35 @@ void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args) char buffer[SIZE]; String8 result; - snprintf(buffer, SIZE, "Effects for session %d:\n", mSessionId); + size_t numEffects = mEffects.size(); + snprintf(buffer, SIZE, " %d effects for session %d\n", numEffects, mSessionId); result.append(buffer); - bool locked = AudioFlinger::dumpTryLock(mLock); - // failed to lock - AudioFlinger is probably deadlocked - if (!locked) { - result.append("\tCould not lock mutex:\n"); - } + if (numEffects) { + bool locked = AudioFlinger::dumpTryLock(mLock); + // failed to lock - AudioFlinger is probably deadlocked + if (!locked) { + result.append("\tCould not lock mutex:\n"); + } - result.append("\tNum fx In buffer Out buffer Active tracks:\n"); - snprintf(buffer, SIZE, "\t%02zu %p %p %d\n", - mEffects.size(), - mInBuffer, - mOutBuffer, - mActiveTrackCnt); - result.append(buffer); - write(fd, result.string(), result.size()); + result.append("\tIn buffer Out buffer Active tracks:\n"); + snprintf(buffer, SIZE, "\t%p %p %d\n", + mInBuffer, + mOutBuffer, + mActiveTrackCnt); + result.append(buffer); + write(fd, result.string(), result.size()); - for (size_t i = 0; i < mEffects.size(); ++i) { - sp<EffectModule> effect = mEffects[i]; - if (effect != 0) { - effect->dump(fd, args); + for (size_t i = 0; i < numEffects; ++i) { + sp<EffectModule> effect = mEffects[i]; + if (effect != 0) { + effect->dump(fd, args); + } } - } - if (locked) { - mLock.unlock(); + if (locked) { + mLock.unlock(); + } } } diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h index b717857..ccc4825 100644 --- a/services/audioflinger/Effects.h +++ b/services/audioflinger/Effects.h @@ -169,6 +169,7 @@ public: const sp<IEffectClient>& effectClient, int32_t priority); virtual ~EffectHandle(); + virtual status_t initCheck(); // IEffect virtual status_t enable(); @@ -208,7 +209,7 @@ public: // destroyed_l() must be called with the 
associated EffectModule mLock held bool destroyed_l() const { return mDestroyed; } - void dump(char* buffer, size_t size); + void dumpToBuffer(char* buffer, size_t size); protected: friend class AudioFlinger; // for mEffect, mHasControl, mEnabled diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp index 85d637e..ca0d65e 100644 --- a/services/audioflinger/FastMixer.cpp +++ b/services/audioflinger/FastMixer.cpp @@ -212,7 +212,7 @@ bool FastMixer::threadLoop() case FastMixerState::MIX_WRITE: break; default: - LOG_FATAL("bad command %d", command); + LOG_ALWAYS_FATAL("bad command %d", command); } // there is a non-idle state available to us; did the state change? @@ -236,9 +236,10 @@ bool FastMixer::threadLoop() sampleRate = Format_sampleRate(format); ALOG_ASSERT(Format_channelCount(format) == FCC_2); } + dumpState->mSampleRate = sampleRate; } - if ((format != previousFormat) || (frameCount != previous->mFrameCount)) { + if ((!Format_isEqual(format, previousFormat)) || (frameCount != previous->mFrameCount)) { // FIXME to avoid priority inversion, don't delete here delete mixer; mixer = NULL; @@ -440,8 +441,9 @@ bool FastMixer::threadLoop() } int64_t pts; - if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) + if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) { pts = AudioBufferProvider::kInvalidPTS; + } // process() is CPU-bound mixer->process(pts); @@ -695,7 +697,7 @@ static int compare_uint32_t(const void *pa, const void *pb) void FastMixerDumpState::dump(int fd) const { if (mCommand == FastMixerState::INITIAL) { - fdprintf(fd, "FastMixer not initialized\n"); + fdprintf(fd, " FastMixer not initialized\n"); return; } #define COMMAND_MAX 32 @@ -729,10 +731,10 @@ void FastMixerDumpState::dump(int fd) const double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) + (mMeasuredWarmupTs.tv_nsec / 1000000.0); double mixPeriodSec = (double) mFrameCount / (double) mSampleRate; - fdprintf(fd, 
"FastMixer command=%s writeSequence=%u framesWritten=%u\n" - " numTracks=%u writeErrors=%u underruns=%u overruns=%u\n" - " sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n" - " mixPeriod=%.2f ms\n", + fdprintf(fd, " FastMixer command=%s writeSequence=%u framesWritten=%u\n" + " numTracks=%u writeErrors=%u underruns=%u overruns=%u\n" + " sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n" + " mixPeriod=%.2f ms\n", string, mWriteSequence, mFramesWritten, mNumTracks, mWriteErrors, mUnderruns, mOverruns, mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles, @@ -783,14 +785,20 @@ void FastMixerDumpState::dump(int fd) const previousCpukHz = sampleCpukHz; #endif } - fdprintf(fd, "Simple moving statistics over last %.1f seconds:\n", wall.n() * mixPeriodSec); - fdprintf(fd, " wall clock time in ms per mix cycle:\n" - " mean=%.2f min=%.2f max=%.2f stddev=%.2f\n", - wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6, wall.stddev()*1e-6); - fdprintf(fd, " raw CPU load in us per mix cycle:\n" - " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n", - loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3, - loadNs.stddev()*1e-3); + if (n) { + fdprintf(fd, " Simple moving statistics over last %.1f seconds:\n", + wall.n() * mixPeriodSec); + fdprintf(fd, " wall clock time in ms per mix cycle:\n" + " mean=%.2f min=%.2f max=%.2f stddev=%.2f\n", + wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6, + wall.stddev()*1e-6); + fdprintf(fd, " raw CPU load in us per mix cycle:\n" + " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n", + loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3, + loadNs.stddev()*1e-3); + } else { + fdprintf(fd, " No FastMixer statistics available currently\n"); + } #ifdef CPU_FREQUENCY_STATISTICS fdprintf(fd, " CPU clock frequency in MHz:\n" " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n", @@ -808,9 +816,9 @@ void FastMixerDumpState::dump(int fd) const left.sample(tail[i]); right.sample(tail[n - (i 
+ 1)]); } - fdprintf(fd, "Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n" - " left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n" - " right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n", + fdprintf(fd, " Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n" + " left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n" + " right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n", left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6, right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6, right.stddev()*1e-6); @@ -823,9 +831,9 @@ void FastMixerDumpState::dump(int fd) const // Instead we always display all tracks, with an indication // of whether we think the track is active. uint32_t trackMask = mTrackMask; - fdprintf(fd, "Fast tracks: kMaxFastTracks=%u activeMask=%#x\n", + fdprintf(fd, " Fast tracks: kMaxFastTracks=%u activeMask=%#x\n", FastMixerState::kMaxFastTracks, trackMask); - fdprintf(fd, "Index Active Full Partial Empty Recent Ready\n"); + fdprintf(fd, " Index Active Full Partial Empty Recent Ready\n"); for (uint32_t i = 0; i < FastMixerState::kMaxFastTracks; ++i, trackMask >>= 1) { bool isActive = trackMask & 1; const FastTrackDump *ftDump = &mTracks[i]; @@ -845,7 +853,7 @@ void FastMixerDumpState::dump(int fd) const mostRecent = "?"; break; } - fdprintf(fd, "%5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? "yes" : "no", + fdprintf(fd, " %5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? 
"yes" : "no", (underruns.mBitFields.mFull) & UNDERRUN_MASK, (underruns.mBitFields.mPartial) & UNDERRUN_MASK, (underruns.mBitFields.mEmpty) & UNDERRUN_MASK, diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h index 6158925..7aeddef 100644 --- a/services/audioflinger/FastMixer.h +++ b/services/audioflinger/FastMixer.h @@ -18,10 +18,10 @@ #define ANDROID_AUDIO_FAST_MIXER_H #include <utils/Debug.h> -#include <utils/Thread.h> extern "C" { #include "../private/bionic_futex.h" } +#include "FastThread.h" #include "StateQueue.h" #include "FastMixerState.h" @@ -29,10 +29,10 @@ namespace android { typedef StateQueue<FastMixerState> FastMixerStateQueue; -class FastMixer : public Thread { +class FastMixer : public FastThread { public: - FastMixer() : Thread(false /*canCallJava*/) { } + FastMixer() : FastThread() { } virtual ~FastMixer() { } FastMixerStateQueue* sq() { return &mSQ; } diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp index 43ff233..4631274 100644 --- a/services/audioflinger/FastMixerState.cpp +++ b/services/audioflinger/FastMixerState.cpp @@ -29,10 +29,10 @@ FastTrack::~FastTrack() { } -FastMixerState::FastMixerState() : +FastMixerState::FastMixerState() : FastThreadState(), mFastTracksGen(0), mTrackMask(0), mOutputSink(NULL), mOutputSinkGen(0), - mFrameCount(0), mCommand(INITIAL), mColdFutexAddr(NULL), mColdGen(0), - mDumpState(NULL), mTeeSink(NULL), mNBLogWriter(NULL) + mFrameCount(0), + mDumpState(NULL), mTeeSink(NULL) { } diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h index 9739fe9..10696e8 100644 --- a/services/audioflinger/FastMixerState.h +++ b/services/audioflinger/FastMixerState.h @@ -21,6 +21,7 @@ #include <media/ExtendedAudioBufferProvider.h> #include <media/nbaio/NBAIO.h> #include <media/nbaio/NBLog.h> +#include "FastThreadState.h" namespace android { @@ -48,7 +49,7 @@ struct FastTrack { }; // Represents a single state of the 
fast mixer -struct FastMixerState { +struct FastMixerState : FastThreadState { FastMixerState(); /*virtual*/ ~FastMixerState(); @@ -61,23 +62,17 @@ struct FastMixerState { NBAIO_Sink* mOutputSink; // HAL output device, must already be negotiated int mOutputSinkGen; // increment when mOutputSink is assigned size_t mFrameCount; // number of frames per fast mix buffer - enum Command { - INITIAL = 0, // used only for the initial state - HOT_IDLE = 1, // do nothing - COLD_IDLE = 2, // wait for the futex - IDLE = 3, // either HOT_IDLE or COLD_IDLE - EXIT = 4, // exit from thread + + // Extends FastThreadState::Command + static const Command // The following commands also process configuration changes, and can be "or"ed: MIX = 0x8, // mix tracks WRITE = 0x10, // write to output sink - MIX_WRITE = 0x18, // mix tracks and write to output sink - } mCommand; - int32_t* mColdFutexAddr; // for COLD_IDLE only, pointer to the associated futex - unsigned mColdGen; // increment when COLD_IDLE is requested so it's only performed once + MIX_WRITE = 0x18; // mix tracks and write to output sink + // This might be a one-time configuration rather than per-state FastMixerDumpState* mDumpState; // if non-NULL, then update dump state periodically NBAIO_Sink* mTeeSink; // if non-NULL, then duplicate write()s to this non-blocking sink - NBLog::Writer* mNBLogWriter; // non-blocking logger }; // struct FastMixerState } // namespace android diff --git a/services/audioflinger/FastThread.h b/services/audioflinger/FastThread.h new file mode 100644 index 0000000..6caf7bd --- /dev/null +++ b/services/audioflinger/FastThread.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_AUDIO_FAST_THREAD_H +#define ANDROID_AUDIO_FAST_THREAD_H + +#include <utils/Thread.h> + +namespace android { + +// FastThread is the common abstract base class of FastMixer and FastCapture +class FastThread : public Thread { + +public: + FastThread() : Thread(false /*canCallJava*/) { } + virtual ~FastThread() { } + +protected: + virtual bool threadLoop() = 0; + +}; // class FastThread + +} // android + +#endif // ANDROID_AUDIO_FAST_THREAD_H diff --git a/services/audioflinger/FastThreadState.cpp b/services/audioflinger/FastThreadState.cpp new file mode 100644 index 0000000..427ada5 --- /dev/null +++ b/services/audioflinger/FastThreadState.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "FastThreadState.h" + +namespace android { + +FastThreadState::FastThreadState() : + mCommand(INITIAL), mColdFutexAddr(NULL), mColdGen(0), mNBLogWriter(NULL) +{ +} + +FastThreadState::~FastThreadState() +{ +} + +} // namespace android diff --git a/services/audioflinger/FastThreadState.h b/services/audioflinger/FastThreadState.h new file mode 100644 index 0000000..148fb7b --- /dev/null +++ b/services/audioflinger/FastThreadState.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIO_FAST_THREAD_STATE_H +#define ANDROID_AUDIO_FAST_THREAD_STATE_H + +#include <stdint.h> +#include <media/nbaio/NBLog.h> + +namespace android { + +// Represents a single state of a FastThread +struct FastThreadState { + FastThreadState(); + /*virtual*/ ~FastThreadState(); + + typedef uint32_t Command; + static const Command + INITIAL = 0, // used only for the initial state + HOT_IDLE = 1, // do nothing + COLD_IDLE = 2, // wait for the futex + IDLE = 3, // either HOT_IDLE or COLD_IDLE + EXIT = 4; // exit from thread + // additional values defined per subclass + Command mCommand; + + int32_t* mColdFutexAddr; // for COLD_IDLE only, pointer to the associated futex + unsigned mColdGen; // increment when COLD_IDLE is requested so it's only performed once + + NBLog::Writer* mNBLogWriter; // non-blocking logger +}; // struct FastThreadState + +} // android + +#endif // ANDROID_AUDIO_FAST_THREAD_STATE_H diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h index 43b77f3..e9c6834 100644 --- a/services/audioflinger/PlaybackTracks.h +++ b/services/audioflinger/PlaybackTracks.h @@ -34,9 +34,10 @@ public: int uid, IAudioFlinger::track_flags_t flags); virtual ~Track(); + virtual status_t initCheck() const; static void appendDumpHeader(String8& result); - void dump(char* buffer, size_t size); + void dump(char* buffer, size_t size, bool active); virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE, int triggerSession = 0); @@ -93,6 +94,10 @@ protected: bool isReady() const; void setPaused() { mState = PAUSED; } void reset(); + bool isFlushPending() const { return mFlushHwPending; } + void flushAck(); + bool isResumePending(); + void resumeAck(); bool isOutputTrack() const { return (mStreamType == AUDIO_STREAM_CNT); @@ -154,6 +159,7 @@ private: bool mIsInvalid; // non-resettable latch, set by invalidate() AudioTrackServerProxy* mAudioTrackServerProxy; bool mResumeToStopping; // track 
was paused in stopping state. + bool mFlushHwPending; // track requests for thread flush }; // end of Track class TimedTrack : public Track { diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h index 57de568..6fc06d8 100644 --- a/services/audioflinger/RecordTracks.h +++ b/services/audioflinger/RecordTracks.h @@ -45,7 +45,10 @@ public: return tmp; } static void appendDumpHeader(String8& result); - void dump(char* buffer, size_t size); + void dump(char* buffer, size_t size, bool active); + + void handleSyncStartEvent(const sp<SyncEvent>& event); + void clearSyncStartEvent(); private: friend class AudioFlinger; // for mState @@ -59,5 +62,33 @@ private: // releaseBuffer() not overridden bool mOverflow; // overflow on most recent attempt to fill client buffer - AudioRecordServerProxy* mAudioRecordServerProxy; + + // updated by RecordThread::readInputParameters_l() + AudioResampler *mResampler; + + // interleaved stereo pairs of fixed-point Q4.27 + int32_t *mRsmpOutBuffer; + // current allocated frame count for the above, which may be larger than needed + size_t mRsmpOutFrameCount; + + size_t mRsmpInUnrel; // unreleased frames remaining from + // most recent getNextBuffer + // for debug only + + // rolling counter that is never cleared + int32_t mRsmpInFront; // next available frame + + AudioBufferProvider::Buffer mSink; // references client's buffer sink in shared memory + + // sync event triggering actual audio capture. Frames read before this event will + // be dropped and therefore not read by the application. + sp<SyncEvent> mSyncStartEvent; + + // number of captured frames to drop after the start sync event has been received. 
+ // when < 0, maximum frames to drop before starting capture even if sync event is + // not received + ssize_t mFramesToDrop; + + // used by resampler to find source frames + ResamplerBufferProvider *mResamplerBufferProvider; }; diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp index cac785a..ae3dd8b 100644 --- a/services/audioflinger/Threads.cpp +++ b/services/audioflinger/Threads.cpp @@ -34,6 +34,7 @@ #include <audio_effects/effect_ns.h> #include <audio_effects/effect_aec.h> #include <audio_utils/primitives.h> +#include <audio_utils/format.h> // NBAIO implementations #include <media/nbaio/AudioStreamOutSink.h> @@ -104,10 +105,10 @@ static const uint32_t kMinThreadSleepTimeUs = 5000; // maximum divider applied to the active sleep time in the mixer thread loop static const uint32_t kMaxThreadSleepTimeShift = 2; -// minimum normal mix buffer size, expressed in milliseconds rather than frames -static const uint32_t kMinNormalMixBufferSizeMs = 20; -// maximum normal mix buffer size -static const uint32_t kMaxNormalMixBufferSizeMs = 24; +// minimum normal sink buffer size, expressed in milliseconds rather than frames +static const uint32_t kMinNormalSinkBufferSizeMs = 20; +// maximum normal sink buffer size +static const uint32_t kMaxNormalSinkBufferSizeMs = 24; // Offloaded output thread standby delay: allows track transition without going to standby static const nsecs_t kOffloadStandbyDelayNs = seconds(1); @@ -185,7 +186,11 @@ CpuStats::CpuStats() { } -void CpuStats::sample(const String8 &title) { +void CpuStats::sample(const String8 &title +#ifndef DEBUG_CPU_USAGE + __unused +#endif + ) { #ifdef DEBUG_CPU_USAGE // get current thread's delta CPU time in wall clock ns double wcNs; @@ -269,8 +274,9 @@ AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio : Thread(false /*canCallJava*/), mType(type), mAudioFlinger(audioFlinger), - // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and 
mFormat are - // set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters() + // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize + // are set by PlaybackThread::readOutputParameters_l() or + // RecordThread::readInputParameters_l() mParamStatus(NO_ERROR), //FIXME: mStandby should be true here. Is this some kind of hack? mStandby(false), mOutDevice(outDevice), mInDevice(inDevice), @@ -297,6 +303,17 @@ AudioFlinger::ThreadBase::~ThreadBase() } } +status_t AudioFlinger::ThreadBase::readyToRun() +{ + status_t status = initCheck(); + if (status == NO_ERROR) { + ALOGI("AudioFlinger's thread %p ready to run", this); + } else { + ALOGE("No working audio driver found."); + } + return status; +} + void AudioFlinger::ThreadBase::exit() { ALOGV("ThreadBase::exit"); @@ -369,7 +386,13 @@ void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32 void AudioFlinger::ThreadBase::processConfigEvents() { - mLock.lock(); + Mutex::Autolock _l(mLock); + processConfigEvents_l(); +} + +// post condition: mConfigEvents.isEmpty() +void AudioFlinger::ThreadBase::processConfigEvents_l() +{ while (!mConfigEvents.isEmpty()) { ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size()); ConfigEvent *event = mConfigEvents[0]; @@ -377,35 +400,81 @@ void AudioFlinger::ThreadBase::processConfigEvents() // release mLock before locking AudioFlinger mLock: lock order is always // AudioFlinger then ThreadBase to avoid cross deadlock mLock.unlock(); - switch(event->type()) { - case CFG_EVENT_PRIO: { - PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event); - // FIXME Need to understand why this has be done asynchronously - int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(), - true /*asynchronous*/); - if (err != 0) { - ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; " - "error %d", - prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), 
err); - } - } break; - case CFG_EVENT_IO: { - IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event); - mAudioFlinger->mLock.lock(); + switch (event->type()) { + case CFG_EVENT_PRIO: { + PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event); + // FIXME Need to understand why this has be done asynchronously + int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(), + true /*asynchronous*/); + if (err != 0) { + ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d", + prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err); + } + } break; + case CFG_EVENT_IO: { + IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event); + { + Mutex::Autolock _l(mAudioFlinger->mLock); audioConfigChanged_l(ioEvent->event(), ioEvent->param()); - mAudioFlinger->mLock.unlock(); - } break; - default: - ALOGE("processConfigEvents() unknown event type %d", event->type()); - break; + } + } break; + default: + ALOGE("processConfigEvents() unknown event type %d", event->type()); + break; } delete event; mLock.lock(); } - mLock.unlock(); } -void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args) +String8 channelMaskToString(audio_channel_mask_t mask, bool output) { + String8 s; + if (output) { + if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, "); + if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, "); + if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, "); + if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, "); + if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, "); + if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, "); + if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, "); + if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, "); + if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, "); + if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) 
s.append("side-left, "); + if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,"); + if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, "); + if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " ); + if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " ); + if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, "); + } else { + if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, "); + if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, "); + if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, "); + if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, "); + if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, "); + if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, "); + if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, "); + if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, "); + if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, "); + if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, "); + if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, "); + if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, "); + if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, "); + if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, "); + if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, "); + } + int len = s.length(); + if (s.length() > 2) { + char *str = s.lockBuffer(len); + s.unlockBuffer(len - 2); + } + return s; +} + +void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; @@ -413,47 +482,43 @@ void 
AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args) bool locked = AudioFlinger::dumpTryLock(mLock); if (!locked) { - snprintf(buffer, SIZE, "thread %p maybe dead locked\n", this); - write(fd, buffer, strlen(buffer)); - } - - snprintf(buffer, SIZE, "io handle: %d\n", mId); - result.append(buffer); - snprintf(buffer, SIZE, "TID: %d\n", getTid()); - result.append(buffer); - snprintf(buffer, SIZE, "standby: %d\n", mStandby); - result.append(buffer); - snprintf(buffer, SIZE, "Sample rate: %u\n", mSampleRate); - result.append(buffer); - snprintf(buffer, SIZE, "HAL frame count: %zu\n", mFrameCount); - result.append(buffer); - snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount); - result.append(buffer); - snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask); - result.append(buffer); - snprintf(buffer, SIZE, "Format: %d\n", mFormat); - result.append(buffer); - snprintf(buffer, SIZE, "Frame size: %zu\n", mFrameSize); - result.append(buffer); - - snprintf(buffer, SIZE, "\nPending setParameters commands: \n"); - result.append(buffer); - result.append(" Index Command"); - for (size_t i = 0; i < mNewParameters.size(); ++i) { - snprintf(buffer, SIZE, "\n %02zu ", i); - result.append(buffer); - result.append(mNewParameters[i]); + fdprintf(fd, "thread %p maybe dead locked\n", this); + } + + fdprintf(fd, " I/O handle: %d\n", mId); + fdprintf(fd, " TID: %d\n", getTid()); + fdprintf(fd, " Standby: %s\n", mStandby ? 
"yes" : "no"); + fdprintf(fd, " Sample rate: %u\n", mSampleRate); + fdprintf(fd, " HAL frame count: %zu\n", mFrameCount); + fdprintf(fd, " HAL buffer size: %u bytes\n", mBufferSize); + fdprintf(fd, " Channel Count: %u\n", mChannelCount); + fdprintf(fd, " Channel Mask: 0x%08x (%s)\n", mChannelMask, + channelMaskToString(mChannelMask, mType != RECORD).string()); + fdprintf(fd, " Format: 0x%x (%s)\n", mFormat, formatToString(mFormat)); + fdprintf(fd, " Frame size: %zu\n", mFrameSize); + fdprintf(fd, " Pending setParameters commands:"); + size_t numParams = mNewParameters.size(); + if (numParams) { + fdprintf(fd, "\n Index Command"); + for (size_t i = 0; i < numParams; ++i) { + fdprintf(fd, "\n %02zu ", i); + fdprintf(fd, mNewParameters[i]); + } + fdprintf(fd, "\n"); + } else { + fdprintf(fd, " none\n"); } - - snprintf(buffer, SIZE, "\n\nPending config events: \n"); - result.append(buffer); - for (size_t i = 0; i < mConfigEvents.size(); i++) { - mConfigEvents[i]->dump(buffer, SIZE); - result.append(buffer); + fdprintf(fd, " Pending config events:"); + size_t numConfig = mConfigEvents.size(); + if (numConfig) { + for (size_t i = 0; i < numConfig; i++) { + mConfigEvents[i]->dump(buffer, SIZE); + fdprintf(fd, "\n %s", buffer); + } + fdprintf(fd, "\n"); + } else { + fdprintf(fd, " none\n"); } - result.append("\n"); - - write(fd, result.string(), result.size()); if (locked) { mLock.unlock(); @@ -466,10 +531,11 @@ void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& char buffer[SIZE]; String8 result; - snprintf(buffer, SIZE, "\n- %zu Effect Chains:\n", mEffectChains.size()); + size_t numEffectChains = mEffectChains.size(); + snprintf(buffer, SIZE, " %zu Effect Chains\n", numEffectChains); write(fd, buffer, strlen(buffer)); - for (size_t i = 0; i < mEffectChains.size(); ++i) { + for (size_t i = 0; i < numEffectChains; ++i) { sp<EffectChain> chain = mEffectChains[i]; if (chain != 0) { chain->dump(fd, args); @@ -586,7 +652,7 @@ void 
AudioFlinger::ThreadBase::clearPowerManager() mPowerManager.clear(); } -void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who) +void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused) { sp<ThreadBase> thread = mThread.promote(); if (thread != 0) { @@ -739,8 +805,7 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( int sessionId, effect_descriptor_t *desc, int *enabled, - status_t *status - ) + status_t *status) { sp<EffectModule> effect; sp<EffectHandle> handle; @@ -756,6 +821,15 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( goto Exit; } + // Reject any effect on Direct output threads for now, since the format of + // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo). + if (mType == DIRECT) { + ALOGW("createEffect_l() Cannot add effect %s on Direct output type thread %s", + desc->name, mName); + lStatus = BAD_VALUE; + goto Exit; + } + // Allow global effects only on offloaded and mixer threads if (sessionId == AUDIO_SESSION_OUTPUT_MIX) { switch (mType) { @@ -829,7 +903,10 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l( } // create effect handle and connect it to effect module handle = new EffectHandle(effect, client, effectClient, priority); - lStatus = effect->addHandle(handle.get()); + lStatus = handle->initCheck(); + if (lStatus == OK) { + lStatus = effect->addHandle(handle.get()); + } if (enabled != NULL) { *enabled = (int)effect->isEnabled(); } @@ -850,9 +927,7 @@ Exit: handle.clear(); } - if (status != NULL) { - *status = lStatus; - } + *status = lStatus; return handle; } @@ -1001,8 +1076,18 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge audio_devices_t device, type_t type) : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type), - mNormalFrameCount(0), mMixBuffer(NULL), - mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0), + 
mNormalFrameCount(0), mSinkBuffer(NULL), + mMixerBufferEnabled(false), + mMixerBuffer(NULL), + mMixerBufferSize(0), + mMixerBufferFormat(AUDIO_FORMAT_INVALID), + mMixerBufferValid(false), + mEffectBufferEnabled(false), + mEffectBuffer(NULL), + mEffectBufferSize(0), + mEffectBufferFormat(AUDIO_FORMAT_INVALID), + mEffectBufferValid(false), + mSuspended(0), mBytesWritten(0), mActiveTracksGeneration(0), // mStreamTypes[] initialized in constructor body mOutput(output), @@ -1044,11 +1129,11 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge } } - readOutputParameters(); + readOutputParameters_l(); // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor // There is no AUDIO_STREAM_MIN, and ++ operator does not compile - for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT; + for (audio_stream_type_t stream = AUDIO_STREAM_MIN; stream < AUDIO_STREAM_CNT; stream = (audio_stream_type_t) (stream + 1)) { mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream); mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream); @@ -1060,7 +1145,9 @@ AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinge AudioFlinger::PlaybackThread::~PlaybackThread() { mAudioFlinger->unregisterWriter(mNBLogWriter); - delete [] mAllocMixBuffer; + free(mSinkBuffer); + free(mMixerBuffer); + free(mEffectBuffer); } void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args) @@ -1070,13 +1157,13 @@ void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args) dumpEffectChains(fd, args); } -void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args) +void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; String8 result; - result.appendFormat("Output thread %p stream volumes in dB:\n ", this); + result.appendFormat(" Stream volumes in 
dB: "); for (int i = 0; i < AUDIO_STREAM_CNT; ++i) { const stream_type_t *st = &mStreamTypes[i]; if (i > 0) { @@ -1091,75 +1178,69 @@ void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& ar write(fd, result.string(), result.length()); result.clear(); - snprintf(buffer, SIZE, "Output thread %p tracks\n", this); - result.append(buffer); - Track::appendDumpHeader(result); - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<Track> track = mTracks[i]; - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); + // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way. + FastTrackUnderruns underruns = getFastTrackUnderruns(0); + fdprintf(fd, " Normal mixer raw underrun counters: partial=%u empty=%u\n", + underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty); + + size_t numtracks = mTracks.size(); + size_t numactive = mActiveTracks.size(); + fdprintf(fd, " %d Tracks", numtracks); + size_t numactiveseen = 0; + if (numtracks) { + fdprintf(fd, " of which %d are active\n", numactive); + Track::appendDumpHeader(result); + for (size_t i = 0; i < numtracks; ++i) { + sp<Track> track = mTracks[i]; + if (track != 0) { + bool active = mActiveTracks.indexOf(track) >= 0; + if (active) { + numactiveseen++; + } + track->dump(buffer, SIZE, active); + result.append(buffer); + } } + } else { + result.append("\n"); } - - snprintf(buffer, SIZE, "Output thread %p active tracks\n", this); - result.append(buffer); - Track::appendDumpHeader(result); - for (size_t i = 0; i < mActiveTracks.size(); ++i) { - sp<Track> track = mActiveTracks[i].promote(); - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); + if (numactiveseen != numactive) { + // some tracks in the active list were not in the tracks list + snprintf(buffer, SIZE, " The following tracks are in the active list but" + " not in the track list\n"); + result.append(buffer); + Track::appendDumpHeader(result); + for (size_t i = 0; i < 
numactive; ++i) { + sp<Track> track = mActiveTracks[i].promote(); + if (track != 0 && mTracks.indexOf(track) < 0) { + track->dump(buffer, SIZE, true); + result.append(buffer); + } } } + write(fd, result.string(), result.size()); - // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way. - FastTrackUnderruns underruns = getFastTrackUnderruns(0); - fdprintf(fd, "Normal mixer raw underrun counters: partial=%u empty=%u\n", - underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty); } void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args) { - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; - - snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this); - result.append(buffer); - snprintf(buffer, SIZE, "Normal frame count: %zu\n", mNormalFrameCount); - result.append(buffer); - snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n", - ns2ms(systemTime() - mLastWriteTime)); - result.append(buffer); - snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites); - result.append(buffer); - snprintf(buffer, SIZE, "delayed writes: %d\n", mNumDelayedWrites); - result.append(buffer); - snprintf(buffer, SIZE, "blocked in write: %d\n", mInWrite); - result.append(buffer); - snprintf(buffer, SIZE, "suspend count: %d\n", mSuspended); - result.append(buffer); - snprintf(buffer, SIZE, "mix buffer : %p\n", mMixBuffer); - result.append(buffer); - write(fd, result.string(), result.size()); - fdprintf(fd, "Fast track availMask=%#x\n", mFastTrackAvailMask); + fdprintf(fd, "\nOutput thread %p:\n", this); + fdprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount); + fdprintf(fd, " Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime)); + fdprintf(fd, " Total writes: %d\n", mNumWrites); + fdprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites); + fdprintf(fd, " Blocked in write: %s\n", mInWrite ? 
"yes" : "no"); + fdprintf(fd, " Suspend count: %d\n", mSuspended); + fdprintf(fd, " Sink buffer : %p\n", mSinkBuffer); + fdprintf(fd, " Mixer buffer: %p\n", mMixerBuffer); + fdprintf(fd, " Effect buffer: %p\n", mEffectBuffer); + fdprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask); dumpBase(fd, args); } // Thread virtuals -status_t AudioFlinger::PlaybackThread::readyToRun() -{ - status_t status = initCheck(); - if (status == NO_ERROR) { - ALOGI("AudioFlinger's thread %p ready to run", this); - } else { - ALOGE("No working audio driver found."); - } - return status; -} void AudioFlinger::PlaybackThread::onFirstRef() { @@ -1182,7 +1263,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, const sp<IMemory>& sharedBuffer, int sessionId, IAudioFlinger::track_flags_t *flags, @@ -1190,6 +1271,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac int uid, status_t *status) { + size_t frameCount = *pFrameCount; sp<Track> track; status_t lStatus; @@ -1256,29 +1338,36 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac } } } + *pFrameCount = frameCount; - if (mType == DIRECT) { + switch (mType) { + + case DIRECT: if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) { if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) { - ALOGE("createTrack_l() Bad parameter: sampleRate %u format %d, channelMask 0x%08x " - "for output %p with format %d", + ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x " + "for output %p with format %#x", sampleRate, format, channelMask, mOutput, mFormat); lStatus = BAD_VALUE; goto Exit; } } - } else if (mType == OFFLOAD) { + break; + + case OFFLOAD: if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) { - ALOGE("createTrack_l() 
Bad parameter: sampleRate %d format %d, channelMask 0x%08x \"" - "for output %p with format %d", + ALOGE("createTrack_l() Bad parameter: sampleRate %d format %#x, channelMask 0x%08x \"" + "for output %p with format %#x", sampleRate, format, channelMask, mOutput, mFormat); lStatus = BAD_VALUE; goto Exit; } - } else { + break; + + default: if ((format & AUDIO_FORMAT_MAIN_MASK) != AUDIO_FORMAT_PCM) { - ALOGE("createTrack_l() Bad parameter: format %d \"" - "for output %p with format %d", + ALOGE("createTrack_l() Bad parameter: format %#x \"" + "for output %p with format %#x", format, mOutput, mFormat); lStatus = BAD_VALUE; goto Exit; @@ -1289,11 +1378,13 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac lStatus = BAD_VALUE; goto Exit; } + break; + } lStatus = initCheck(); if (lStatus != NO_ERROR) { - ALOGE("Audio driver not initialized."); + ALOGE("createTrack_l() audio driver not initialized"); goto Exit; } @@ -1325,12 +1416,14 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac channelMask, frameCount, sharedBuffer, sessionId, uid); } - if (track == 0 || track->getCblk() == NULL || track->name() < 0) { - lStatus = NO_MEMORY; + // new Track always returns non-NULL, + // but TimedTrack::create() is a factory that could fail by returning NULL + lStatus = track != 0 ? 
track->initCheck() : (status_t) NO_MEMORY; + if (lStatus != NO_ERROR) { + ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus); // track must be cleared from the caller as the caller has the AF lock goto Exit; } - mTracks.add(track); sp<EffectChain> chain = getEffectChain_l(sessionId); @@ -1352,9 +1445,7 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrac lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return track; } @@ -1473,9 +1564,7 @@ status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track) status = NO_ERROR; } - ALOGV("signal playback thread"); - broadcast_l(); - + onAddNewTrack_l(); return status; } @@ -1601,7 +1690,7 @@ void AudioFlinger::PlaybackThread::resetDraining(uint32_t sequence) // static int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event, - void *param, + void *param __unused, void *cookie) { AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie; @@ -1620,29 +1709,30 @@ int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event, return 0; } -void AudioFlinger::PlaybackThread::readOutputParameters() +void AudioFlinger::PlaybackThread::readOutputParameters_l() { - // unfortunately we have no way of recovering from errors here, hence the LOG_FATAL + // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common); mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common); if (!audio_is_output_channel(mChannelMask)) { - LOG_FATAL("HAL channel mask %#x not valid for output", mChannelMask); + LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask); } if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) { - LOG_FATAL("HAL channel mask %#x not supported for mixed output; " + LOG_ALWAYS_FATAL("HAL channel 
mask %#x not supported for mixed output; " "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask); } mChannelCount = popcount(mChannelMask); mFormat = mOutput->stream->common.get_format(&mOutput->stream->common); if (!audio_is_valid_format(mFormat)) { - LOG_FATAL("HAL format %d not valid for output", mFormat); + LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat); } if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) { - LOG_FATAL("HAL format %d not supported for mixed output; must be AUDIO_FORMAT_PCM_16_BIT", - mFormat); + LOG_ALWAYS_FATAL("HAL format %#x not supported for mixed output; " + "must be AUDIO_FORMAT_PCM_16_BIT", mFormat); } mFrameSize = audio_stream_frame_size(&mOutput->stream->common); - mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize; + mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common); + mFrameCount = mBufferSize / mFrameSize; if (mFrameCount & 15) { ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames", mFrameCount); @@ -1657,12 +1747,12 @@ void AudioFlinger::PlaybackThread::readOutputParameters() } } - // Calculate size of normal mix buffer relative to the HAL output buffer size + // Calculate size of normal sink buffer relative to the HAL output buffer size double multiplier = 1.0; if (mType == MIXER && (kUseFastMixer == FastMixer_Static || kUseFastMixer == FastMixer_Dynamic)) { - size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000; - size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000; + size_t minNormalFrameCount = (kMinNormalSinkBufferSizeMs * mSampleRate) / 1000; + size_t maxNormalFrameCount = (kMaxNormalSinkBufferSizeMs * mSampleRate) / 1000; // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer minNormalFrameCount = (minNormalFrameCount + 15) & ~15; maxNormalFrameCount = maxNormalFrameCount & ~15; @@ -1680,7 
+1770,7 @@ void AudioFlinger::PlaybackThread::readOutputParameters() } } else { // prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL - // SRC (it would be unusual for the normal mix buffer size to not be a multiple of fast + // SRC (it would be unusual for the normal sink buffer size to not be a multiple of fast // track, but we sometimes have to do this to satisfy the maximum frame count // constraint) // FIXME this rounding up should not be done if no HAL SRC @@ -1696,18 +1786,40 @@ void AudioFlinger::PlaybackThread::readOutputParameters() mNormalFrameCount = multiplier * mFrameCount; // round up to nearest 16 frames to satisfy AudioMixer mNormalFrameCount = (mNormalFrameCount + 15) & ~15; - ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount, + ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount, mNormalFrameCount); - delete[] mAllocMixBuffer; - size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize; - mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1]; - mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align); - memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize); + // mSinkBuffer is the sink buffer. Size is always multiple-of-16 frames. + // Originally this was int16_t[] array, need to remove legacy implications. + free(mSinkBuffer); + mSinkBuffer = NULL; + // For sink buffer size, we use the frame size from the downstream sink to avoid problems + // with non PCM formats for compressed music, e.g. AAC, and Offload threads. + const size_t sinkBufferSize = mNormalFrameCount * mFrameSize; + (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize); + + // We resize the mMixerBuffer according to the requirements of the sink buffer which + // drives the output. 
+ free(mMixerBuffer); + mMixerBuffer = NULL; + if (mMixerBufferEnabled) { + mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // also valid: AUDIO_FORMAT_PCM_16_BIT. + mMixerBufferSize = mNormalFrameCount * mChannelCount + * audio_bytes_per_sample(mMixerBufferFormat); + (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize); + } + free(mEffectBuffer); + mEffectBuffer = NULL; + if (mEffectBufferEnabled) { + mEffectBufferFormat = AUDIO_FORMAT_PCM_16_BIT; // Note: Effects support 16b only + mEffectBufferSize = mNormalFrameCount * mChannelCount + * audio_bytes_per_sample(mEffectBufferFormat); + (void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize); + } // force reconfiguration of effect chains and engines to take new buffer size and audio // parameters into account - // Note that mLock is not held when readOutputParameters() is called from the constructor + // Note that mLock is not held when readOutputParameters_l() is called from the constructor // but in this case nothing is done below as no audio sessions have effect yet so it doesn't // matter. 
// create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains @@ -1841,7 +1953,7 @@ void AudioFlinger::PlaybackThread::threadLoop_removeTracks( const Vector< sp<Track> >& tracksToRemove) { size_t count = tracksToRemove.size(); - if (count) { + if (count > 0) { for (size_t i = 0 ; i < count ; i++) { const sp<Track>& track = tracksToRemove.itemAt(i); if (!track->isOutputTrack()) { @@ -1882,12 +1994,12 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() mLastWriteTime = systemTime(); mInWrite = true; ssize_t bytesWritten; + const size_t offset = mCurrentWriteLength - mBytesRemaining; // If an NBAIO sink is present, use it to write the normal mixer's submix if (mNormalSink != 0) { -#define mBitShift 2 // FIXME - size_t count = mBytesRemaining >> mBitShift; - size_t offset = (mCurrentWriteLength - mBytesRemaining) >> 1; + const size_t count = mBytesRemaining / mFrameSize; + ATRACE_BEGIN("write"); // update the setpoint when AudioFlinger::mScreenState changes uint32_t screenState = AudioFlinger::mScreenState; @@ -1899,10 +2011,10 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2); } } - ssize_t framesWritten = mNormalSink->write(mMixBuffer + offset, count); + ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count); ATRACE_END(); if (framesWritten > 0) { - bytesWritten = framesWritten << mBitShift; + bytesWritten = framesWritten * mFrameSize; } else { bytesWritten = framesWritten; } @@ -1917,7 +2029,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() // otherwise use the HAL / AudioStreamOut directly } else { // Direct output and offload threads - size_t offset = (mCurrentWriteLength - mBytesRemaining); + if (mUseAsyncWrite) { ALOGW_IF(mWriteAckSequence & 1, "threadLoop_write(): out of sequence write request"); mWriteAckSequence += 2; @@ -1928,7 +2040,7 @@ ssize_t AudioFlinger::PlaybackThread::threadLoop_write() // FIXME We should have 
an implementation of timestamps for direct output threads. // They are used e.g for multichannel PCM playback over HDMI. bytesWritten = mOutput->stream->write(mOutput->stream, - (char *)mMixBuffer + offset, mBytesRemaining); + (char *)mSinkBuffer + offset, mBytesRemaining); if (mUseAsyncWrite && ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) { // do not wait for async callback in case of error of full write @@ -1967,7 +2079,7 @@ void AudioFlinger::PlaybackThread::threadLoop_exit() /* The derived values that are cached: - - mixBufferSize from frame count * frame size + - mSinkBufferSize from frame count * frame size - activeSleepTime from activeSleepTimeUs() - idleSleepTime from idleSleepTimeUs() - standbyDelay from mActiveSleepTimeUs (DIRECT only) @@ -1986,7 +2098,7 @@ The parameters that affect these derived values are: void AudioFlinger::PlaybackThread::cacheParameters_l() { - mixBufferSize = mNormalFrameCount * mFrameSize; + mSinkBufferSize = mNormalFrameCount * mFrameSize; activeSleepTime = activeSleepTimeUs(); idleSleepTime = idleSleepTimeUs(); } @@ -2009,13 +2121,14 @@ void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamTy status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain) { int session = chain->sessionId(); - int16_t *buffer = mMixBuffer; + int16_t* buffer = reinterpret_cast<int16_t*>(mEffectBufferEnabled + ? 
mEffectBuffer : mSinkBuffer); bool ownsBuffer = false; ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session); if (session > 0) { // Only one effect chain can be present in direct output thread and it uses - // the mix buffer as input + // the sink buffer as input if (mType != DIRECT) { size_t numSamples = mNormalFrameCount * mChannelCount; buffer = new int16_t[numSamples]; @@ -2049,7 +2162,8 @@ status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& c } chain->setInBuffer(buffer, ownsBuffer); - chain->setOutBuffer(mMixBuffer); + chain->setOutBuffer(reinterpret_cast<int16_t*>(mEffectBufferEnabled + ? mEffectBuffer : mSinkBuffer)); // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect // chains list in order to be processed last as it contains output stage effects // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before @@ -2099,7 +2213,7 @@ size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& for (size_t i = 0; i < mTracks.size(); ++i) { sp<Track> track = mTracks[i]; if (session == track->sessionId()) { - track->setMainBuffer(mMixBuffer); + track->setMainBuffer(reinterpret_cast<int16_t*>(mSinkBuffer)); chain->decTrackCnt(); } } @@ -2302,14 +2416,32 @@ bool AudioFlinger::PlaybackThread::threadLoop() // must be written to HAL threadLoop_sleepTime(); if (sleepTime == 0) { - mCurrentWriteLength = mixBufferSize; + mCurrentWriteLength = mSinkBufferSize; } } + // Either threadLoop_mix() or threadLoop_sleepTime() should have set + // mMixerBuffer with data if mMixerBufferValid is true and sleepTime == 0. + // Merge mMixerBuffer data into mEffectBuffer (if any effects are valid) + // or mSinkBuffer (if there are no effects). + // + // This is done pre-effects computation; if effects change to + // support higher precision, this needs to move. + // + // mMixerBufferValid is only set true by MixerThread::prepareTracks_l(). 
+ // TODO use sleepTime == 0 as an additional condition. + if (mMixerBufferValid) { + void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer; + audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat; + + memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat, + mNormalFrameCount * mChannelCount); + } + mBytesRemaining = mCurrentWriteLength; if (isSuspended()) { sleepTime = suspendSleepTimeUs(); // simulate write to HAL when suspended - mBytesWritten += mixBufferSize; + mBytesWritten += mSinkBufferSize; mBytesRemaining = 0; } @@ -2330,6 +2462,16 @@ bool AudioFlinger::PlaybackThread::threadLoop() } } + // Only if the Effects buffer is enabled and there is data in the + // Effects buffer (buffer valid), we need to + // copy into the sink buffer. + // TODO use sleepTime == 0 as an additional condition. + if (mEffectBufferValid) { + //ALOGV("writing effect buffer to sink buffer format %#x", mFormat); + memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat, + mNormalFrameCount * mChannelCount); + } + // enable changes in effect chain unlockEffectChains(effectChains); @@ -2348,20 +2490,20 @@ bool AudioFlinger::PlaybackThread::threadLoop() (mMixerStatus == MIXER_DRAIN_ALL)) { threadLoop_drain(); } -if (mType == MIXER) { - // write blocked detection - nsecs_t now = systemTime(); - nsecs_t delta = now - mLastWriteTime; - if (!mStandby && delta > maxPeriod) { - mNumDelayedWrites++; - if ((now - lastWarning) > kWarningThrottleNs) { - ATRACE_NAME("underrun"); - ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p", - ns2ms(delta), mNumDelayedWrites, this); - lastWarning = now; + if (mType == MIXER) { + // write blocked detection + nsecs_t now = systemTime(); + nsecs_t delta = now - mLastWriteTime; + if (!mStandby && delta > maxPeriod) { + mNumDelayedWrites++; + if ((now - lastWarning) > kWarningThrottleNs) { + ATRACE_NAME("underrun"); + ALOGW("write blocked for %llu msecs, %d delayed writes, 
thread %p", + ns2ms(delta), mNumDelayedWrites, this); + lastWarning = now; + } } } -} } else { usleep(sleepTime); @@ -2409,7 +2551,7 @@ if (mType == MIXER) { void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove) { size_t count = tracksToRemove.size(); - if (count) { + if (count > 0) { for (size_t i=0 ; i<count ; i++) { const sp<Track>& track = tracksToRemove.itemAt(i); mActiveTracks.remove(track); @@ -2473,7 +2615,7 @@ AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, Aud // create an NBAIO sink for the HAL output stream, and negotiate mOutputSink = new AudioStreamOutSink(output->stream); size_t numCounterOffers = 0; - const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount)}; + const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)}; ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers); ALOG_ASSERT(index == 0); @@ -2713,12 +2855,6 @@ void AudioFlinger::MixerThread::threadLoop_standby() PlaybackThread::threadLoop_standby(); } -// Empty implementation for standard mixer -// Overridden for offloaded playback -void AudioFlinger::PlaybackThread::flushOutput_l() -{ -} - bool AudioFlinger::PlaybackThread::waitingAsyncCallback_l() { return false; @@ -2750,6 +2886,12 @@ void AudioFlinger::PlaybackThread::threadLoop_standby() } } +void AudioFlinger::PlaybackThread::onAddNewTrack_l() +{ + ALOGV("signal playback thread"); + broadcast_l(); +} + void AudioFlinger::MixerThread::threadLoop_mix() { // obtain the presentation timestamp of the next output buffer @@ -2768,7 +2910,7 @@ void AudioFlinger::MixerThread::threadLoop_mix() // mix buffers... mAudioMixer->process(pts); - mCurrentWriteLength = mixBufferSize; + mCurrentWriteLength = mSinkBufferSize; // increase sleep time progressively when application underrun condition clears. 
// Only increase sleep time if the mixer is ready for two consecutive times to avoid // that a steady state of alternating ready/not ready conditions keeps the sleep time @@ -2802,7 +2944,13 @@ void AudioFlinger::MixerThread::threadLoop_sleepTime() sleepTime = idleSleepTime; } } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) { - memset (mMixBuffer, 0, mixBufferSize); + // clear out mMixerBuffer or mSinkBuffer, to ensure buffers are cleared + // before effects processing or output. + if (mMixerBufferValid) { + memset(mMixerBuffer, 0, mMixerBufferSize); + } else { + memset(mSinkBuffer, 0, mSinkBufferSize); + } sleepTime = 0; ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED), "anticipated start"); @@ -2849,6 +2997,9 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac state = sq->begin(); } + mMixerBufferValid = false; // mMixerBuffer has no valid data until appropriate tracks found. + mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found. + for (size_t i=0 ; i<count ; i++) { const sp<Track> t = mActiveTracks[i].promote(); if (t == 0) { @@ -2967,7 +3118,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac break; case TrackBase::IDLE: default: - LOG_FATAL("unexpected track state %d", track->mState); + LOG_ALWAYS_FATAL("unexpected track state %d", track->mState); } if (isActive) { @@ -2998,7 +3149,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac // because we're about to decrement the last sp<> on those tracks. 
block = FastMixerStateQueue::BLOCK_UNTIL_ACKED; } else { - LOG_FATAL("fast track %d should have been active", j); + LOG_ALWAYS_FATAL("fast track %d should have been active", j); } tracksToRemove->add(track); // Avoids a misleading display in dumpsys @@ -3027,12 +3178,14 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac // +1 for rounding and +1 for additional sample needed for interpolation desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1; // add frames already consumed but not yet released by the resampler - // because cblk->framesReady() will include these frames + // because mAudioTrackServerProxy->framesReady() will include these frames desiredFrames += mAudioMixer->getUnreleasedFrames(track->name()); +#if 0 // the minimum track buffer size is normally twice the number of frames necessary // to fill one buffer and the resampler should not leave more than one buffer worth // of unreleased frames after each pass, but just in case... ALOG_ASSERT(desiredFrames <= cblk->frameCount_); +#endif } uint32_t minFrames = 1; if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() && @@ -3048,10 +3201,14 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac mixedTracks++; - // track->mainBuffer() != mMixBuffer means there is an effect chain - // connected to the track + // track->mainBuffer() != mSinkBuffer or mMixerBuffer means + // there is an effect chain connected to the track chain.clear(); - if (track->mainBuffer() != mMixBuffer) { + if (track->mainBuffer() != mSinkBuffer && + track->mainBuffer() != mMixerBuffer) { + if (mEffectBufferEnabled) { + mEffectBufferValid = true; // Later can set directly. 
+ } chain = getEffectChain_l(track->sessionId()); // Delegate volume control to effect in track effect chain if needed if (chain != 0) { @@ -3177,10 +3334,41 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTrac AudioMixer::RESAMPLE, AudioMixer::SAMPLE_RATE, (void *)(uintptr_t)reqSampleRate); - mAudioMixer->setParameter( - name, - AudioMixer::TRACK, - AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer()); + /* + * Select the appropriate output buffer for the track. + * + * Tracks with effects go into their own effects chain buffer + * and from there into either mEffectBuffer or mSinkBuffer. + * + * Other tracks can use mMixerBuffer for higher precision + * channel accumulation. If this buffer is enabled + * (mMixerBufferEnabled true), then selected tracks will accumulate + * into it. + * + */ + if (mMixerBufferEnabled + && (track->mainBuffer() == mSinkBuffer + || track->mainBuffer() == mMixerBuffer)) { + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer); + // TODO: override track->mainBuffer()? + mMixerBufferValid = true; + } else { + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MIXER_FORMAT, (void *)AUDIO_FORMAT_PCM_16_BIT); + mAudioMixer->setParameter( + name, + AudioMixer::TRACK, + AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer()); + } mAudioMixer->setParameter( name, AudioMixer::TRACK, @@ -3294,13 +3482,30 @@ track_is_ready: ; // remove all the tracks that need to be... 
removeTracks_l(*tracksToRemove); - // mix buffer must be cleared if all tracks are connected to an - // effect chain as in this case the mixer will not write to - // mix buffer and track effects will accumulate into it + // sink or mix buffer must be cleared if all tracks are connected to an + // effect chain as in this case the mixer will not write to the sink or mix buffer + // and track effects will accumulate into it if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) || (mixedTracks == 0 && fastTracks > 0))) { // FIXME as a performance optimization, should remember previous zero status - memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t)); + if (mMixerBufferValid) { + memset(mMixerBuffer, 0, mMixerBufferSize); + // TODO: In testing, mSinkBuffer below need not be cleared because + // the PlaybackThread::threadLoop() copies mMixerBuffer into mSinkBuffer + // after mixing. + // + // To enforce this guarantee: + // ((mixedTracks != 0 && mixedTracks == tracksWithEffect) || + // (mixedTracks == 0 && fastTracks > 0)) + // must imply MIXER_TRACKS_READY. + // Later, we may clear buffers regardless, and skip much of this logic. + } + // TODO - either mEffectBuffer or mSinkBuffer needs to be cleared. 
+ if (mEffectBufferValid) { + memset(mEffectBuffer, 0, mEffectBufferSize); + } + // FIXME as a performance optimization, should remember previous zero status + memset(mSinkBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t)); } // if any fast tracks, then status is ready @@ -3358,6 +3563,7 @@ bool AudioFlinger::MixerThread::checkForNewParameters_l() if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) { status = BAD_VALUE; } else { + // no need to save value, since it's constant reconfig = true; } } @@ -3365,6 +3571,7 @@ bool AudioFlinger::MixerThread::checkForNewParameters_l() if ((audio_channel_mask_t) value != AUDIO_CHANNEL_OUT_STEREO) { status = BAD_VALUE; } else { + // no need to save value, since it's constant reconfig = true; } } @@ -3423,7 +3630,7 @@ bool AudioFlinger::MixerThread::checkForNewParameters_l() keyValuePair.string()); } if (status == NO_ERROR && reconfig) { - readOutputParameters(); + readOutputParameters_l(); delete mAudioMixer; mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate); for (size_t i = 0; i < mTracks.size() ; i++) { @@ -3468,9 +3675,7 @@ void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& ar PlaybackThread::dumpInternals(fd, args); - snprintf(buffer, SIZE, "AudioMixer tracks: %08x\n", mAudioMixer->trackNames()); - result.append(buffer); - write(fd, result.string(), result.size()); + fdprintf(fd, " AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames()); // Make a non-atomic copy of fast mixer dump state so it won't change underneath us const FastMixerDumpState copy(mFastMixerDumpState); @@ -3688,7 +3893,7 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prep void AudioFlinger::DirectOutputThread::threadLoop_mix() { size_t frameCount = mFrameCount; - int8_t *curBuf = (int8_t *)mMixBuffer; + int8_t *curBuf = (int8_t *)mSinkBuffer; // output audio to hardware while (frameCount) { AudioBufferProvider::Buffer buffer; @@ -3703,7 +3908,7 @@ void 
AudioFlinger::DirectOutputThread::threadLoop_mix() curBuf += buffer.frameCount * mFrameSize; mActiveTrack->releaseBuffer(&buffer); } - mCurrentWriteLength = curBuf - (int8_t *)mMixBuffer; + mCurrentWriteLength = curBuf - (int8_t *)mSinkBuffer; sleepTime = 0; standbyTime = systemTime() + standbyDelay; mActiveTrack.clear(); @@ -3718,20 +3923,20 @@ void AudioFlinger::DirectOutputThread::threadLoop_sleepTime() sleepTime = idleSleepTime; } } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) { - memset(mMixBuffer, 0, mFrameCount * mFrameSize); + memset(mSinkBuffer, 0, mFrameCount * mFrameSize); sleepTime = 0; } } // getTrackName_l() must be called with ThreadBase::mLock held -int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask, - int sessionId) +int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask __unused, + int sessionId __unused) { return 0; } // deleteTrackName_l() must be called with ThreadBase::mLock held -void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name) +void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name __unused) { } @@ -3746,6 +3951,16 @@ bool AudioFlinger::DirectOutputThread::checkForNewParameters_l() AudioParameter param = AudioParameter(keyValuePair); int value; + if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) { + // forward device change to effects that have requested to be + // aware of attached audio device. 
+ if (value != AUDIO_DEVICE_NONE) { + mOutDevice = value; + for (size_t i = 0; i < mEffectChains.size(); i++) { + mEffectChains[i]->setDevice_l(mOutDevice); + } + } + } if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { // do not accept frame count changes if tracks are open as the track buffer // size depends on frame count and correct behavior would not be garantied @@ -3767,7 +3982,7 @@ bool AudioFlinger::DirectOutputThread::checkForNewParameters_l() keyValuePair.string()); } if (status == NO_ERROR && reconfig) { - readOutputParameters(); + readOutputParameters_l(); sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED); } } @@ -3984,6 +4199,17 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr sp<Track> l = mLatestActiveTrack.promote(); bool last = l.get() == track; + if (track->isInvalid()) { + ALOGW("An invalidated track shouldn't be in active list"); + tracksToRemove->add(track); + continue; + } + + if (track->mState == TrackBase::IDLE) { + ALOGW("An idle track shouldn't be in active list"); + continue; + } + if (track->isPausing()) { track->setPaused(); if (last) { @@ -4002,32 +4228,39 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr mBytesRemaining = 0; // stop writing } tracksToRemove->add(track); - } else if (track->framesReady() && track->isReady() && + } else if (track->isFlushPending()) { + track->flushAck(); + if (last) { + mFlushPending = true; + } + } else if (track->isResumePending()){ + track->resumeAck(); + if (last) { + if (mPausedBytesRemaining) { + // Need to continue write that was interrupted + mCurrentWriteLength = mPausedWriteLength; + mBytesRemaining = mPausedBytesRemaining; + mPausedBytesRemaining = 0; + } + if (mHwPaused) { + doHwResume = true; + mHwPaused = false; + // threadLoop_mix() will handle the case that we need to + // resume an interrupted write + } + // enable write to audio HAL + sleepTime = 0; + + // Do not handle new data in 
this iteration even if track->framesReady() + mixerStatus = MIXER_TRACKS_ENABLED; + } + } else if (track->framesReady() && track->isReady() && !track->isPaused() && !track->isTerminated() && !track->isStopping_2()) { ALOGVV("OffloadThread: track %d s=%08x [OK]", track->name(), cblk->mServer); if (track->mFillingUpStatus == Track::FS_FILLED) { track->mFillingUpStatus = Track::FS_ACTIVE; // make sure processVolume_l() will apply new volume even if 0 mLeftVolFloat = mRightVolFloat = -1.0; - if (track->mState == TrackBase::RESUMING) { - track->mState = TrackBase::ACTIVE; - if (last) { - if (mPausedBytesRemaining) { - // Need to continue write that was interrupted - mCurrentWriteLength = mPausedWriteLength; - mBytesRemaining = mPausedBytesRemaining; - mPausedBytesRemaining = 0; - } - if (mHwPaused) { - doHwResume = true; - mHwPaused = false; - // threadLoop_mix() will handle the case that we need to - // resume an interrupted write - } - // enable write to audio HAL - sleepTime = 0; - } - } } if (last) { @@ -4051,7 +4284,6 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr // seek when resuming. if (previousTrack->sessionId() != track->sessionId()) { previousTrack->invalidate(); - mFlushPending = true; } } } @@ -4127,9 +4359,6 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr // if resume is received before pause is executed. 
if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) { mOutput->stream->pause(mOutput->stream); - if (!doHwPause) { - doHwResume = true; - } } if (mFlushPending) { flushHw_l(); @@ -4145,11 +4374,6 @@ AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTr return mixerStatus; } -void AudioFlinger::OffloadThread::flushOutput_l() -{ - mFlushPending = true; -} - // must be called with thread mutex locked bool AudioFlinger::OffloadThread::waitingAsyncCallback_l() { @@ -4164,15 +4388,15 @@ bool AudioFlinger::OffloadThread::waitingAsyncCallback_l() // must be called with thread mutex locked bool AudioFlinger::OffloadThread::shouldStandby_l() { - bool TrackPaused = false; + bool trackPaused = false; // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack // after a timeout and we will enter standby then. if (mTracks.size() > 0) { - TrackPaused = mTracks[mTracks.size() - 1]->isPaused(); + trackPaused = mTracks[mTracks.size() - 1]->isPaused(); } - return !mStandby && !TrackPaused; + return !mStandby && !trackPaused; } @@ -4190,6 +4414,8 @@ void AudioFlinger::OffloadThread::flushHw_l() mBytesRemaining = 0; mPausedWriteLength = 0; mPausedBytesRemaining = 0; + mHwPaused = false; + if (mUseAsyncWrite) { // discard any pending drain or write ack by incrementing sequence mWriteAckSequence = (mWriteAckSequence + 2) & ~1; @@ -4200,6 +4426,18 @@ void AudioFlinger::OffloadThread::flushHw_l() } } +void AudioFlinger::OffloadThread::onAddNewTrack_l() +{ + sp<Track> previousTrack = mPreviousTrack.promote(); + sp<Track> latestTrack = mLatestActiveTrack.promote(); + + if (previousTrack != 0 && latestTrack != 0 && + (previousTrack->sessionId() != latestTrack->sessionId())) { + mFlushPending = true; + } + PlaybackThread::onAddNewTrack_l(); +} + // ---------------------------------------------------------------------------- AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& 
audioFlinger, @@ -4224,11 +4462,11 @@ void AudioFlinger::DuplicatingThread::threadLoop_mix() if (outputsReady(outputTracks)) { mAudioMixer->process(AudioBufferProvider::kInvalidPTS); } else { - memset(mMixBuffer, 0, mixBufferSize); + memset(mSinkBuffer, 0, mSinkBufferSize); } sleepTime = 0; writeFrames = mNormalFrameCount; - mCurrentWriteLength = mixBufferSize; + mCurrentWriteLength = mSinkBufferSize; standbyTime = systemTime() + standbyDelay; } @@ -4243,7 +4481,7 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime() } else if (mBytesWritten != 0) { if (mMixerStatus == MIXER_TRACKS_ENABLED) { writeFrames = mNormalFrameCount; - memset(mMixBuffer, 0, mixBufferSize); + memset(mSinkBuffer, 0, mSinkBufferSize); } else { // flush remaining overflow buffers in output tracks writeFrames = 0; @@ -4255,10 +4493,18 @@ void AudioFlinger::DuplicatingThread::threadLoop_sleepTime() ssize_t AudioFlinger::DuplicatingThread::threadLoop_write() { for (size_t i = 0; i < outputTracks.size(); i++) { - outputTracks[i]->write(mMixBuffer, writeFrames); + // We convert the duplicating thread format to AUDIO_FORMAT_PCM_16_BIT + // for delivery downstream as needed. This in-place conversion is safe as + // AUDIO_FORMAT_PCM_16_BIT is smaller than any other supported format + // (AUDIO_FORMAT_PCM_8_BIT is not allowed here). 
+ if (mFormat != AUDIO_FORMAT_PCM_16_BIT) { + memcpy_by_audio_format(mSinkBuffer, AUDIO_FORMAT_PCM_16_BIT, + mSinkBuffer, mFormat, writeFrames * mChannelCount); + } + outputTracks[i]->write(reinterpret_cast<int16_t*>(mSinkBuffer), writeFrames); } mStandby = false; - return (ssize_t)mixBufferSize; + return (ssize_t)mSinkBufferSize; } void AudioFlinger::DuplicatingThread::threadLoop_standby() @@ -4284,10 +4530,16 @@ void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread) Mutex::Autolock _l(mLock); // FIXME explain this formula size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate(); + // OutputTrack is forced to AUDIO_FORMAT_PCM_16_BIT regardless of mFormat + // due to current usage case and restrictions on the AudioBufferProvider. + // Actual buffer conversion is done in threadLoop_write(). + // + // TODO: This may change in the future, depending on multichannel + // (and non int16_t*) support on AF::PlaybackThread::OutputTrack OutputTrack *outputTrack = new OutputTrack(thread, this, mSampleRate, - mFormat, + AUDIO_FORMAT_PCM_16_BIT, mChannelMask, frameCount, IPCThreadState::self()->getCallingUid()); @@ -4369,8 +4621,6 @@ void AudioFlinger::DuplicatingThread::cacheParameters_l() AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, AudioStreamIn *input, - uint32_t sampleRate, - audio_channel_mask_t channelMask, audio_io_handle_t id, audio_devices_t outDevice, audio_devices_t inDevice @@ -4379,27 +4629,24 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger, #endif ) : ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD), - mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL), - // mRsmpInIndex and mBufferSize set by readInputParameters() - mReqChannelCount(popcount(channelMask)), - mReqSampleRate(sampleRate) - // mBytesRead is only meaningful while active, and so is cleared in start() - // (but might be better to also clear here for dump?) 
+ mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL), + // mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l() + mRsmpInRear(0) #ifdef TEE_SINK , mTeeSink(teeSink) #endif { snprintf(mName, kNameLength, "AudioIn_%X", id); + mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName); - readInputParameters(); + readInputParameters_l(); } AudioFlinger::RecordThread::~RecordThread() { + mAudioFlinger->unregisterWriter(mNBLogWriter); delete[] mRsmpInBuffer; - delete mResampler; - delete[] mRsmpOutBuffer; } void AudioFlinger::RecordThread::onFirstRef() @@ -4407,230 +4654,393 @@ void AudioFlinger::RecordThread::onFirstRef() run(mName, PRIORITY_URGENT_AUDIO); } -status_t AudioFlinger::RecordThread::readyToRun() -{ - status_t status = initCheck(); - ALOGW_IF(status != NO_ERROR,"RecordThread %p could not initialize", this); - return status; -} - bool AudioFlinger::RecordThread::threadLoop() { - AudioBufferProvider::Buffer buffer; - sp<RecordTrack> activeTrack; - Vector< sp<EffectChain> > effectChains; - nsecs_t lastWarning = 0; inputStandBy(); + +reacquire_wakelock: + sp<RecordTrack> activeTrack; + int activeTracksGen; { Mutex::Autolock _l(mLock); - activeTrack = mActiveTrack; - acquireWakeLock_l(activeTrack != 0 ? 
activeTrack->uid() : -1); + size_t size = mActiveTracks.size(); + activeTracksGen = mActiveTracksGen; + if (size > 0) { + // FIXME an arbitrary choice + activeTrack = mActiveTracks[0]; + acquireWakeLock_l(activeTrack->uid()); + if (size > 1) { + SortedVector<int> tmp; + for (size_t i = 0; i < size; i++) { + tmp.add(mActiveTracks[i]->uid()); + } + updateWakeLockUids_l(tmp); + } + } else { + acquireWakeLock_l(-1); + } } - // used to verify we've read at least once before evaluating how many bytes were read - bool readOnce = false; + // used to request a deferred sleep, to be executed later while mutex is unlocked + uint32_t sleepUs = 0; - // start recording - while (!exitPending()) { + // loop while there is work to do + for (;;) { + Vector< sp<EffectChain> > effectChains; - processConfigEvents(); + // sleep with mutex unlocked + if (sleepUs > 0) { + usleep(sleepUs); + sleepUs = 0; + } + + // activeTracks accumulates a copy of a subset of mActiveTracks + Vector< sp<RecordTrack> > activeTracks; { // scope for mLock Mutex::Autolock _l(mLock); - checkForNewParameters_l(); - if (mActiveTrack != 0 && activeTrack != mActiveTrack) { - SortedVector<int> tmp; - tmp.add(mActiveTrack->uid()); - updateWakeLockUids_l(tmp); - } - activeTrack = mActiveTrack; - if (mActiveTrack == 0 && mConfigEvents.isEmpty()) { - standby(); - if (exitPending()) { - break; - } + processConfigEvents_l(); + // return value 'reconfig' is currently unused + bool reconfig = checkForNewParameters_l(); + // check exitPending here because checkForNewParameters_l() and + // checkForNewParameters_l() can temporarily release mLock + if (exitPending()) { + break; + } + + // if no active track(s), then standby and release wakelock + size_t size = mActiveTracks.size(); + if (size == 0) { + standbyIfNotAlreadyInStandby(); + // exitPending() can't become true here releaseWakeLock_l(); ALOGV("RecordThread: loop stopping"); // go to sleep mWaitWorkCV.wait(mLock); ALOGV("RecordThread: loop starting"); - 
acquireWakeLock_l(mActiveTrack != 0 ? mActiveTrack->uid() : -1); - continue; + goto reacquire_wakelock; } - if (mActiveTrack != 0) { - if (mActiveTrack->isTerminated()) { - removeTrack_l(mActiveTrack); - mActiveTrack.clear(); - } else if (mActiveTrack->mState == TrackBase::PAUSING) { - standby(); - mActiveTrack.clear(); - mStartStopCond.broadcast(); - } else if (mActiveTrack->mState == TrackBase::RESUMING) { - if (mReqChannelCount != mActiveTrack->channelCount()) { - mActiveTrack.clear(); - mStartStopCond.broadcast(); - } else if (readOnce) { - // record start succeeds only if first read from audio input - // succeeds - if (mBytesRead >= 0) { - mActiveTrack->mState = TrackBase::ACTIVE; - } else { - mActiveTrack.clear(); - } - mStartStopCond.broadcast(); - } + + if (mActiveTracksGen != activeTracksGen) { + activeTracksGen = mActiveTracksGen; + SortedVector<int> tmp; + for (size_t i = 0; i < size; i++) { + tmp.add(mActiveTracks[i]->uid()); + } + updateWakeLockUids_l(tmp); + } + + bool doBroadcast = false; + for (size_t i = 0; i < size; ) { + + activeTrack = mActiveTracks[i]; + if (activeTrack->isTerminated()) { + removeTrack_l(activeTrack); + mActiveTracks.remove(activeTrack); + mActiveTracksGen++; + size--; + continue; + } + + TrackBase::track_state activeTrackState = activeTrack->mState; + switch (activeTrackState) { + + case TrackBase::PAUSING: + mActiveTracks.remove(activeTrack); + mActiveTracksGen++; + doBroadcast = true; + size--; + continue; + + case TrackBase::STARTING_1: + sleepUs = 10000; + i++; + continue; + + case TrackBase::STARTING_2: + doBroadcast = true; mStandby = false; + activeTrack->mState = TrackBase::ACTIVE; + break; + + case TrackBase::ACTIVE: + break; + + case TrackBase::IDLE: + i++; + continue; + + default: + LOG_ALWAYS_FATAL("Unexpected activeTrackState %d", activeTrackState); + } + + activeTracks.add(activeTrack); + i++; + + } + if (doBroadcast) { + mStartStopCond.broadcast(); + } + + // sleep if there are no active tracks to process + if 
(activeTracks.size() == 0) { + if (sleepUs == 0) { + sleepUs = kRecordThreadSleepUs; } + continue; } + sleepUs = 0; lockEffectChains_l(effectChains); } - if (mActiveTrack != 0) { - if (mActiveTrack->mState != TrackBase::ACTIVE && - mActiveTrack->mState != TrackBase::RESUMING) { - unlockEffectChains(effectChains); - usleep(kRecordThreadSleepUs); - continue; - } - for (size_t i = 0; i < effectChains.size(); i ++) { - effectChains[i]->process_l(); - } + // thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0 - buffer.frameCount = mFrameCount; - status_t status = mActiveTrack->getNextBuffer(&buffer); - if (status == NO_ERROR) { - readOnce = true; - size_t framesOut = buffer.frameCount; - if (mResampler == NULL) { + size_t size = effectChains.size(); + for (size_t i = 0; i < size; i++) { + // thread mutex is not locked, but effect chain is locked + effectChains[i]->process_l(); + } + + // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one. + // Only the client(s) that are too slow will overrun. But if even the fastest client is too + // slow, then this RecordThread will overrun by not calling HAL read often enough. + // If destination is non-contiguous, first read past the nominal end of buffer, then + // copy to the right place. Permitted because mRsmpInBuffer was over-allocated. 
+ + int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1); + ssize_t bytesRead = mInput->stream->read(mInput->stream, + &mRsmpInBuffer[rear * mChannelCount], mBufferSize); + if (bytesRead <= 0) { + ALOGE("read failed: bytesRead=%d < %u", bytesRead, mBufferSize); + // Force input into standby so that it tries to recover at next read attempt + inputStandBy(); + sleepUs = kRecordThreadSleepUs; + continue; + } + ALOG_ASSERT((size_t) bytesRead <= mBufferSize); + size_t framesRead = bytesRead / mFrameSize; + ALOG_ASSERT(framesRead > 0); + if (mTeeSink != 0) { + (void) mTeeSink->write(&mRsmpInBuffer[rear * mChannelCount], framesRead); + } + // If destination is non-contiguous, we now correct for reading past end of buffer. + size_t part1 = mRsmpInFramesP2 - rear; + if (framesRead > part1) { + memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount], + (framesRead - part1) * mFrameSize); + } + rear = mRsmpInRear += framesRead; + + size = activeTracks.size(); + // loop over each active track + for (size_t i = 0; i < size; i++) { + activeTrack = activeTracks[i]; + + enum { + OVERRUN_UNKNOWN, + OVERRUN_TRUE, + OVERRUN_FALSE + } overrun = OVERRUN_UNKNOWN; + + // loop over getNextBuffer to handle circular sink + for (;;) { + + activeTrack->mSink.frameCount = ~0; + status_t status = activeTrack->getNextBuffer(&activeTrack->mSink); + size_t framesOut = activeTrack->mSink.frameCount; + LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0)); + + int32_t front = activeTrack->mRsmpInFront; + ssize_t filled = rear - front; + size_t framesIn; + + if (filled < 0) { + // should not happen, but treat like a massive overrun and re-sync + framesIn = 0; + activeTrack->mRsmpInFront = rear; + overrun = OVERRUN_TRUE; + } else if ((size_t) filled <= mRsmpInFrames) { + framesIn = (size_t) filled; + } else { + // client is not keeping up with server, but give it latest data + framesIn = mRsmpInFrames; + activeTrack->mRsmpInFront = front = rear - framesIn; + overrun = OVERRUN_TRUE; + } + 
+ if (framesOut == 0 || framesIn == 0) { + break; + } + + if (activeTrack->mResampler == NULL) { // no resampling - while (framesOut) { - size_t framesIn = mFrameCount - mRsmpInIndex; - if (framesIn) { - int8_t *src = (int8_t *)mRsmpInBuffer + mRsmpInIndex * mFrameSize; - int8_t *dst = buffer.i8 + (buffer.frameCount - framesOut) * - mActiveTrack->mFrameSize; - if (framesIn > framesOut) - framesIn = framesOut; - mRsmpInIndex += framesIn; - framesOut -= framesIn; - if (mChannelCount == mReqChannelCount) { - memcpy(dst, src, framesIn * mFrameSize); - } else { - if (mChannelCount == 1) { - upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, - (int16_t *)src, framesIn); - } else { - downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, - (int16_t *)src, framesIn); - } - } + if (framesIn > framesOut) { + framesIn = framesOut; + } else { + framesOut = framesIn; + } + int8_t *dst = activeTrack->mSink.i8; + while (framesIn > 0) { + front &= mRsmpInFramesP2 - 1; + size_t part1 = mRsmpInFramesP2 - front; + if (part1 > framesIn) { + part1 = framesIn; } - if (framesOut && mFrameCount == mRsmpInIndex) { - void *readInto; - if (framesOut == mFrameCount && mChannelCount == mReqChannelCount) { - readInto = buffer.raw; - framesOut = 0; - } else { - readInto = mRsmpInBuffer; - mRsmpInIndex = 0; - } - mBytesRead = mInput->stream->read(mInput->stream, readInto, - mBufferSize); - if (mBytesRead <= 0) { - if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) - { - ALOGE("Error reading audio input"); - // Force input into standby so that it tries to - // recover at next read attempt - inputStandBy(); - usleep(kRecordThreadSleepUs); - } - mRsmpInIndex = mFrameCount; - framesOut = 0; - buffer.frameCount = 0; - } -#ifdef TEE_SINK - else if (mTeeSink != 0) { - (void) mTeeSink->write(readInto, - mBytesRead >> Format_frameBitShift(mTeeSink->format())); - } -#endif + int8_t *src = (int8_t *)mRsmpInBuffer + (front * mFrameSize); + if (mChannelCount == activeTrack->mChannelCount) { + 
memcpy(dst, src, part1 * mFrameSize); + } else if (mChannelCount == 1) { + upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (int16_t *)src, + part1); + } else { + downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (int16_t *)src, + part1); } + dst += part1 * activeTrack->mFrameSize; + front += part1; + framesIn -= part1; } + activeTrack->mRsmpInFront += framesOut; + } else { // resampling + // FIXME framesInNeeded should really be part of resampler API, and should + // depend on the SRC ratio + // to keep mRsmpInBuffer full so resampler always has sufficient input + size_t framesInNeeded; + // FIXME only re-calculate when it changes, and optimize for common ratios + double inOverOut = (double) mSampleRate / activeTrack->mSampleRate; + double outOverIn = (double) activeTrack->mSampleRate / mSampleRate; + framesInNeeded = ceil(framesOut * inOverOut) + 1; + ALOGV("need %u frames in to produce %u out given in/out ratio of %.4g", + framesInNeeded, framesOut, inOverOut); + // Although we theoretically have framesIn in circular buffer, some of those are + // unreleased frames, and thus must be discounted for purpose of budgeting. + size_t unreleased = activeTrack->mRsmpInUnrel; + framesIn = framesIn > unreleased ? framesIn - unreleased : 0; + if (framesIn < framesInNeeded) { + ALOGV("not enough to resample: have %u frames in but need %u in to " + "produce %u out given in/out ratio of %.4g", + framesIn, framesInNeeded, framesOut, inOverOut); + size_t newFramesOut = framesIn > 0 ? 
floor((framesIn - 1) * outOverIn) : 0; + LOG_ALWAYS_FATAL_IF(newFramesOut >= framesOut); + if (newFramesOut == 0) { + break; + } + framesInNeeded = ceil(newFramesOut * inOverOut) + 1; + ALOGV("now need %u frames in to produce %u out given out/in ratio of %.4g", + framesInNeeded, newFramesOut, outOverIn); + LOG_ALWAYS_FATAL_IF(framesIn < framesInNeeded); + ALOGV("success 2: have %u frames in and need %u in to produce %u out " + "given in/out ratio of %.4g", + framesIn, framesInNeeded, newFramesOut, inOverOut); + framesOut = newFramesOut; + } else { + ALOGV("success 1: have %u in and need %u in to produce %u out " + "given in/out ratio of %.4g", + framesIn, framesInNeeded, framesOut, inOverOut); + } - // resampler accumulates, but we only have one source track - memset(mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t)); - // alter output frame count as if we were expecting stereo samples - if (mChannelCount == 1 && mReqChannelCount == 1) { - framesOut >>= 1; + // reallocate mRsmpOutBuffer as needed; we will grow but never shrink + if (activeTrack->mRsmpOutFrameCount < framesOut) { + // FIXME why does each track need it's own mRsmpOutBuffer? can't they share? + delete[] activeTrack->mRsmpOutBuffer; + // resampler always outputs stereo + activeTrack->mRsmpOutBuffer = new int32_t[framesOut * FCC_2]; + activeTrack->mRsmpOutFrameCount = framesOut; } - mResampler->resample(mRsmpOutBuffer, framesOut, - this /* AudioBufferProvider* */); + + // resampler accumulates, but we only have one source track + memset(activeTrack->mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t)); + activeTrack->mResampler->resample(activeTrack->mRsmpOutBuffer, framesOut, + // FIXME how about having activeTrack implement this interface itself? + activeTrack->mResamplerBufferProvider + /*this*/ /* AudioBufferProvider* */); // ditherAndClamp() works as long as all buffers returned by - // mActiveTrack->getNextBuffer() are 32 bit aligned which should be always true. 
- if (mChannelCount == 2 && mReqChannelCount == 1) { - // temporarily type pun mRsmpOutBuffer from Q19.12 to int16_t - ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut); + // activeTrack->getNextBuffer() are 32 bit aligned which should be always true. + if (activeTrack->mChannelCount == 1) { + // temporarily type pun mRsmpOutBuffer from Q4.27 to int16_t + ditherAndClamp(activeTrack->mRsmpOutBuffer, activeTrack->mRsmpOutBuffer, + framesOut); // the resampler always outputs stereo samples: // do post stereo to mono conversion - downmix_to_mono_i16_from_stereo_i16(buffer.i16, (int16_t *)mRsmpOutBuffer, - framesOut); + downmix_to_mono_i16_from_stereo_i16(activeTrack->mSink.i16, + (int16_t *)activeTrack->mRsmpOutBuffer, framesOut); } else { - ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut); + ditherAndClamp((int32_t *)activeTrack->mSink.raw, + activeTrack->mRsmpOutBuffer, framesOut); } // now done with mRsmpOutBuffer } - if (mFramestoDrop == 0) { - mActiveTrack->releaseBuffer(&buffer); + + if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) { + overrun = OVERRUN_FALSE; + } + + if (activeTrack->mFramesToDrop == 0) { + if (framesOut > 0) { + activeTrack->mSink.frameCount = framesOut; + activeTrack->releaseBuffer(&activeTrack->mSink); + } } else { - if (mFramestoDrop > 0) { - mFramestoDrop -= buffer.frameCount; - if (mFramestoDrop <= 0) { - clearSyncStartEvent(); + // FIXME could do a partial drop of framesOut + if (activeTrack->mFramesToDrop > 0) { + activeTrack->mFramesToDrop -= framesOut; + if (activeTrack->mFramesToDrop <= 0) { + activeTrack->clearSyncStartEvent(); } } else { - mFramestoDrop += buffer.frameCount; - if (mFramestoDrop >= 0 || mSyncStartEvent == 0 || - mSyncStartEvent->isCancelled()) { + activeTrack->mFramesToDrop += framesOut; + if (activeTrack->mFramesToDrop >= 0 || activeTrack->mSyncStartEvent == 0 || + activeTrack->mSyncStartEvent->isCancelled()) { ALOGW("Synced record %s, session %d, trigger session %d", - (mFramestoDrop >= 0) 
? "timed out" : "cancelled", - mActiveTrack->sessionId(), - (mSyncStartEvent != 0) ? mSyncStartEvent->triggerSession() : 0); - clearSyncStartEvent(); + (activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled", + activeTrack->sessionId(), + (activeTrack->mSyncStartEvent != 0) ? + activeTrack->mSyncStartEvent->triggerSession() : 0); + activeTrack->clearSyncStartEvent(); } } } - mActiveTrack->clearOverflow(); + + if (framesOut == 0) { + break; + } } - // client isn't retrieving buffers fast enough - else { - if (!mActiveTrack->setOverflow()) { + + switch (overrun) { + case OVERRUN_TRUE: + // client isn't retrieving buffers fast enough + if (!activeTrack->setOverflow()) { nsecs_t now = systemTime(); + // FIXME should lastWarning per track? if ((now - lastWarning) > kWarningThrottleNs) { ALOGW("RecordThread: buffer overflow"); lastWarning = now; } } - // Release the processor for a while before asking for a new buffer. - // This will give the application more chance to read from the buffer and - // clear the overflow. 
- usleep(kRecordThreadSleepUs); + break; + case OVERRUN_FALSE: + activeTrack->clearOverflow(); + break; + case OVERRUN_UNKNOWN: + break; } + } + // enable changes in effect chain unlockEffectChains(effectChains); - effectChains.clear(); + // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end } - standby(); + standbyIfNotAlreadyInStandby(); { Mutex::Autolock _l(mLock); @@ -4638,7 +5048,8 @@ bool AudioFlinger::RecordThread::threadLoop() sp<RecordTrack> track = mTracks[i]; track->invalidate(); } - mActiveTrack.clear(); + mActiveTracks.clear(); + mActiveTracksGen++; mStartStopCond.broadcast(); } @@ -4648,7 +5059,7 @@ bool AudioFlinger::RecordThread::threadLoop() return false; } -void AudioFlinger::RecordThread::standby() +void AudioFlinger::RecordThread::standbyIfNotAlreadyInStandby() { if (!mStandby) { inputStandBy(); @@ -4661,26 +5072,23 @@ void AudioFlinger::RecordThread::inputStandBy() mInput->stream->common.standby(&mInput->stream->common); } -sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l( +// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held +sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l( const sp<AudioFlinger::Client>& client, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, int sessionId, int uid, IAudioFlinger::track_flags_t *flags, pid_t tid, status_t *status) { + size_t frameCount = *pFrameCount; sp<RecordTrack> track; status_t lStatus; - lStatus = initCheck(); - if (lStatus != NO_ERROR) { - ALOGE("createRecordTrack_l() audio driver not initialized"); - goto Exit; - } // client expresses a preference for FAST, but we get the final say if (*flags & IAudioFlinger::TRACK_FAST) { if ( @@ -4688,21 +5096,24 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR ( (tid != -1) && ((frameCount == 0) || + // 
FIXME not necessarily true, should be native frame count for native SR! (frameCount >= mFrameCount)) ) && - // FIXME when record supports non-PCM data, also check for audio_is_linear_pcm(format) + // PCM data + audio_is_linear_pcm(format) && // mono or stereo ( (channelMask == AUDIO_CHANNEL_OUT_MONO) || (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) && // hardware sample rate + // FIXME actually the native hardware sample rate (sampleRate == mSampleRate) && - // record thread has an associated fast recorder - hasFastRecorder() - // FIXME test that RecordThread for this fast track has a capable output HAL - // FIXME add a permission test also? + // record thread has an associated fast capture + hasFastCapture() + // fast capture does not require slots ) { - // if frameCount not specified, then it defaults to fast recorder (HAL) frame count + // if frameCount not specified, then it defaults to fast capture (HAL) frame count if (frameCount == 0) { + // FIXME wrong mFrameCount frameCount = mFrameCount * kFastTrackMultiplier; } ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d", @@ -4710,11 +5121,12 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR } else { ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%d " "mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u " - "hasFastRecorder=%d tid=%d", + "hasFastCapture=%d tid=%d", frameCount, mFrameCount, format, audio_is_linear_pcm(format), - channelMask, sampleRate, mSampleRate, hasFastRecorder(), tid); + channelMask, sampleRate, mSampleRate, hasFastCapture(), tid); *flags &= ~IAudioFlinger::TRACK_FAST; + // FIXME It's not clear that we need to enforce this any more, since we have a pipe. // For compatibility with AudioRecord calculation, buffer depth is forced // to be at least 2 x the record thread frame count and cover audio hardware latency. // This is probably too conservative, but legacy application code may depend on it. 
@@ -4731,8 +5143,13 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR } } } + *pFrameCount = frameCount; - // FIXME use flags and tid similar to createTrack_l() + lStatus = initCheck(); + if (lStatus != NO_ERROR) { + ALOGE("createRecordTrack_l() audio driver not initialized"); + goto Exit; + } { // scope for mLock Mutex::Autolock _l(mLock); @@ -4740,9 +5157,9 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR track = new RecordTrack(this, client, sampleRate, format, channelMask, frameCount, sessionId, uid); - if (track->getCblk() == 0) { - ALOGE("createRecordTrack_l() no control block"); - lStatus = NO_MEMORY; + lStatus = track->initCheck(); + if (lStatus != NO_ERROR) { + ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus); // track must be cleared from the caller as the caller has the AF lock goto Exit; } @@ -4761,12 +5178,11 @@ sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createR sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp); } } + lStatus = NO_ERROR; Exit: - if (status) { - *status = lStatus; - } + *status = lStatus; return track; } @@ -4779,129 +5195,123 @@ status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrac status_t status = NO_ERROR; if (event == AudioSystem::SYNC_EVENT_NONE) { - clearSyncStartEvent(); + recordTrack->clearSyncStartEvent(); } else if (event != AudioSystem::SYNC_EVENT_SAME) { - mSyncStartEvent = mAudioFlinger->createSyncEvent(event, + recordTrack->mSyncStartEvent = mAudioFlinger->createSyncEvent(event, triggerSession, recordTrack->sessionId(), syncStartEventCallback, - this); + recordTrack); // Sync event can be cancelled by the trigger session if the track is not in a // compatible state in which case we start record immediately - if (mSyncStartEvent->isCancelled()) { - clearSyncStartEvent(); + if (recordTrack->mSyncStartEvent->isCancelled()) { + recordTrack->clearSyncStartEvent(); } 
else { // do not wait for the event for more than AudioSystem::kSyncRecordStartTimeOutMs - mFramestoDrop = - ((AudioSystem::kSyncRecordStartTimeOutMs * mReqSampleRate) / 1000); + recordTrack->mFramesToDrop = - + ((AudioSystem::kSyncRecordStartTimeOutMs * recordTrack->mSampleRate) / 1000); } } { + // This section is a rendezvous between binder thread executing start() and RecordThread AutoMutex lock(mLock); - if (mActiveTrack != 0) { - if (recordTrack != mActiveTrack.get()) { - status = -EBUSY; - } else if (mActiveTrack->mState == TrackBase::PAUSING) { - mActiveTrack->mState = TrackBase::ACTIVE; + if (mActiveTracks.indexOf(recordTrack) >= 0) { + if (recordTrack->mState == TrackBase::PAUSING) { + ALOGV("active record track PAUSING -> ACTIVE"); + recordTrack->mState = TrackBase::ACTIVE; + } else { + ALOGV("active record track state %d", recordTrack->mState); } return status; } - recordTrack->mState = TrackBase::IDLE; - mActiveTrack = recordTrack; + // TODO consider other ways of handling this, such as changing the state to :STARTING and + // adding the track to mActiveTracks after returning from AudioSystem::startInput(), + // or using a separate command thread + recordTrack->mState = TrackBase::STARTING_1; + mActiveTracks.add(recordTrack); + mActiveTracksGen++; mLock.unlock(); status_t status = AudioSystem::startInput(mId); mLock.lock(); + // FIXME should verify that recordTrack is still in mActiveTracks if (status != NO_ERROR) { - mActiveTrack.clear(); - clearSyncStartEvent(); + mActiveTracks.remove(recordTrack); + mActiveTracksGen++; + recordTrack->clearSyncStartEvent(); return status; } - mRsmpInIndex = mFrameCount; - mBytesRead = 0; - if (mResampler != NULL) { - mResampler->reset(); + // Catch up with current buffer indices if thread is already running. + // This is what makes a new client discard all buffered data. 
If the track's mRsmpInFront + // was initialized to some value closer to the thread's mRsmpInFront, then the track could + // see previously buffered data before it called start(), but with greater risk of overrun. + + recordTrack->mRsmpInFront = mRsmpInRear; + recordTrack->mRsmpInUnrel = 0; + // FIXME why reset? + if (recordTrack->mResampler != NULL) { + recordTrack->mResampler->reset(); } - mActiveTrack->mState = TrackBase::RESUMING; + recordTrack->mState = TrackBase::STARTING_2; // signal thread to start - ALOGV("Signal record thread"); mWaitWorkCV.broadcast(); - // do not wait for mStartStopCond if exiting - if (exitPending()) { - mActiveTrack.clear(); - status = INVALID_OPERATION; - goto startError; - } - mStartStopCond.wait(mLock); - if (mActiveTrack == 0) { + if (mActiveTracks.indexOf(recordTrack) < 0) { ALOGV("Record failed to start"); status = BAD_VALUE; goto startError; } - ALOGV("Record started OK"); return status; } startError: AudioSystem::stopInput(mId); - clearSyncStartEvent(); + recordTrack->clearSyncStartEvent(); + // FIXME I wonder why we do not reset the state here? 
return status; } -void AudioFlinger::RecordThread::clearSyncStartEvent() -{ - if (mSyncStartEvent != 0) { - mSyncStartEvent->cancel(); - } - mSyncStartEvent.clear(); - mFramestoDrop = 0; -} - void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event) { sp<SyncEvent> strongEvent = event.promote(); if (strongEvent != 0) { - RecordThread *me = (RecordThread *)strongEvent->cookie(); - me->handleSyncStartEvent(strongEvent); - } -} - -void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event) -{ - if (event == mSyncStartEvent) { - // TODO: use actual buffer filling status instead of 2 buffers when info is available - // from audio HAL - mFramestoDrop = mFrameCount * 2; + sp<RefBase> ptr = strongEvent->cookie().promote(); + if (ptr != 0) { + RecordTrack *recordTrack = (RecordTrack *)ptr.get(); + recordTrack->handleSyncStartEvent(strongEvent); + } } } bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) { ALOGV("RecordThread::stop"); AutoMutex _l(mLock); - if (recordTrack != mActiveTrack.get() || recordTrack->mState == TrackBase::PAUSING) { + if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) { return false; } + // note that threadLoop may still be processing the track at this point [without lock] recordTrack->mState = TrackBase::PAUSING; // do not wait for mStartStopCond if exiting if (exitPending()) { return true; } + // FIXME incorrect usage of wait: no explicit predicate or loop mStartStopCond.wait(mLock); - // if we have been restarted, recordTrack == mActiveTrack.get() here - if (exitPending() || recordTrack != mActiveTrack.get()) { + // if we have been restarted, recordTrack is in mActiveTracks here + if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) { ALOGV("Record stopped OK"); return true; } return false; } -bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event) const +bool 
AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event __unused) const { return false; } -status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event) +status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event __unused) { #if 0 // This branch is currently dead code, but is preserved in case it will be needed in future if (!isValidSyncEvent(event)) { @@ -4932,7 +5342,7 @@ void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track) track->terminate(); track->mState = TrackBase::STOPPED; // active tracks are removed by threadLoop() - if (mActiveTrack != track) { + if (mActiveTracks.indexOf(track) < 0) { removeTrack_l(track); } } @@ -4952,104 +5362,119 @@ void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args) void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args) { - const size_t SIZE = 256; - char buffer[SIZE]; - String8 result; + fdprintf(fd, "\nInput thread %p:\n", this); - snprintf(buffer, SIZE, "\nInput thread %p internals\n", this); - result.append(buffer); - - if (mActiveTrack != 0) { - snprintf(buffer, SIZE, "In index: %zu\n", mRsmpInIndex); - result.append(buffer); - snprintf(buffer, SIZE, "Buffer size: %zu bytes\n", mBufferSize); - result.append(buffer); - snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL)); - result.append(buffer); - snprintf(buffer, SIZE, "Out channel count: %u\n", mReqChannelCount); - result.append(buffer); - snprintf(buffer, SIZE, "Out sample rate: %u\n", mReqSampleRate); - result.append(buffer); + if (mActiveTracks.size() > 0) { + fdprintf(fd, " Buffer size: %zu bytes\n", mBufferSize); } else { - result.append("No active record client\n"); + fdprintf(fd, " No active record clients\n"); } - write(fd, result.string(), result.size()); - dumpBase(fd, args); } -void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args) +void AudioFlinger::RecordThread::dumpTracks(int fd, const 
Vector<String16>& args __unused) { const size_t SIZE = 256; char buffer[SIZE]; String8 result; - snprintf(buffer, SIZE, "Input thread %p tracks\n", this); - result.append(buffer); - RecordTrack::appendDumpHeader(result); - for (size_t i = 0; i < mTracks.size(); ++i) { - sp<RecordTrack> track = mTracks[i]; - if (track != 0) { - track->dump(buffer, SIZE); - result.append(buffer); + size_t numtracks = mTracks.size(); + size_t numactive = mActiveTracks.size(); + size_t numactiveseen = 0; + fdprintf(fd, " %d Tracks", numtracks); + if (numtracks) { + fdprintf(fd, " of which %d are active\n", numactive); + RecordTrack::appendDumpHeader(result); + for (size_t i = 0; i < numtracks ; ++i) { + sp<RecordTrack> track = mTracks[i]; + if (track != 0) { + bool active = mActiveTracks.indexOf(track) >= 0; + if (active) { + numactiveseen++; + } + track->dump(buffer, SIZE, active); + result.append(buffer); + } } + } else { + fdprintf(fd, "\n"); } - if (mActiveTrack != 0) { - snprintf(buffer, SIZE, "\nInput thread %p active tracks\n", this); + if (numactiveseen != numactive) { + snprintf(buffer, SIZE, " The following tracks are in the active list but" + " not in the track list\n"); result.append(buffer); RecordTrack::appendDumpHeader(result); - mActiveTrack->dump(buffer, SIZE); - result.append(buffer); + for (size_t i = 0; i < numactive; ++i) { + sp<RecordTrack> track = mActiveTracks[i]; + if (mTracks.indexOf(track) < 0) { + track->dump(buffer, SIZE, true); + result.append(buffer); + } + } } write(fd, result.string(), result.size()); } // AudioBufferProvider interface -status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) -{ - size_t framesReq = buffer->frameCount; - size_t framesReady = mFrameCount - mRsmpInIndex; - int channelCount; - - if (framesReady == 0) { - mBytesRead = mInput->stream->read(mInput->stream, mRsmpInBuffer, mBufferSize); - if (mBytesRead <= 0) { - if ((mBytesRead < 0) && (mActiveTrack->mState == TrackBase::ACTIVE)) { - 
ALOGE("RecordThread::getNextBuffer() Error reading audio input"); - // Force input into standby so that it tries to - // recover at next read attempt - inputStandBy(); - usleep(kRecordThreadSleepUs); - } - buffer->raw = NULL; - buffer->frameCount = 0; - return NOT_ENOUGH_DATA; - } - mRsmpInIndex = 0; - framesReady = mFrameCount; - } - - if (framesReq > framesReady) { - framesReq = framesReady; - } - - if (mChannelCount == 1 && mReqChannelCount == 2) { - channelCount = 1; - } else { - channelCount = 2; - } - buffer->raw = mRsmpInBuffer + mRsmpInIndex * channelCount; - buffer->frameCount = framesReq; +status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer( + AudioBufferProvider::Buffer* buffer, int64_t pts __unused) +{ + RecordTrack *activeTrack = mRecordTrack; + sp<ThreadBase> threadBase = activeTrack->mThread.promote(); + if (threadBase == 0) { + buffer->frameCount = 0; + buffer->raw = NULL; + return NOT_ENOUGH_DATA; + } + RecordThread *recordThread = (RecordThread *) threadBase.get(); + int32_t rear = recordThread->mRsmpInRear; + int32_t front = activeTrack->mRsmpInFront; + ssize_t filled = rear - front; + // FIXME should not be P2 (don't want to increase latency) + // FIXME if client not keeping up, discard + LOG_ALWAYS_FATAL_IF(!(0 <= filled && (size_t) filled <= recordThread->mRsmpInFrames)); + // 'filled' may be non-contiguous, so return only the first contiguous chunk + front &= recordThread->mRsmpInFramesP2 - 1; + size_t part1 = recordThread->mRsmpInFramesP2 - front; + if (part1 > (size_t) filled) { + part1 = filled; + } + size_t ask = buffer->frameCount; + ALOG_ASSERT(ask > 0); + if (part1 > ask) { + part1 = ask; + } + if (part1 == 0) { + // Higher-level should keep mRsmpInBuffer full, and not call resampler if empty + LOG_ALWAYS_FATAL("RecordThread::getNextBuffer() starved"); + buffer->raw = NULL; + buffer->frameCount = 0; + activeTrack->mRsmpInUnrel = 0; + return NOT_ENOUGH_DATA; + } + + buffer->raw = recordThread->mRsmpInBuffer + 
front * recordThread->mChannelCount; + buffer->frameCount = part1; + activeTrack->mRsmpInUnrel = part1; return NO_ERROR; } // AudioBufferProvider interface -void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer) +void AudioFlinger::RecordThread::ResamplerBufferProvider::releaseBuffer( + AudioBufferProvider::Buffer* buffer) { - mRsmpInIndex += buffer->frameCount; + RecordTrack *activeTrack = mRecordTrack; + size_t stepCount = buffer->frameCount; + if (stepCount == 0) { + return; + } + ALOG_ASSERT(stepCount <= activeTrack->mRsmpInUnrel); + activeTrack->mRsmpInUnrel -= stepCount; + activeTrack->mRsmpInFront += stepCount; + buffer->raw = NULL; buffer->frameCount = 0; } @@ -5063,11 +5488,14 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l() AudioParameter param = AudioParameter(keyValuePair); int value; audio_format_t reqFormat = mFormat; - uint32_t reqSamplingRate = mReqSampleRate; - uint32_t reqChannelCount = mReqChannelCount; + uint32_t samplingRate = mSampleRate; + audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(mChannelCount); + // TODO Investigate when this code runs. Check with audio policy when a sample rate and + // channel count change can be requested. Do we mandate the first client defines the + // HAL sampling rate and channel count or do we allow changes on the fly? 
if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) { - reqSamplingRate = value; + samplingRate = value; reconfig = true; } if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) { @@ -5079,14 +5507,19 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l() } } if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) { - reqChannelCount = popcount(value); - reconfig = true; + audio_channel_mask_t mask = (audio_channel_mask_t) value; + if (mask != AUDIO_CHANNEL_IN_MONO && mask != AUDIO_CHANNEL_IN_STEREO) { + status = BAD_VALUE; + } else { + channelMask = mask; + reconfig = true; + } } if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) { // do not accept frame count changes if tracks are open as the track buffer // size depends on frame count and correct behavior would not be guaranteed // if frame count is changed after track creation - if (mActiveTrack != 0) { + if (mActiveTracks.size() > 0) { status = INVALID_OPERATION; } else { reconfig = true; @@ -5129,6 +5562,7 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l() } mAudioSource = (audio_source_t)value; } + if (status == NO_ERROR) { status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string()); @@ -5142,14 +5576,15 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l() reqFormat == mInput->stream->common.get_format(&mInput->stream->common) && reqFormat == AUDIO_FORMAT_PCM_16_BIT && (mInput->stream->common.get_sample_rate(&mInput->stream->common) - <= (2 * reqSamplingRate)) && + <= (2 * samplingRate)) && popcount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 && - (reqChannelCount <= FCC_2)) { + (channelMask == AUDIO_CHANNEL_IN_MONO || + channelMask == AUDIO_CHANNEL_IN_STEREO)) { status = NO_ERROR; } if (status == NO_ERROR) { - readInputParameters(); + readInputParameters_l(); sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED); } } @@ -5179,9 
+5614,9 @@ String8 AudioFlinger::RecordThread::getParameters(const String8& keys) return out_s8; } -void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) { +void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param __unused) { AudioSystem::OutputDescriptor desc; - void *param2 = NULL; + const void *param2 = NULL; switch (event) { case AudioSystem::INPUT_OPENED: @@ -5201,53 +5636,35 @@ void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) { mAudioFlinger->audioConfigChanged_l(event, mId, param2); } -void AudioFlinger::RecordThread::readInputParameters() +void AudioFlinger::RecordThread::readInputParameters_l() { - delete[] mRsmpInBuffer; - // mRsmpInBuffer is always assigned a new[] below - delete[] mRsmpOutBuffer; - mRsmpOutBuffer = NULL; - delete mResampler; - mResampler = NULL; - mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common); mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common); mChannelCount = popcount(mChannelMask); mFormat = mInput->stream->common.get_format(&mInput->stream->common); if (mFormat != AUDIO_FORMAT_PCM_16_BIT) { - ALOGE("HAL format %d not supported; must be AUDIO_FORMAT_PCM_16_BIT", mFormat); + ALOGE("HAL format %#x not supported; must be AUDIO_FORMAT_PCM_16_BIT", mFormat); } mFrameSize = audio_stream_frame_size(&mInput->stream->common); mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common); mFrameCount = mBufferSize / mFrameSize; - mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount]; - - if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) - { - int channelCount; - // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid - // stereo to mono post process as the resampler always outputs stereo. 
- if (mChannelCount == 1 && mReqChannelCount == 2) { - channelCount = 1; - } else { - channelCount = 2; - } - mResampler = AudioResampler::create(16, channelCount, mReqSampleRate); - mResampler->setSampleRate(mSampleRate); - mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN); - mRsmpOutBuffer = new int32_t[mFrameCount * FCC_2]; - - // optmization: if mono to mono, alter input frame count as if we were inputing - // stereo samples - if (mChannelCount == 1 && mReqChannelCount == 1) { - mFrameCount >>= 1; - } + // This is the formula for calculating the temporary buffer size. + // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to + // 1 full output buffer, regardless of the alignment of the available input. + // The value is somewhat arbitrary, and could probably be even larger. + // A larger value should allow more old data to be read after a track calls start(), + // without increasing latency. + mRsmpInFrames = mFrameCount * 7; + mRsmpInFramesP2 = roundup(mRsmpInFrames); + delete[] mRsmpInBuffer; + // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer + mRsmpInBuffer = new int16_t[(mRsmpInFramesP2 + mFrameCount - 1) * mChannelCount]; - } - mRsmpInIndex = mFrameCount; + // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints. + // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks? 
} -unsigned int AudioFlinger::RecordThread::getInputFramesLost() +uint32_t AudioFlinger::RecordThread::getInputFramesLost() { Mutex::Autolock _l(mLock); if (initCheck() != NO_ERROR) { diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h index a2fb874..5617c0c 100644 --- a/services/audioflinger/Threads.h +++ b/services/audioflinger/Threads.h @@ -36,6 +36,8 @@ public: audio_devices_t outDevice, audio_devices_t inDevice, type_t type); virtual ~ThreadBase(); + virtual status_t readyToRun(); + void dumpBase(int fd, const Vector<String16>& args); void dumpEffectChains(int fd, const Vector<String16>& args); @@ -63,7 +65,7 @@ public: class IoConfigEvent : public ConfigEvent { public: IoConfigEvent(int event, int param) : - ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(event) {} + ConfigEvent(CFG_EVENT_IO), mEvent(event), mParam(param) {} virtual ~IoConfigEvent() {} int event() const { return mEvent; } @@ -141,6 +143,7 @@ public: void sendIoConfigEvent_l(int event, int param = 0); void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio); void processConfigEvents(); + void processConfigEvents_l(); // see note at declaration of mStandby, mOutDevice and mInDevice bool standby() const { return mStandby; } @@ -156,7 +159,7 @@ public: int sessionId, effect_descriptor_t *desc, int *enabled, - status_t *status); + status_t *status /*non-NULL*/); void disconnectEffect(const sp< EffectModule>& effect, EffectHandle *handle, bool unpinIfLast); @@ -198,13 +201,13 @@ public: // effect void removeEffect_l(const sp< EffectModule>& effect); // detach all tracks connected to an auxiliary effect - virtual void detachAuxEffect_l(int effectId) {} + virtual void detachAuxEffect_l(int effectId __unused) {} // returns either EFFECT_SESSION if effects on this audio session exist in one // chain, or TRACK_SESSION if tracks on this audio session exist, or both virtual uint32_t hasAudioSession(int sessionId) const = 0; // the value returned by default implementation 
is not important as the // strategy is only meaningful for PlaybackThread which implements this method - virtual uint32_t getStrategyForSession_l(int sessionId) { return 0; } + virtual uint32_t getStrategyForSession_l(int sessionId __unused) { return 0; } // suspend or restore effect according to the type of effect passed. a NULL // type pointer means suspend all effects in the session @@ -267,14 +270,15 @@ protected: const sp<AudioFlinger> mAudioFlinger; - // updated by PlaybackThread::readOutputParameters() or - // RecordThread::readInputParameters() + // updated by PlaybackThread::readOutputParameters_l() or + // RecordThread::readInputParameters_l() uint32_t mSampleRate; size_t mFrameCount; // output HAL, direct output, record audio_channel_mask_t mChannelMask; uint32_t mChannelCount; size_t mFrameSize; audio_format_t mFormat; + size_t mBufferSize; // HAL buffer size for read() or write() // Parameter sequence by client: binder thread calling setParameters(): // 1. Lock mLock @@ -303,12 +307,12 @@ protected: Vector<ConfigEvent *> mConfigEvents; // These fields are written and read by thread itself without lock or barrier, - // and read by other threads without lock or barrier via standby() , outDevice() + // and read by other threads without lock or barrier via standby(), outDevice() // and inDevice(). // Because of the absence of a lock or barrier, any other thread that reads // these fields must use the information in isolation, or be prepared to deal // with possibility that it might be inconsistent with other information. - bool mStandby; // Whether thread is currently in standby. + bool mStandby; // Whether thread is currently in standby. 
audio_devices_t mOutDevice; // output device audio_devices_t mInDevice; // input device audio_source_t mAudioSource; // (see audio.h, audio_source_t) @@ -358,7 +362,6 @@ public: void dump(int fd, const Vector<String16>& args); // Thread virtuals - virtual status_t readyToRun(); virtual bool threadLoop(); // RefBase @@ -391,7 +394,7 @@ protected: virtual bool waitingAsyncCallback(); virtual bool waitingAsyncCallback_l(); virtual bool shouldStandby_l(); - + virtual void onAddNewTrack_l(); // ThreadBase virtuals virtual void preExit(); @@ -419,13 +422,13 @@ public: uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, const sp<IMemory>& sharedBuffer, int sessionId, IAudioFlinger::track_flags_t *flags, pid_t tid, int uid, - status_t *status); + status_t *status /*non-NULL*/); AudioStreamOut* getOutput() const; AudioStreamOut* clearOutput(); @@ -447,7 +450,11 @@ public: virtual String8 getParameters(const String8& keys); virtual void audioConfigChanged_l(int event, int param = 0); status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames); - int16_t *mixBuffer() const { return mMixBuffer; }; + // FIXME rename mixBuffer() to sinkBuffer() and remove int16_t* dependency. + // Consider also removing and passing an explicit mMainBuffer initialization + // parameter to AF::PlaybackThread::Track::Track(). 
+ int16_t *mixBuffer() const { + return reinterpret_cast<int16_t *>(mSinkBuffer); }; virtual void detachAuxEffect_l(int effectId); status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track, @@ -475,11 +482,68 @@ public: status_t getTimestamp_l(AudioTimestamp& timestamp); protected: - // updated by readOutputParameters() + // updated by readOutputParameters_l() size_t mNormalFrameCount; // normal mixer and effects - int16_t* mMixBuffer; // frame size aligned mix buffer - int8_t* mAllocMixBuffer; // mixer buffer allocation address + void* mSinkBuffer; // frame size aligned sink buffer + + // TODO: + // Rearrange the buffer info into a struct/class with + // clear, copy, construction, destruction methods. + // + // mSinkBuffer also has associated with it: + // + // mSinkBufferSize: Sink Buffer Size + // mFormat: Sink Buffer Format + + // Mixer Buffer (mMixerBuffer*) + // + // In the case of floating point or multichannel data, which is not in the + // sink format, it is required to accumulate in a higher precision or greater channel count + // buffer before downmixing or data conversion to the sink buffer. + + // Set to "true" to enable the Mixer Buffer otherwise mixer output goes to sink buffer. + bool mMixerBufferEnabled; + + // Storage, 32 byte aligned (may make this alignment a requirement later). + // Due to constraints on mNormalFrameCount, the buffer size is a multiple of 16 frames. + void* mMixerBuffer; + + // Size of mMixerBuffer in bytes: mNormalFrameCount * #channels * sampsize. + size_t mMixerBufferSize; + + // The audio format of mMixerBuffer. Set to AUDIO_FORMAT_PCM_(FLOAT|16_BIT) only. + audio_format_t mMixerBufferFormat; + + // An internal flag set to true by MixerThread::prepareTracks_l() + // when mMixerBuffer contains valid data after mixing. 
+ bool mMixerBufferValid; + + // Effects Buffer (mEffectsBuffer*) + // + // In the case of effects data, which is not in the sink format, + // it is required to accumulate in a different buffer before data conversion + // to the sink buffer. + + // Set to "true" to enable the Effects Buffer otherwise effects output goes to sink buffer. + bool mEffectBufferEnabled; + + // Storage, 32 byte aligned (may make this alignment a requirement later). + // Due to constraints on mNormalFrameCount, the buffer size is a multiple of 16 frames. + void* mEffectBuffer; + + // Size of mEffectsBuffer in bytes: mNormalFrameCount * #channels * sampsize. + size_t mEffectBufferSize; + + // The audio format of mEffectsBuffer. Set to AUDIO_FORMAT_PCM_16_BIT only. + audio_format_t mEffectBufferFormat; + + // An internal flag set to true by MixerThread::prepareTracks_l() + // when mEffectsBuffer contains valid data after mixing. + // + // When this is set, all mixer data is routed into the effects buffer + // for any processing (including output processing). + bool mEffectBufferValid; // suspend count, > 0 means suspended. While suspended, the thread continues to pull from // tracks and mix, but doesn't write to HAL. 
A2DP and SCO HAL implementations can't handle @@ -539,7 +603,7 @@ private: void removeTrack_l(const sp<Track>& track); void broadcast_l(); - void readOutputParameters(); + void readOutputParameters_l(); virtual void dumpInternals(int fd, const Vector<String16>& args); void dumpTracks(int fd, const Vector<String16>& args); @@ -558,7 +622,7 @@ private: // FIXME rename these former local variables of threadLoop to standard "m" names nsecs_t standbyTime; - size_t mixBufferSize; + size_t mSinkBufferSize; // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l() uint32_t activeSleepTime; @@ -623,13 +687,12 @@ private: sp<NBLog::Writer> mFastMixerNBLogWriter; public: virtual bool hasFastMixer() const = 0; - virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const + virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex __unused) const { FastTrackUnderruns dummy; return dummy; } protected: // accessed by both binder threads and within threadLoop(), lock on mutex needed unsigned mFastTrackAvailMask; // bit i set if fast track [i] is available - virtual void flushOutput_l(); private: // timestamp latch: @@ -748,11 +811,11 @@ protected: // threadLoop snippets virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove); virtual void threadLoop_exit(); - virtual void flushOutput_l(); virtual bool waitingAsyncCallback(); virtual bool waitingAsyncCallback_l(); virtual bool shouldStandby_l(); + virtual void onAddNewTrack_l(); private: void flushHw_l(); @@ -838,17 +901,28 @@ public: // record thread -class RecordThread : public ThreadBase, public AudioBufferProvider - // derives from AudioBufferProvider interface for use by resampler +class RecordThread : public ThreadBase { public: + class RecordTrack; + class ResamplerBufferProvider : public AudioBufferProvider + // derives from AudioBufferProvider interface for use by resampler + { + public: + ResamplerBufferProvider(RecordTrack* recordTrack) : 
mRecordTrack(recordTrack) { } + virtual ~ResamplerBufferProvider() { } + // AudioBufferProvider interface + virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts); + virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); + private: + RecordTrack * const mRecordTrack; + }; + #include "RecordTracks.h" RecordThread(const sp<AudioFlinger>& audioFlinger, AudioStreamIn *input, - uint32_t sampleRate, - audio_channel_mask_t channelMask, audio_io_handle_t id, audio_devices_t outDevice, audio_devices_t inDevice @@ -867,23 +941,23 @@ public: // Thread virtuals virtual bool threadLoop(); - virtual status_t readyToRun(); // RefBase virtual void onFirstRef(); virtual status_t initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; } + sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l( const sp<AudioFlinger::Client>& client, uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask, - size_t frameCount, + size_t *pFrameCount, int sessionId, int uid, IAudioFlinger::track_flags_t *flags, pid_t tid, - status_t *status); + status_t *status /*non-NULL*/); status_t start(RecordTrack* recordTrack, AudioSystem::sync_event_t event, @@ -897,15 +971,12 @@ public: AudioStreamIn* clearInput(); virtual audio_stream_t* stream() const; - // AudioBufferProvider interface - virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts); - virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer); virtual bool checkForNewParameters_l(); virtual String8 getParameters(const String8& keys); virtual void audioConfigChanged_l(int event, int param = 0); - void readInputParameters(); - virtual unsigned int getInputFramesLost(); + void readInputParameters_l(); + virtual uint32_t getInputFramesLost(); virtual status_t addEffectChain_l(const sp<EffectChain>& chain); virtual size_t removeEffectChain_l(const sp<EffectChain>& chain); @@ -920,44 +991,33 @@ public: virtual bool isValidSyncEvent(const 
sp<SyncEvent>& event) const; static void syncStartEventCallback(const wp<SyncEvent>& event); - void handleSyncStartEvent(const sp<SyncEvent>& event); virtual size_t frameCount() const { return mFrameCount; } - bool hasFastRecorder() const { return false; } + bool hasFastCapture() const { return false; } private: - void clearSyncStartEvent(); - // Enter standby if not already in standby, and set mStandby flag - void standby(); + void standbyIfNotAlreadyInStandby(); // Call the HAL standby method unconditionally, and don't change mStandby flag - void inputStandBy(); + void inputStandBy(); AudioStreamIn *mInput; SortedVector < sp<RecordTrack> > mTracks; - // mActiveTrack has dual roles: it indicates the current active track, and + // mActiveTracks has dual roles: it indicates the current active track(s), and // is used together with mStartStopCond to indicate start()/stop() progress - sp<RecordTrack> mActiveTrack; + SortedVector< sp<RecordTrack> > mActiveTracks; + // generation counter for mActiveTracks + int mActiveTracksGen; Condition mStartStopCond; - // updated by RecordThread::readInputParameters() - AudioResampler *mResampler; - // interleaved stereo pairs of fixed-point signed Q19.12 - int32_t *mRsmpOutBuffer; - int16_t *mRsmpInBuffer; // [mFrameCount * mChannelCount] - size_t mRsmpInIndex; - size_t mBufferSize; // stream buffer size for read() - const uint32_t mReqChannelCount; - const uint32_t mReqSampleRate; - ssize_t mBytesRead; - // sync event triggering actual audio capture. Frames read before this event will - // be dropped and therefore not read by the application. - sp<SyncEvent> mSyncStartEvent; - // number of captured frames to drop after the start sync event has been received. 
- // when < 0, maximum frames to drop before starting capture even if sync event is - // not received - ssize_t mFramestoDrop; + // resampler converts input at HAL Hz to output at AudioRecord client Hz + int16_t *mRsmpInBuffer; // see new[] for details on the size + size_t mRsmpInFrames; // size of resampler input in frames + size_t mRsmpInFramesP2;// size rounded up to a power-of-2 + + // rolling index that is never cleared + int32_t mRsmpInRear; // last filled frame + 1 // For dumpsys const sp<NBAIO_Sink> mTeeSink; diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h index cd201d9..58705c4 100644 --- a/services/audioflinger/TrackBase.h +++ b/services/audioflinger/TrackBase.h @@ -34,7 +34,9 @@ public: RESUMING, ACTIVE, PAUSING, - PAUSED + PAUSED, + STARTING_1, // for RecordTrack only + STARTING_2, // for RecordTrack only }; TrackBase(ThreadBase *thread, @@ -48,6 +50,7 @@ public: int uid, bool isOut); virtual ~TrackBase(); + virtual status_t initCheck() const { return getCblk() != 0 ? NO_ERROR : NO_MEMORY; } virtual status_t start(AudioSystem::sync_event_t event, int triggerSession) = 0; @@ -78,15 +81,6 @@ protected: virtual uint32_t sampleRate() const { return mSampleRate; } - // Return a pointer to the start of a contiguous slice of the track buffer. - // Parameter 'offset' is the requested start position, expressed in - // monotonically increasing frame units relative to the track epoch. - // Parameter 'frames' is the requested length, also in frame units. - // Always returns non-NULL. It is the caller's responsibility to - // verify that this will be successful; the result of calling this - // function with invalid 'offset' or 'frames' is undefined. 
- void* getBuffer(uint32_t offset, uint32_t frames) const; - bool isStopped() const { return (mState == STOPPED || mState == FLUSHED); } diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp index d07113c..1064fd1 100644 --- a/services/audioflinger/Tracks.cpp +++ b/services/audioflinger/Tracks.cpp @@ -116,12 +116,11 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( if (client != 0) { mCblkMemory = client->heap()->allocate(size); - if (mCblkMemory != 0) { - mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer()); - // can't assume mCblk != NULL - } else { + if (mCblkMemory == 0 || + (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) { ALOGE("not enough memory for AudioTrack size=%u", size); client->heap()->dump("AudioTrack"); + mCblkMemory.clear(); return; } } else { @@ -134,7 +133,6 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( if (mCblk != NULL) { new(mCblk) audio_track_cblk_t(); // clear all buffers - mCblk->frameCount_ = frameCount; if (sharedBuffer == 0) { mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); memset(mBuffer, 0, bufferSize); @@ -148,7 +146,7 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase( #ifdef TEE_SINK if (mTeeSinkTrackEnabled) { NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount); - if (pipeFormat != Format_Invalid) { + if (Format_isValid(pipeFormat)) { Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat); size_t numCounterOffers = 0; const NBAIO_Format offers[1] = {pipeFormat}; @@ -275,6 +273,11 @@ status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer, if (!mTrack->isTimedTrack()) return INVALID_OPERATION; + if (buffer == 0 || buffer->pointer() == NULL) { + ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()"); + return BAD_VALUE; + } + PlaybackThread::TimedTrack* tt = reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); return tt->queueTimedBuffer(buffer, pts); @@ -344,41 +347,42 @@ 
AudioFlinger::PlaybackThread::Track::Track( mCachedVolume(1.0), mIsInvalid(false), mAudioTrackServerProxy(NULL), - mResumeToStopping(false) + mResumeToStopping(false), + mFlushHwPending(false) { - if (mCblk != NULL) { - if (sharedBuffer == 0) { - mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount, - mFrameSize); - } else { - mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount, - mFrameSize); - } - mServerProxy = mAudioTrackServerProxy; - // to avoid leaking a track name, do not allocate one unless there is an mCblk - mName = thread->getTrackName_l(channelMask, sessionId); - if (mName < 0) { - ALOGE("no more track names available"); - return; - } - // only allocate a fast track index if we were able to allocate a normal track name - if (flags & IAudioFlinger::TRACK_FAST) { - mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads(); - ALOG_ASSERT(thread->mFastTrackAvailMask != 0); - int i = __builtin_ctz(thread->mFastTrackAvailMask); - ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks); - // FIXME This is too eager. We allocate a fast track index before the - // fast track becomes active. Since fast tracks are a scarce resource, - // this means we are potentially denying other more important fast tracks from - // being created. It would be better to allocate the index dynamically. 
- mFastIndex = i; - // Read the initial underruns because this field is never cleared by the fast mixer - mObservedUnderruns = thread->getFastTrackUnderruns(i); - thread->mFastTrackAvailMask &= ~(1 << i); - } + if (mCblk == NULL) { + return; + } + + if (sharedBuffer == 0) { + mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount, + mFrameSize); + } else { + mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount, + mFrameSize); + } + mServerProxy = mAudioTrackServerProxy; + + mName = thread->getTrackName_l(channelMask, sessionId); + if (mName < 0) { + ALOGE("no more track names available"); + return; + } + // only allocate a fast track index if we were able to allocate a normal track name + if (flags & IAudioFlinger::TRACK_FAST) { + mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads(); + ALOG_ASSERT(thread->mFastTrackAvailMask != 0); + int i = __builtin_ctz(thread->mFastTrackAvailMask); + ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks); + // FIXME This is too eager. We allocate a fast track index before the + // fast track becomes active. Since fast tracks are a scarce resource, + // this means we are potentially denying other more important fast tracks from + // being created. It would be better to allocate the index dynamically. 
+ mFastIndex = i; + // Read the initial underruns because this field is never cleared by the fast mixer + mObservedUnderruns = thread->getFastTrackUnderruns(i); + thread->mFastTrackAvailMask &= ~(1 << i); } - ALOGV("Track constructor name %d, calling pid %d", mName, - IPCThreadState::self()->getCallingPid()); } AudioFlinger::PlaybackThread::Track::~Track() @@ -396,6 +400,15 @@ AudioFlinger::PlaybackThread::Track::~Track() } } +status_t AudioFlinger::PlaybackThread::Track::initCheck() const +{ + status_t status = TrackBase::initCheck(); + if (status == NO_ERROR && mName < 0) { + status = NO_MEMORY; + } + return status; +} + void AudioFlinger::PlaybackThread::Track::destroy() { // NOTE: destroyTrack_l() can remove a strong reference to this Track @@ -422,17 +435,19 @@ void AudioFlinger::PlaybackThread::Track::destroy() /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result) { - result.append(" Name Client Type Fmt Chn mask Session fCount S F SRate " + result.append(" Name Active Client Type Fmt Chn mask Session fCount S F SRate " "L dB R dB Server Main buf Aux Buf Flags UndFrmCnt\n"); } -void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size) +void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size, bool active) { uint32_t vlr = mAudioTrackServerProxy->getVolumeLR(); if (isFastTrack()) { - sprintf(buffer, " F %2d", mFastIndex); + sprintf(buffer, " F %2d", mFastIndex); + } else if (mName >= AudioMixer::TRACK0) { + sprintf(buffer, " %4d", mName - AudioMixer::TRACK0); } else { - sprintf(buffer, " %4d", mName - AudioMixer::TRACK0); + sprintf(buffer, " none"); } track_state state = mState; char stateChar; @@ -487,8 +502,9 @@ void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size) nowInUnderrun = '?'; break; } - snprintf(&buffer[7], size-7, " %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g " + snprintf(&buffer[8], size-8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g " "%08X 
%p %p 0x%03X %9u%c\n", + active ? "yes" : "no", (mClient == 0) ? getpid_cached : mClient->pid(), mStreamType, mFormat, @@ -514,7 +530,7 @@ uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const { // AudioBufferProvider interface status_t AudioFlinger::PlaybackThread::Track::getNextBuffer( - AudioBufferProvider::Buffer* buffer, int64_t pts) + AudioBufferProvider::Buffer* buffer, int64_t pts __unused) { ServerProxy::Buffer buf; size_t desiredFrames = buffer->frameCount; @@ -551,7 +567,14 @@ size_t AudioFlinger::PlaybackThread::Track::framesReleased() const // Don't call for fast tracks; the framesReady() could result in priority inversion bool AudioFlinger::PlaybackThread::Track::isReady() const { - if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing() || isStopping()) { + if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) { + return true; + } + + if (isStopping()) { + if (framesReady() > 0) { + mFillingUpStatus = FS_FILLED; + } return true; } @@ -564,8 +587,8 @@ bool AudioFlinger::PlaybackThread::Track::isReady() const { return false; } -status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event, - int triggerSession) +status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused, + int triggerSession __unused) { status_t status = NO_ERROR; ALOGV("start(%d), calling pid %d session %d", @@ -588,7 +611,10 @@ status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t ev // here the track could be either new, or restarted // in both cases "unstop" the track - if (state == PAUSED) { + // initial state-stopping. next state-pausing. + // What if resume is called ? 
+ + if (state == PAUSED || state == PAUSING) { if (mResumeToStopping) { // happened we need to resume to STOPPING_1 mState = TrackBase::STOPPING_1; @@ -719,6 +745,7 @@ void AudioFlinger::PlaybackThread::Track::flush() mRetryCount = PlaybackThread::kMaxTrackRetriesOffload; } + mFlushHwPending = true; mResumeToStopping = false; } else { if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && @@ -739,11 +766,19 @@ void AudioFlinger::PlaybackThread::Track::flush() // Prevent flush being lost if the track is flushed and then resumed // before mixer thread can run. This is important when offloading // because the hardware buffer could hold a large amount of audio - playbackThread->flushOutput_l(); playbackThread->broadcast_l(); } } +// must be called with thread lock held +void AudioFlinger::PlaybackThread::Track::flushAck() +{ + if (!isOffloaded()) + return; + + mFlushHwPending = false; +} + void AudioFlinger::PlaybackThread::Track::reset() { // Do not reset twice to avoid discarding data written just after a flush and before @@ -966,6 +1001,33 @@ void AudioFlinger::PlaybackThread::Track::signal() } } +//To be called with thread lock held +bool AudioFlinger::PlaybackThread::Track::isResumePending() { + + if (mState == RESUMING) + return true; + /* Resume is pending if track was stopping before pause was called */ + if (mState == STOPPING_1 && + mResumeToStopping) + return true; + + return false; +} + +//To be called with thread lock held +void AudioFlinger::PlaybackThread::Track::resumeAck() { + + + if (mState == RESUMING) + mState = ACTIVE; + + // Other possibility of pending resume is stopping_1 state + // Do not update the state from stopping as this prevents + // drain being called. 
+ if (mState == STOPPING_1) { + mResumeToStopping = false; + } +} // ---------------------------------------------------------------------------- sp<AudioFlinger::PlaybackThread::TimedTrack> @@ -979,7 +1041,8 @@ AudioFlinger::PlaybackThread::TimedTrack::create( size_t frameCount, const sp<IMemory>& sharedBuffer, int sessionId, - int uid) { + int uid) +{ if (!client->reserveTimedTrack()) return 0; @@ -1045,15 +1108,14 @@ status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer( mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize, "AudioFlingerTimed"); - if (mTimedMemoryDealer == NULL) + if (mTimedMemoryDealer == NULL) { return NO_MEMORY; + } } sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size); - if (newBuffer == NULL) { - newBuffer = mTimedMemoryDealer->allocate(size); - if (newBuffer == NULL) - return NO_MEMORY; + if (newBuffer == 0 || newBuffer->pointer() == NULL) { + return NO_MEMORY; } *buffer = newBuffer; @@ -1152,7 +1214,7 @@ void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l( void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l( const TimedBuffer& buf, - const char* logTag) { + const char* logTag __unused) { uint32_t bufBytes = buf.buffer()->size(); uint32_t consumedAlready = buf.position(); @@ -1463,7 +1525,7 @@ void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer( mTrimQueueHeadOnRelease = false; } } else { - LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no" + LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no" " buffers in the timed buffer queue"); } @@ -1504,9 +1566,9 @@ AudioFlinger::PlaybackThread::OutputTrack::OutputTrack( mOutBuffer.frameCount = 0; playbackThread->mTracks.add(this); ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, " - "mCblk->frameCount_ %u, mChannelMask 0x%08x", + "frameCount %u, mChannelMask 0x%08x", mCblk, mBuffer, - mCblk->frameCount_, mChannelMask); + frameCount, mChannelMask); // since client and 
server are in the same process, // the buffer has the same virtual address on both sides mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize); @@ -1748,7 +1810,7 @@ status_t AudioFlinger::RecordHandle::onTransact( // ---------------------------------------------------------------------------- -// RecordTrack constructor must be called with AudioFlinger::mLock held +// RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held AudioFlinger::RecordThread::RecordTrack::RecordTrack( RecordThread *thread, const sp<Client>& client, @@ -1760,24 +1822,40 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack( int uid) : TrackBase(thread, client, sampleRate, format, channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, false /*isOut*/), - mOverflow(false) + mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0), + // See real initialization of mRsmpInFront at RecordThread::start() + mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL) { - ALOGV("RecordTrack constructor"); - if (mCblk != NULL) { - mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, - mFrameSize); - mServerProxy = mAudioRecordServerProxy; + if (mCblk == NULL) { + return; + } + + mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize); + + uint32_t channelCount = popcount(channelMask); + // FIXME I don't understand either of the channel count checks + if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 && + channelCount <= FCC_2) { + // sink SR + mResampler = AudioResampler::create(16, thread->mChannelCount, sampleRate); + // source SR + mResampler->setSampleRate(thread->mSampleRate); + mResampler->setVolume(AudioMixer::UNITY_GAIN, AudioMixer::UNITY_GAIN); + mResamplerBufferProvider = new ResamplerBufferProvider(this); } } AudioFlinger::RecordThread::RecordTrack::~RecordTrack() { ALOGV("%s", __func__); + delete 
mResampler; + delete[] mRsmpOutBuffer; + delete mResamplerBufferProvider; } // AudioBufferProvider interface status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, - int64_t pts) + int64_t pts __unused) { ServerProxy::Buffer buf; buf.mFrameCount = buffer->frameCount; @@ -1845,19 +1923,45 @@ void AudioFlinger::RecordThread::RecordTrack::invalidate() /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result) { - result.append("Client Fmt Chn mask Session S Server fCount\n"); + result.append(" Active Client Fmt Chn mask Session S Server fCount Resampling\n"); } -void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size) +void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active) { - snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6zu\n", + snprintf(buffer, size, " %6s %6u %3u %08X %7u %1d %08X %6zu %10d\n", + active ? "yes" : "no", (mClient == 0) ? getpid_cached : mClient->pid(), mFormat, mChannelMask, mSessionId, mState, mCblk->mServer, - mFrameCount); + mFrameCount, + mResampler != NULL); + +} + +void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event) +{ + if (event == mSyncStartEvent) { + ssize_t framesToDrop = 0; + sp<ThreadBase> threadBase = mThread.promote(); + if (threadBase != 0) { + // TODO: use actual buffer filling status instead of 2 buffers when info is available + // from audio HAL + framesToDrop = threadBase->mFrameCount * 2; + } + mFramesToDrop = framesToDrop; + } +} + +void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent() +{ + if (mSyncStartEvent != 0) { + mSyncStartEvent->cancel(); + mSyncStartEvent.clear(); + } + mFramesToDrop = 0; } }; // namespace android diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp index 7a314cf..e14b4ae 100644 --- a/services/audioflinger/test-resample.cpp +++ 
b/services/audioflinger/test-resample.cpp @@ -24,81 +24,112 @@ #include <sys/mman.h> #include <sys/stat.h> #include <errno.h> +#include <inttypes.h> #include <time.h> #include <math.h> +#include <audio_utils/primitives.h> +#include <audio_utils/sndfile.h> +#include <utils/Vector.h> using namespace android; -struct HeaderWav { - HeaderWav(size_t size, int nc, int sr, int bits) { - strncpy(RIFF, "RIFF", 4); - chunkSize = size + sizeof(HeaderWav); - strncpy(WAVE, "WAVE", 4); - strncpy(fmt, "fmt ", 4); - fmtSize = 16; - audioFormat = 1; - numChannels = nc; - samplesRate = sr; - byteRate = sr * numChannels * (bits/8); - align = nc*(bits/8); - bitsPerSample = bits; - strncpy(data, "data", 4); - dataSize = size; - } - - char RIFF[4]; // RIFF - uint32_t chunkSize; // File size - char WAVE[4]; // WAVE - char fmt[4]; // fmt\0 - uint32_t fmtSize; // fmt size - uint16_t audioFormat; // 1=PCM - uint16_t numChannels; // num channels - uint32_t samplesRate; // sample rate in hz - uint32_t byteRate; // Bps - uint16_t align; // 2=16-bit mono, 4=16-bit stereo - uint16_t bitsPerSample; // bits per sample - char data[4]; // "data" - uint32_t dataSize; // size -}; +static bool gVerbose = false; static int usage(const char* name) { - fprintf(stderr,"Usage: %s [-p] [-h] [-s] [-q {dq|lq|mq|hq|vhq}] [-i input-sample-rate] " - "[-o output-sample-rate] [<input-file>] <output-file>\n", name); + fprintf(stderr,"Usage: %s [-p] [-f] [-F] [-v] [-c channels]" + " [-q {dq|lq|mq|hq|vhq|dlq|dmq|dhq}]" + " [-i input-sample-rate] [-o output-sample-rate]" + " [-O csv] [-P csv] [<input-file>]" + " <output-file>\n", name); fprintf(stderr," -p enable profiling\n"); - fprintf(stderr," -h create wav file\n"); - fprintf(stderr," -s stereo\n"); + fprintf(stderr," -f enable filter profiling\n"); + fprintf(stderr," -F enable floating point -q {dlq|dmq|dhq} only"); + fprintf(stderr," -v verbose : log buffer provider calls\n"); + fprintf(stderr," -c # channels (1-2 for lq|mq|hq; 1-8 for dlq|dmq|dhq)\n"); 
fprintf(stderr," -q resampler quality\n"); fprintf(stderr," dq : default quality\n"); fprintf(stderr," lq : low quality\n"); fprintf(stderr," mq : medium quality\n"); fprintf(stderr," hq : high quality\n"); fprintf(stderr," vhq : very high quality\n"); - fprintf(stderr," -i input file sample rate\n"); + fprintf(stderr," dlq : dynamic low quality\n"); + fprintf(stderr," dmq : dynamic medium quality\n"); + fprintf(stderr," dhq : dynamic high quality\n"); + fprintf(stderr," -i input file sample rate (ignored if input file is specified)\n"); fprintf(stderr," -o output file sample rate\n"); + fprintf(stderr," -O # frames output per call to resample() in CSV format\n"); + fprintf(stderr," -P # frames provided per call to resample() in CSV format\n"); return -1; } -int main(int argc, char* argv[]) { +// Convert a list of integers in CSV format to a Vector of those values. +// Returns the number of elements in the list, or -1 on error. +int parseCSV(const char *string, Vector<int>& values) +{ + // pass 1: count the number of values and do syntax check + size_t numValues = 0; + bool hadDigit = false; + for (const char *p = string; ; ) { + switch (*p++) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + hadDigit = true; + break; + case '\0': + if (hadDigit) { + // pass 2: allocate and initialize vector of values + values.resize(++numValues); + values.editItemAt(0) = atoi(p = optarg); + for (size_t i = 1; i < numValues; ) { + if (*p++ == ',') { + values.editItemAt(i++) = atoi(p); + } + } + return numValues; + } + // fall through + case ',': + if (hadDigit) { + hadDigit = false; + numValues++; + break; + } + // fall through + default: + return -1; + } + } +} +int main(int argc, char* argv[]) { const char* const progname = argv[0]; - bool profiling = false; - bool writeHeader = false; + bool profileResample = false; + bool profileFilter = false; + bool useFloat = false; int channels = 1; int input_freq = 0; int 
output_freq = 0; AudioResampler::src_quality quality = AudioResampler::DEFAULT_QUALITY; + Vector<int> Ovalues; + Vector<int> Pvalues; int ch; - while ((ch = getopt(argc, argv, "phsq:i:o:")) != -1) { + while ((ch = getopt(argc, argv, "pfFvc:q:i:o:O:P:")) != -1) { switch (ch) { case 'p': - profiling = true; + profileResample = true; + break; + case 'f': + profileFilter = true; break; - case 'h': - writeHeader = true; + case 'F': + useFloat = true; break; - case 's': - channels = 2; + case 'v': + gVerbose = true; + break; + case 'c': + channels = atoi(optarg); break; case 'q': if (!strcmp(optarg, "dq")) @@ -111,6 +142,12 @@ int main(int argc, char* argv[]) { quality = AudioResampler::HIGH_QUALITY; else if (!strcmp(optarg, "vhq")) quality = AudioResampler::VERY_HIGH_QUALITY; + else if (!strcmp(optarg, "dlq")) + quality = AudioResampler::DYN_LOW_QUALITY; + else if (!strcmp(optarg, "dmq")) + quality = AudioResampler::DYN_MED_QUALITY; + else if (!strcmp(optarg, "dhq")) + quality = AudioResampler::DYN_HIGH_QUALITY; else { usage(progname); return -1; @@ -122,12 +159,35 @@ int main(int argc, char* argv[]) { case 'o': output_freq = atoi(optarg); break; + case 'O': + if (parseCSV(optarg, Ovalues) < 0) { + fprintf(stderr, "incorrect syntax for -O option\n"); + return -1; + } + break; + case 'P': + if (parseCSV(optarg, Pvalues) < 0) { + fprintf(stderr, "incorrect syntax for -P option\n"); + return -1; + } + break; case '?': default: usage(progname); return -1; } } + + if (channels < 1 + || channels > (quality < AudioResampler::DYN_LOW_QUALITY ? 
2 : 8)) { + fprintf(stderr, "invalid number of audio channels %d\n", channels); + return -1; + } + if (useFloat && quality < AudioResampler::DYN_LOW_QUALITY) { + fprintf(stderr, "float processing is only possible for dynamic resamplers\n"); + return -1; + } + argc -= optind; argv += optind; @@ -148,25 +208,22 @@ int main(int argc, char* argv[]) { size_t input_size; void* input_vaddr; if (argc == 2) { - struct stat st; - if (stat(file_in, &st) < 0) { - fprintf(stderr, "stat: %s\n", strerror(errno)); - return -1; - } - - int input_fd = open(file_in, O_RDONLY); - if (input_fd < 0) { - fprintf(stderr, "open: %s\n", strerror(errno)); - return -1; - } - - input_size = st.st_size; - input_vaddr = mmap(0, input_size, PROT_READ, MAP_PRIVATE, input_fd, 0); - if (input_vaddr == MAP_FAILED ) { - fprintf(stderr, "mmap: %s\n", strerror(errno)); - return -1; + SF_INFO info; + info.format = 0; + SNDFILE *sf = sf_open(file_in, SFM_READ, &info); + if (sf == NULL) { + perror(file_in); + return EXIT_FAILURE; } + input_size = info.frames * info.channels * sizeof(short); + input_vaddr = malloc(input_size); + (void) sf_readf_short(sf, (short *) input_vaddr, info.frames); + sf_close(sf); + channels = info.channels; + input_freq = info.samplerate; } else { + // data for testing is exactly (input sampling rate/1000)/2 seconds + // so 44.1khz input is 22.05 seconds double k = 1000; // Hz / s double time = (input_freq / 2) / k; size_t input_frames = size_t(input_freq * time); @@ -177,98 +234,287 @@ int main(int argc, char* argv[]) { double t = double(i) / input_freq; double y = sin(M_PI * k * t * t); int16_t yi = floor(y * 32767.0 + 0.5); - for (size_t j=0 ; j<(size_t)channels ; j++) { - in[i*channels + j] = yi / (1+j); + for (int j = 0; j < channels; j++) { + in[i*channels + j] = yi / (1 + j); } } } + size_t input_framesize = channels * sizeof(int16_t); + size_t input_frames = input_size / input_framesize; + + // For float processing, convert input int16_t to float array + if (useFloat) { + 
void *new_vaddr; + + input_framesize = channels * sizeof(float); + input_size = input_frames * input_framesize; + new_vaddr = malloc(input_size); + memcpy_to_float_from_i16(reinterpret_cast<float*>(new_vaddr), + reinterpret_cast<int16_t*>(input_vaddr), input_frames * channels); + free(input_vaddr); + input_vaddr = new_vaddr; + } // ---------------------------------------------------------- class Provider: public AudioBufferProvider { - int16_t* mAddr; - size_t mNumFrames; + const void* mAddr; // base address + const size_t mNumFrames; // total frames + const size_t mFrameSize; // size of each frame in bytes + size_t mNextFrame; // index of next frame to provide + size_t mUnrel; // number of frames not yet released + const Vector<int> mPvalues; // number of frames provided per call + size_t mNextPidx; // index of next entry in mPvalues to use public: - Provider(const void* addr, size_t size, int channels) { - mAddr = (int16_t*) addr; - mNumFrames = size / (channels*sizeof(int16_t)); + Provider(const void* addr, size_t frames, size_t frameSize, const Vector<int>& Pvalues) + : mAddr(addr), + mNumFrames(frames), + mFrameSize(frameSize), + mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) { } virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) { - buffer->frameCount = mNumFrames; - buffer->i16 = mAddr; - return NO_ERROR; + (void)pts; // suppress warning + size_t requestedFrames = buffer->frameCount; + if (requestedFrames > mNumFrames - mNextFrame) { + buffer->frameCount = mNumFrames - mNextFrame; + } + if (!mPvalues.isEmpty()) { + size_t provided = mPvalues[mNextPidx++]; + printf("mPvalue[%zu]=%zu not %zu\n", mNextPidx-1, provided, buffer->frameCount); + if (provided < buffer->frameCount) { + buffer->frameCount = provided; + } + if (mNextPidx >= mPvalues.size()) { + mNextPidx = 0; + } + } + if (gVerbose) { + printf("getNextBuffer() requested %zu frames out of %zu frames available," + " and returned %zu frames\n", + requestedFrames, (size_t) 
(mNumFrames - mNextFrame), buffer->frameCount); + } + mUnrel = buffer->frameCount; + if (buffer->frameCount > 0) { + buffer->raw = (char *)mAddr + mFrameSize * mNextFrame; + return NO_ERROR; + } else { + buffer->raw = NULL; + return NOT_ENOUGH_DATA; + } } virtual void releaseBuffer(Buffer* buffer) { + if (buffer->frameCount > mUnrel) { + fprintf(stderr, "ERROR releaseBuffer() released %zu frames but only %zu available " + "to release\n", buffer->frameCount, mUnrel); + mNextFrame += mUnrel; + mUnrel = 0; + } else { + if (gVerbose) { + printf("releaseBuffer() released %zu frames out of %zu frames available " + "to release\n", buffer->frameCount, mUnrel); + } + mNextFrame += buffer->frameCount; + mUnrel -= buffer->frameCount; + } + buffer->frameCount = 0; + buffer->raw = NULL; } - } provider(input_vaddr, input_size, channels); - - size_t input_frames = input_size / (channels * sizeof(int16_t)); - size_t output_size = 2 * 4 * ((int64_t) input_frames * output_freq) / input_freq; - output_size &= ~7; // always stereo, 32-bits - - void* output_vaddr = malloc(output_size); + void reset() { + mNextFrame = 0; + } + } provider(input_vaddr, input_frames, input_framesize, Pvalues); - if (profiling) { - AudioResampler* resampler = AudioResampler::create(16, channels, - output_freq, quality); + if (gVerbose) { + printf("%zu input frames\n", input_frames); + } - size_t out_frames = output_size/8; - resampler->setSampleRate(input_freq); - resampler->setVolume(0x1000, 0x1000); + int bit_depth = useFloat ? 32 : 16; + int output_channels = channels > 2 ? channels : 2; // output is at least stereo samples + size_t output_framesize = output_channels * (useFloat ? sizeof(float) : sizeof(int32_t)); + size_t output_frames = ((int64_t) input_frames * output_freq) / input_freq; + size_t output_size = output_frames * output_framesize; - memset(output_vaddr, 0, output_size); + if (profileFilter) { + // Check how fast sample rate changes are that require filter changes. 
+ // The delta sample rate changes must indicate a downsampling ratio, + // and must be larger than 10% changes. + // + // On fast devices, filters should be generated between 0.1ms - 1ms. + // (single threaded). + AudioResampler* resampler = AudioResampler::create(bit_depth, channels, + 8000, quality); + int looplimit = 100; timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); - resampler->resample((int*) output_vaddr, out_frames, &provider); + for (int i = 0; i < looplimit; ++i) { + resampler->setSampleRate(9000); + resampler->setSampleRate(12000); + resampler->setSampleRate(20000); + resampler->setSampleRate(30000); + } clock_gettime(CLOCK_MONOTONIC, &end); int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; - int64_t time = (end_ns - start_ns)/4; - printf("%f Mspl/s\n", out_frames/(time/1e9)/1e6); + int64_t time = end_ns - start_ns; + printf("%.2f sample rate changes with filter calculation/sec\n", + looplimit * 4 / (time / 1e9)); + // Check how fast sample rate changes are without filter changes. + // This should be very fast, probably 0.1us - 1us per sample rate + // change. 
+ resampler->setSampleRate(1000); + looplimit = 1000; + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->setSampleRate(1000+i); + } + clock_gettime(CLOCK_MONOTONIC, &end); + start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + time = end_ns - start_ns; + printf("%.2f sample rate changes without filter calculation/sec\n", + looplimit / (time / 1e9)); + resampler->reset(); delete resampler; } - AudioResampler* resampler = AudioResampler::create(16, channels, + void* output_vaddr = malloc(output_size); + AudioResampler* resampler = AudioResampler::create(bit_depth, channels, output_freq, quality); - size_t out_frames = output_size/8; + + + /* set volume precision to 12 bits, so the volume scale is 1<<12. + * The output int32_t is represented as Q4.27, with 4 bits of guard + * followed by the int16_t Q.15 portion, and then 12 trailing bits of + * additional precision. + * + * Generally 0 < volumePrecision <= 14 (due to the limits of + * int16_t values for Volume). volumePrecision cannot be 0 due + * to rounding and shifts. + */ + const int volumePrecision = 12; // in bits + resampler->setSampleRate(input_freq); - resampler->setVolume(0x1000, 0x1000); + resampler->setVolume(1 << volumePrecision, 1 << volumePrecision); + + if (profileResample) { + /* + * For profiling on mobile devices, upon experimentation + * it is better to run a few trials with a shorter loop limit, + * and take the minimum time. + * + * Long tests can cause CPU temperature to build up and thermal throttling + * to reduce CPU frequency. + * + * For frequency checks (index=0, or 1, etc.): + * "cat /sys/devices/system/cpu/cpu${index}/cpufreq/scaling_*_freq" + * + * For temperature checks (index=0, or 1, etc.): + * "cat /sys/class/thermal/thermal_zone${index}/temp" + * + * Another way to avoid thermal throttling is to fix the CPU frequency + * at a lower level which prevents excessive temperatures. 
+ */ + const int trials = 4; + const int looplimit = 4; + timespec start, end; + int64_t time = 0; + + for (int n = 0; n < trials; ++n) { + clock_gettime(CLOCK_MONOTONIC, &start); + for (int i = 0; i < looplimit; ++i) { + resampler->resample((int*) output_vaddr, output_frames, &provider); + provider.reset(); // during benchmarking reset only the provider + } + clock_gettime(CLOCK_MONOTONIC, &end); + int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec; + int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec; + int64_t diff_ns = end_ns - start_ns; + if (n == 0 || diff_ns < time) { + time = diff_ns; // save the best out of our trials. + } + } + // Mfrms/s is "Millions of output frames per second". + printf("quality: %d channels: %d msec: %" PRId64 " Mfrms/s: %.2lf\n", + quality, channels, time/1000000, output_frames * looplimit / (time / 1e9) / 1e6); + resampler->reset(); + } memset(output_vaddr, 0, output_size); - resampler->resample((int*) output_vaddr, out_frames, &provider); + if (gVerbose) { + printf("resample() %zu output frames\n", output_frames); + } + if (Ovalues.isEmpty()) { + Ovalues.push(output_frames); + } + for (size_t i = 0, j = 0; i < output_frames; ) { + size_t thisFrames = Ovalues[j++]; + if (j >= Ovalues.size()) { + j = 0; + } + if (thisFrames == 0 || thisFrames > output_frames - i) { + thisFrames = output_frames - i; + } + resampler->resample((int*) output_vaddr + output_channels*i, thisFrames, &provider); + i += thisFrames; + } + if (gVerbose) { + printf("resample() complete\n"); + } + resampler->reset(); + if (gVerbose) { + printf("reset() complete\n"); + } + delete resampler; + resampler = NULL; - // down-mix (we just truncate and keep the left channel) + // For float processing, convert output format from float to Q4.27, + // which is then converted to int16_t for final storage. 
+ if (useFloat) { + memcpy_to_q4_27_from_float(reinterpret_cast<int32_t*>(output_vaddr), + reinterpret_cast<float*>(output_vaddr), output_frames * output_channels); + } + + // mono takes left channel only (out of stereo output pair) + // stereo and multichannel preserve all channels. int32_t* out = (int32_t*) output_vaddr; - int16_t* convert = (int16_t*) malloc(out_frames * channels * sizeof(int16_t)); - for (size_t i = 0; i < out_frames; i++) { - for (int j=0 ; j<channels ; j++) { - int32_t s = out[i * 2 + j] >> 12; - if (s > 32767) s = 32767; - else if (s < -32768) s = -32768; + int16_t* convert = (int16_t*) malloc(output_frames * channels * sizeof(int16_t)); + + // round to half towards zero and saturate at int16 (non-dithered) + const int roundVal = (1<<(volumePrecision-1)) - 1; // volumePrecision > 0 + + for (size_t i = 0; i < output_frames; i++) { + for (int j = 0; j < channels; j++) { + int32_t s = out[i * output_channels + j] + roundVal; // add offset here + if (s < 0) { + s = (s + 1) >> volumePrecision; // round to 0 + if (s < -32768) { + s = -32768; + } + } else { + s = s >> volumePrecision; + if (s > 32767) { + s = 32767; + } + } convert[i * channels + j] = int16_t(s); } } // write output to disk - int output_fd = open(file_out, O_WRONLY | O_CREAT | O_TRUNC, - S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); - if (output_fd < 0) { - fprintf(stderr, "open: %s\n", strerror(errno)); - return -1; - } - - if (writeHeader) { - HeaderWav wav(out_frames * channels * sizeof(int16_t), channels, output_freq, 16); - write(output_fd, &wav, sizeof(wav)); + SF_INFO info; + info.frames = 0; + info.samplerate = output_freq; + info.channels = channels; + info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16; + SNDFILE *sf = sf_open(file_out, SFM_WRITE, &info); + if (sf == NULL) { + perror(file_out); + return EXIT_FAILURE; } + (void) sf_writef_short(sf, convert, output_frames); + sf_close(sf); - write(output_fd, convert, out_frames * channels * sizeof(int16_t)); - close(output_fd); - - 
return 0; + return EXIT_SUCCESS; } diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk new file mode 100644 index 0000000..f270bfc --- /dev/null +++ b/services/audiopolicy/Android.mk @@ -0,0 +1,44 @@ +LOCAL_PATH:= $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + AudioPolicyService.cpp + +USE_LEGACY_AUDIO_POLICY = 1 +ifeq ($(USE_LEGACY_AUDIO_POLICY), 1) +LOCAL_SRC_FILES += \ + AudioPolicyInterfaceImplLegacy.cpp \ + AudioPolicyClientImplLegacy.cpp + + LOCAL_CFLAGS += -DUSE_LEGACY_AUDIO_POLICY +else +LOCAL_SRC_FILES += \ + AudioPolicyInterfaceImpl.cpp \ + AudioPolicyClientImpl.cpp \ + AudioPolicyManager.cpp +endif + +LOCAL_C_INCLUDES := \ + $(TOPDIR)frameworks/av/services/audioflinger \ + $(call include-path-for, audio-effects) \ + $(call include-path-for, audio-utils) + +LOCAL_SHARED_LIBRARIES := \ + libcutils \ + libutils \ + liblog \ + libbinder \ + libmedia \ + libhardware \ + libhardware_legacy + +LOCAL_STATIC_LIBRARIES := \ + libmedia_helper \ + libserviceutility + +LOCAL_MODULE:= libaudiopolicy + +LOCAL_CFLAGS += -fvisibility=hidden + +include $(BUILD_SHARED_LIBRARY) diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp new file mode 100644 index 0000000..44c47c3 --- /dev/null +++ b/services/audiopolicy/AudioPolicyClientImpl.cpp @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioPolicyClientImpl" +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include "AudioPolicyService.h" + +namespace android { + +/* implementation of the client interface from the policy manager */ + +audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + + return af->loadHwModule(name); +} + +audio_io_handle_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask, + uint32_t *pLatencyMs, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask, + pLatencyMs, flags, offloadInfo); +} + +audio_io_handle_t AudioPolicyService::AudioPolicyClient::openDuplicateOutput( + audio_io_handle_t output1, + audio_io_handle_t output2) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + return af->openDuplicateOutput(output1, output2); +} + +status_t AudioPolicyService::AudioPolicyClient::closeOutput(audio_io_handle_t output) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + + return af->closeOutput(output); +} + +status_t AudioPolicyService::AudioPolicyClient::suspendOutput(audio_io_handle_t output) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return PERMISSION_DENIED; + } + + return af->suspendOutput(output); +} + +status_t 
AudioPolicyService::AudioPolicyClient::restoreOutput(audio_io_handle_t output) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return PERMISSION_DENIED; + } + + return af->restoreOutput(output); +} + +audio_io_handle_t AudioPolicyService::AudioPolicyClient::openInput(audio_module_handle_t module, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + + return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask); +} + +status_t AudioPolicyService::AudioPolicyClient::closeInput(audio_io_handle_t input) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + + return af->closeInput(input); +} + +status_t AudioPolicyService::AudioPolicyClient::setStreamVolume(audio_stream_type_t stream, + float volume, audio_io_handle_t output, + int delay_ms) +{ + return mAudioPolicyService->setStreamVolume(stream, volume, output, + delay_ms); +} + +status_t AudioPolicyService::AudioPolicyClient::invalidateStream(audio_stream_type_t stream) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + + return af->invalidateStream(stream); +} + +void AudioPolicyService::AudioPolicyClient::setParameters(audio_io_handle_t io_handle, + const String8& keyValuePairs, + int delay_ms) +{ + mAudioPolicyService->setParameters(io_handle, keyValuePairs.string(), delay_ms); +} + +String8 AudioPolicyService::AudioPolicyClient::getParameters(audio_io_handle_t io_handle, + const String8& keys) +{ + String8 result = AudioSystem::getParameters(io_handle, keys); + return result; +} + +status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone, + 
audio_stream_type_t stream) +{ + return mAudioPolicyService->startTone(tone, stream); +} + +status_t AudioPolicyService::AudioPolicyClient::stopTone() +{ + return mAudioPolicyService->stopTone(); +} + +status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms) +{ + return mAudioPolicyService->setVoiceVolume(volume, delay_ms); +} + +status_t AudioPolicyService::AudioPolicyClient::moveEffects(int session, + audio_io_handle_t src_output, + audio_io_handle_t dst_output) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + + return af->moveEffects(session, src_output, dst_output); +} + + + +}; // namespace android diff --git a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp new file mode 100644 index 0000000..53f3e2d --- /dev/null +++ b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioPolicyService" +//#define LOG_NDEBUG 0 + +#include "Configuration.h" +#undef __STRICT_ANSI__ +#define __STDINT_LIMITS +#define __STDC_LIMIT_MACROS +#include <stdint.h> + +#include <sys/time.h> +#include <binder/IServiceManager.h> +#include <utils/Log.h> +#include <cutils/properties.h> +#include <binder/IPCThreadState.h> +#include <utils/String16.h> +#include <utils/threads.h> +#include "AudioPolicyService.h" +#include "ServiceUtilities.h" +#include <hardware_legacy/power.h> +#include <media/AudioEffect.h> +#include <media/EffectsFactoryApi.h> +//#include <media/IAudioFlinger.h> + +#include <hardware/hardware.h> +#include <system/audio.h> +#include <system/audio_policy.h> +#include <hardware/audio_policy.h> +#include <audio_effects/audio_effects_conf.h> +#include <media/AudioParameter.h> + + +namespace android { + +/* implementation of the interface to the policy manager */ +extern "C" { + +audio_module_handle_t aps_load_hw_module(void *service __unused, + const char *name) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + + return af->loadHwModule(name); +} + +// deprecated: replaced by aps_open_output_on_module() +audio_io_handle_t aps_open_output(void *service __unused, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask, + uint32_t *pLatencyMs, + audio_output_flags_t flags) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + + return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask, + pLatencyMs, flags); +} + +audio_io_handle_t aps_open_output_on_module(void *service __unused, + audio_module_handle_t module, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask, + 
uint32_t *pLatencyMs, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask, + pLatencyMs, flags, offloadInfo); +} + +audio_io_handle_t aps_open_dup_output(void *service __unused, + audio_io_handle_t output1, + audio_io_handle_t output2) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + return af->openDuplicateOutput(output1, output2); +} + +int aps_close_output(void *service __unused, audio_io_handle_t output) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + + return af->closeOutput(output); +} + +int aps_suspend_output(void *service __unused, audio_io_handle_t output) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return PERMISSION_DENIED; + } + + return af->suspendOutput(output); +} + +int aps_restore_output(void *service __unused, audio_io_handle_t output) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return PERMISSION_DENIED; + } + + return af->restoreOutput(output); +} + +// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored +audio_io_handle_t aps_open_input(void *service __unused, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask, + audio_in_acoustics_t acoustics __unused) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + + return af->openInput((audio_module_handle_t)0, pDevices, 
pSamplingRate, pFormat, pChannelMask); +} + +audio_io_handle_t aps_open_input_on_module(void *service __unused, + audio_module_handle_t module, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + ALOGW("%s: could not get AudioFlinger", __func__); + return 0; + } + + return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask); +} + +int aps_close_input(void *service __unused, audio_io_handle_t input) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + + return af->closeInput(input); +} + +int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + + return af->invalidateStream(stream); +} + +int aps_move_effects(void *service __unused, int session, + audio_io_handle_t src_output, + audio_io_handle_t dst_output) +{ + sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + + return af->moveEffects(session, src_output, dst_output); +} + +char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle, + const char *keys) +{ + String8 result = AudioSystem::getParameters(io_handle, String8(keys)); + return strdup(result.string()); +} + +void aps_set_parameters(void *service, audio_io_handle_t io_handle, + const char *kv_pairs, int delay_ms) +{ + AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; + + audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms); +} + +int aps_set_stream_volume(void *service, audio_stream_type_t stream, + float volume, audio_io_handle_t output, + int delay_ms) +{ + AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; + + return audioPolicyService->setStreamVolume(stream, volume, 
output, + delay_ms); +} + +int aps_start_tone(void *service, audio_policy_tone_t tone, + audio_stream_type_t stream) +{ + AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; + + return audioPolicyService->startTone(tone, stream); +} + +int aps_stop_tone(void *service) +{ + AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; + + return audioPolicyService->stopTone(); +} + +int aps_set_voice_volume(void *service, float volume, int delay_ms) +{ + AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; + + return audioPolicyService->setVoiceVolume(volume, delay_ms); +} + +}; // extern "C" + +}; // namespace android diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h new file mode 100644 index 0000000..66260e3 --- /dev/null +++ b/services/audiopolicy/AudioPolicyInterface.h @@ -0,0 +1,257 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_AUDIOPOLICY_INTERFACE_H +#define ANDROID_AUDIOPOLICY_INTERFACE_H + +#include <media/AudioSystem.h> +#include <utils/String8.h> + +#include <hardware/audio_policy.h> + +namespace android { + +// ---------------------------------------------------------------------------- + +// The AudioPolicyInterface and AudioPolicyClientInterface classes define the communication interfaces +// between the platform specific audio policy manager and Android generic audio policy manager. +// The platform specific audio policy manager must implement methods of the AudioPolicyInterface class. +// This implementation makes use of the AudioPolicyClientInterface to control the activity and +// configuration of audio input and output streams. +// +// The platform specific audio policy manager is in charge of the audio routing and volume control +// policies for a given platform. +// The main roles of this module are: +// - keep track of current system state (removable device connections, phone state, user requests...). +// System state changes and user actions are notified to audio policy manager with methods of the AudioPolicyInterface. +// - process getOutput() queries received when AudioTrack objects are created: Those queries +// return a handler on an output that has been selected, configured and opened by the audio policy manager and that +// must be used by the AudioTrack when registering to the AudioFlinger with the createTrack() method. +// When the AudioTrack object is released, a putOutput() query is received and the audio policy manager can decide +// to close or reconfigure the output depending on other streams using this output and current system state. +// - similarly process getInput() and putInput() queries received from AudioRecord objects and configure audio inputs. 
+// - process volume control requests: the stream volume is converted from an index value (received from UI) to a float value +// applicable to each output as a function of platform specific settings and current output route (destination device). It +// also make sure that streams are not muted if not allowed (e.g. camera shutter sound in some countries). +// +// The platform specific audio policy manager is provided as a shared library by platform vendors (as for libaudio.so) +// and is linked with libaudioflinger.so + + +// Audio Policy Manager Interface +class AudioPolicyInterface +{ + +public: + virtual ~AudioPolicyInterface() {} + // + // configuration functions + // + + // indicate a change in device connection status + virtual status_t setDeviceConnectionState(audio_devices_t device, + audio_policy_dev_state_t state, + const char *device_address) = 0; + // retrieve a device connection status + virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, + const char *device_address) = 0; + // indicate a change in phone state. Valid phones states are defined by audio_mode_t + virtual void setPhoneState(audio_mode_t state) = 0; + // force using a specific device category for the specified usage + virtual void setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0; + // retrieve current device category forced for a given usage + virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0; + // set a system property (e.g. 
camera sound always audible) + virtual void setSystemProperty(const char* property, const char* value) = 0; + // check proper initialization + virtual status_t initCheck() = 0; + + // + // Audio routing query functions + // + + // request an output appropriate for playback of the supplied stream type and parameters + virtual audio_io_handle_t getOutput(audio_stream_type_t stream, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) = 0; + // indicates to the audio policy manager that the output starts being used by corresponding stream. + virtual status_t startOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session = 0) = 0; + // indicates to the audio policy manager that the output stops being used by corresponding stream. + virtual status_t stopOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session = 0) = 0; + // releases the output. + virtual void releaseOutput(audio_io_handle_t output) = 0; + + // request an input appropriate for record from the supplied device with supplied parameters. + virtual audio_io_handle_t getInput(audio_source_t inputSource, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_in_acoustics_t acoustics) = 0; + // indicates to the audio policy manager that the input starts being used. + virtual status_t startInput(audio_io_handle_t input) = 0; + // indicates to the audio policy manager that the input stops being used. + virtual status_t stopInput(audio_io_handle_t input) = 0; + // releases the input. + virtual void releaseInput(audio_io_handle_t input) = 0; + + // + // volume control functions + // + + // initialises stream volume conversion parameters by specifying volume index range. 
+ virtual void initStreamVolume(audio_stream_type_t stream, + int indexMin, + int indexMax) = 0; + + // sets the new stream volume at a level corresponding to the supplied index for the + // supplied device. By convention, specifying AUDIO_DEVICE_OUT_DEFAULT means + // setting volume for all devices + virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, + int index, + audio_devices_t device) = 0; + + // retrieve current volume index for the specified stream and the + // specified device. By convention, specifying AUDIO_DEVICE_OUT_DEFAULT means + // querying the volume of the active device. + virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, + int *index, + audio_devices_t device) = 0; + + // return the strategy corresponding to a given stream type + virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0; + + // return the enabled output devices for the given stream type + virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0; + + // Audio effect management + virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc) = 0; + virtual status_t registerEffect(const effect_descriptor_t *desc, + audio_io_handle_t io, + uint32_t strategy, + int session, + int id) = 0; + virtual status_t unregisterEffect(int id) = 0; + virtual status_t setEffectEnabled(int id, bool enabled) = 0; + + virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const = 0; + virtual bool isStreamActiveRemotely(audio_stream_type_t stream, + uint32_t inPastMs = 0) const = 0; + virtual bool isSourceActive(audio_source_t source) const = 0; + + //dump state + virtual status_t dump(int fd) = 0; + + virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo) = 0; +}; + + +// Audio Policy client Interface +class AudioPolicyClientInterface +{ +public: + virtual ~AudioPolicyClientInterface() {} + + // + // Audio HW module functions + // + + // loads a HW module. 
+ virtual audio_module_handle_t loadHwModule(const char *name) = 0; + + // + // Audio output Control functions + // + + // opens an audio output with the requested parameters. The parameter values can indicate to use the default values + // in case the audio policy manager has no specific requirements for the output being opened. + // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream. + // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly. + virtual audio_io_handle_t openOutput(audio_module_handle_t module, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask, + uint32_t *pLatencyMs, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo = NULL) = 0; + // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by + // a special mixer thread in the AudioFlinger. + virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2) = 0; + // closes the output stream + virtual status_t closeOutput(audio_io_handle_t output) = 0; + // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in + // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded. + virtual status_t suspendOutput(audio_io_handle_t output) = 0; + // restores a suspended output. 
+ virtual status_t restoreOutput(audio_io_handle_t output) = 0; + + // + // Audio input Control functions + // + + // opens an audio input + virtual audio_io_handle_t openInput(audio_module_handle_t module, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask) = 0; + // closes an audio input + virtual status_t closeInput(audio_io_handle_t input) = 0; + // + // misc control functions + // + + // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes + // for each output (destination device) it is attached to. + virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0) = 0; + + // invalidate a stream type, causing a reroute to an unspecified new output + virtual status_t invalidateStream(audio_stream_type_t stream) = 0; + + // function enabling to send proprietary informations directly from audio policy manager to audio hardware interface. + virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0) = 0; + // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager. + virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) = 0; + + // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing + // over a telephony device during a phone call. + virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream) = 0; + virtual status_t stopTone() = 0; + + // set down link audio volume. 
+ virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0; + + // move effect to the specified output + virtual status_t moveEffects(int session, + audio_io_handle_t srcOutput, + audio_io_handle_t dstOutput) = 0; + +}; + +extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface); +extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface); + + +}; // namespace android + +#endif // ANDROID_AUDIOPOLICY_INTERFACE_H diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp new file mode 100644 index 0000000..c57c4fa --- /dev/null +++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp @@ -0,0 +1,467 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioPolicyIntefaceImpl" +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include "AudioPolicyService.h" +#include "ServiceUtilities.h" + +namespace android { + + +// ---------------------------------------------------------------------------- + +status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device, + audio_policy_dev_state_t state, + const char *device_address) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (!audio_is_output_device(device) && !audio_is_input_device(device)) { + return BAD_VALUE; + } + if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE && + state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) { + return BAD_VALUE; + } + + ALOGV("setDeviceConnectionState()"); + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->setDeviceConnectionState(device, + state, device_address); +} + +audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState( + audio_devices_t device, + const char *device_address) +{ + if (mAudioPolicyManager == NULL) { + return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; + } + return mAudioPolicyManager->getDeviceConnectionState(device, + device_address); +} + +status_t AudioPolicyService::setPhoneState(audio_mode_t state) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (uint32_t(state) >= AUDIO_MODE_CNT) { + return BAD_VALUE; + } + + ALOGV("setPhoneState()"); + + // TODO: check if it is more appropriate to do it in platform specific policy manager + AudioSystem::setMode(state); + + Mutex::Autolock _l(mLock); + mAudioPolicyManager->setPhoneState(state); + return NO_ERROR; +} + +status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage, + audio_policy_forced_cfg_t config) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (usage < 0 || usage 
>= AUDIO_POLICY_FORCE_USE_CNT) { + return BAD_VALUE; + } + if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) { + return BAD_VALUE; + } + ALOGV("setForceUse()"); + Mutex::Autolock _l(mLock); + mAudioPolicyManager->setForceUse(usage, config); + return NO_ERROR; +} + +audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage) +{ + if (mAudioPolicyManager == NULL) { + return AUDIO_POLICY_FORCE_NONE; + } + if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) { + return AUDIO_POLICY_FORCE_NONE; + } + return mAudioPolicyManager->getForceUse(usage); +} + +audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + if (mAudioPolicyManager == NULL) { + return 0; + } + ALOGV("getOutput()"); + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->getOutput(stream, samplingRate, + format, channelMask, flags, offloadInfo); +} + +status_t AudioPolicyService::startOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + ALOGV("startOutput()"); + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->startOutput(output, stream, session); +} + +status_t AudioPolicyService::stopOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + ALOGV("stopOutput()"); + mOutputCommandThread->stopOutputCommand(output, stream, session); + return NO_ERROR; +} + +status_t AudioPolicyService::doStopOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session) +{ + ALOGV("doStopOutput from tid %d", gettid()); + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->stopOutput(output, stream, session); +} + +void AudioPolicyService::releaseOutput(audio_io_handle_t output) +{ + if 
(mAudioPolicyManager == NULL) { + return; + } + ALOGV("releaseOutput()"); + mOutputCommandThread->releaseOutputCommand(output); +} + +void AudioPolicyService::doReleaseOutput(audio_io_handle_t output) +{ + ALOGV("doReleaseOutput from tid %d", gettid()); + Mutex::Autolock _l(mLock); + mAudioPolicyManager->releaseOutput(output); +} + +audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + int audioSession) +{ + if (mAudioPolicyManager == NULL) { + return 0; + } + // already checked by client, but double-check in case the client wrapper is bypassed + if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) { + return 0; + } + + if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) { + return 0; + } + + Mutex::Autolock _l(mLock); + // the audio_in_acoustics_t parameter is ignored by get_input() + audio_io_handle_t input = mAudioPolicyManager->getInput(inputSource, samplingRate, + format, channelMask, (audio_in_acoustics_t) 0); + + if (input == 0) { + return input; + } + // create audio pre processors according to input source + audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ? 
+ AUDIO_SOURCE_VOICE_RECOGNITION : inputSource; + + ssize_t index = mInputSources.indexOfKey(aliasSource); + if (index < 0) { + return input; + } + ssize_t idx = mInputs.indexOfKey(input); + InputDesc *inputDesc; + if (idx < 0) { + inputDesc = new InputDesc(audioSession); + mInputs.add(input, inputDesc); + } else { + inputDesc = mInputs.valueAt(idx); + } + + Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects; + for (size_t i = 0; i < effects.size(); i++) { + EffectDesc *effect = effects[i]; + sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input); + status_t status = fx->initCheck(); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("Failed to create Fx %s on input %d", effect->mName, input); + // fx goes out of scope and strong ref on AudioEffect is released + continue; + } + for (size_t j = 0; j < effect->mParams.size(); j++) { + fx->setParameter(effect->mParams[j]); + } + inputDesc->mEffects.add(fx); + } + setPreProcessorEnabled(inputDesc, true); + return input; +} + +status_t AudioPolicyService::startInput(audio_io_handle_t input) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + Mutex::Autolock _l(mLock); + + return mAudioPolicyManager->startInput(input); +} + +status_t AudioPolicyService::stopInput(audio_io_handle_t input) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + Mutex::Autolock _l(mLock); + + return mAudioPolicyManager->stopInput(input); +} + +void AudioPolicyService::releaseInput(audio_io_handle_t input) +{ + if (mAudioPolicyManager == NULL) { + return; + } + Mutex::Autolock _l(mLock); + mAudioPolicyManager->releaseInput(input); + + ssize_t index = mInputs.indexOfKey(input); + if (index < 0) { + return; + } + InputDesc *inputDesc = mInputs.valueAt(index); + setPreProcessorEnabled(inputDesc, false); + delete inputDesc; + mInputs.removeItemsAt(index); +} + +status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream, + int indexMin, + int 
indexMax) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (uint32_t(stream) >= AUDIO_STREAM_CNT) { + return BAD_VALUE; + } + Mutex::Autolock _l(mLock); + mAudioPolicyManager->initStreamVolume(stream, indexMin, indexMax); + return NO_ERROR; +} + +status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream, + int index, + audio_devices_t device) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (uint32_t(stream) >= AUDIO_STREAM_CNT) { + return BAD_VALUE; + } + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->setStreamVolumeIndex(stream, + index, + device); +} + +status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream, + int *index, + audio_devices_t device) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + if (uint32_t(stream) >= AUDIO_STREAM_CNT) { + return BAD_VALUE; + } + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->getStreamVolumeIndex(stream, + index, + device); +} + +uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream) +{ + if (mAudioPolicyManager == NULL) { + return 0; + } + return mAudioPolicyManager->getStrategyForStream(stream); +} + +//audio policy: use audio_device_t appropriately + +audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream) +{ + if (mAudioPolicyManager == NULL) { + return (audio_devices_t)0; + } + return mAudioPolicyManager->getDevicesForStream(stream); +} + +audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc) +{ + // FIXME change return type to status_t, and return NO_INIT here + if (mAudioPolicyManager == NULL) { + return 0; + } + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->getOutputForEffect(desc); +} + +status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc, + audio_io_handle_t io, + 
uint32_t strategy, + int session, + int id) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + return mAudioPolicyManager->registerEffect(desc, io, strategy, session, id); +} + +status_t AudioPolicyService::unregisterEffect(int id) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + return mAudioPolicyManager->unregisterEffect(id); +} + +status_t AudioPolicyService::setEffectEnabled(int id, bool enabled) +{ + if (mAudioPolicyManager == NULL) { + return NO_INIT; + } + return mAudioPolicyManager->setEffectEnabled(id, enabled); +} + +bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const +{ + if (mAudioPolicyManager == NULL) { + return 0; + } + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->isStreamActive(stream, inPastMs); +} + +bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const +{ + if (mAudioPolicyManager == NULL) { + return 0; + } + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs); +} + +bool AudioPolicyService::isSourceActive(audio_source_t source) const +{ + if (mAudioPolicyManager == NULL) { + return false; + } + Mutex::Autolock _l(mLock); + return mAudioPolicyManager->isSourceActive(source); +} + +status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession, + effect_descriptor_t *descriptors, + uint32_t *count) +{ + + if (mAudioPolicyManager == NULL) { + *count = 0; + return NO_INIT; + } + Mutex::Autolock _l(mLock); + status_t status = NO_ERROR; + + size_t index; + for (index = 0; index < mInputs.size(); index++) { + if (mInputs.valueAt(index)->mSessionId == audioSession) { + break; + } + } + if (index == mInputs.size()) { + *count = 0; + return BAD_VALUE; + } + Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects; + + for (size_t i = 0; i < effects.size(); i++) { + effect_descriptor_t desc = effects[i]->descriptor(); + if (i < *count) { + descriptors[i] = desc; + 
} + } + if (effects.size() > *count) { + status = NO_MEMORY; + } + *count = effects.size(); + return status; +} + +bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info) +{ + if (mAudioPolicyManager == NULL) { + ALOGV("mAudioPolicyManager == NULL"); + return false; + } + + return mAudioPolicyManager->isOffloadSupported(info); +} + + +}; // namespace android diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp new file mode 100644 index 0000000..bb62ab3 --- /dev/null +++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp @@ -0,0 +1,489 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioPolicyService" +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> +#include "AudioPolicyService.h" +#include "ServiceUtilities.h" + +#include <system/audio.h> +#include <system/audio_policy.h> +#include <hardware/audio_policy.h> + +namespace android { + + +// ---------------------------------------------------------------------------- + +status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device, + audio_policy_dev_state_t state, + const char *device_address) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (!audio_is_output_device(device) && !audio_is_input_device(device)) { + return BAD_VALUE; + } + if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE && + state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) { + return BAD_VALUE; + } + + ALOGV("setDeviceConnectionState()"); + Mutex::Autolock _l(mLock); + return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device, + state, device_address); +} + +audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState( + audio_devices_t device, + const char *device_address) +{ + if (mpAudioPolicy == NULL) { + return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; + } + return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device, + device_address); +} + +status_t AudioPolicyService::setPhoneState(audio_mode_t state) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (uint32_t(state) >= AUDIO_MODE_CNT) { + return BAD_VALUE; + } + + ALOGV("setPhoneState()"); + + // TODO: check if it is more appropriate to do it in platform specific policy manager + AudioSystem::setMode(state); + + Mutex::Autolock _l(mLock); + mpAudioPolicy->set_phone_state(mpAudioPolicy, state); + return NO_ERROR; +} + +status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage, + audio_policy_forced_cfg_t config) +{ + if (mpAudioPolicy == NULL) { + 
return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) { + return BAD_VALUE; + } + if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) { + return BAD_VALUE; + } + ALOGV("setForceUse()"); + Mutex::Autolock _l(mLock); + mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config); + return NO_ERROR; +} + +audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage) +{ + if (mpAudioPolicy == NULL) { + return AUDIO_POLICY_FORCE_NONE; + } + if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) { + return AUDIO_POLICY_FORCE_NONE; + } + return mpAudioPolicy->get_force_use(mpAudioPolicy, usage); +} + +audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + if (mpAudioPolicy == NULL) { + return 0; + } + ALOGV("getOutput()"); + Mutex::Autolock _l(mLock); + return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, + format, channelMask, flags, offloadInfo); +} + +status_t AudioPolicyService::startOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + ALOGV("startOutput()"); + Mutex::Autolock _l(mLock); + return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session); +} + +status_t AudioPolicyService::stopOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + ALOGV("stopOutput()"); + mOutputCommandThread->stopOutputCommand(output, stream, session); + return NO_ERROR; +} + +status_t AudioPolicyService::doStopOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session) +{ + ALOGV("doStopOutput from tid %d", gettid()); + Mutex::Autolock _l(mLock); + return 
mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session); +} + +void AudioPolicyService::releaseOutput(audio_io_handle_t output) +{ + if (mpAudioPolicy == NULL) { + return; + } + ALOGV("releaseOutput()"); + mOutputCommandThread->releaseOutputCommand(output); +} + +void AudioPolicyService::doReleaseOutput(audio_io_handle_t output) +{ + ALOGV("doReleaseOutput from tid %d", gettid()); + Mutex::Autolock _l(mLock); + mpAudioPolicy->release_output(mpAudioPolicy, output); +} + +audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + int audioSession) +{ + if (mpAudioPolicy == NULL) { + return 0; + } + // already checked by client, but double-check in case the client wrapper is bypassed + if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) { + return 0; + } + + if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) { + return 0; + } + + Mutex::Autolock _l(mLock); + // the audio_in_acoustics_t parameter is ignored by get_input() + audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate, + format, channelMask, (audio_in_acoustics_t) 0); + + if (input == 0) { + return input; + } + // create audio pre processors according to input source + audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ? 
+ AUDIO_SOURCE_VOICE_RECOGNITION : inputSource; + + ssize_t index = mInputSources.indexOfKey(aliasSource); + if (index < 0) { + return input; + } + ssize_t idx = mInputs.indexOfKey(input); + InputDesc *inputDesc; + if (idx < 0) { + inputDesc = new InputDesc(audioSession); + mInputs.add(input, inputDesc); + } else { + inputDesc = mInputs.valueAt(idx); + } + + Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects; + for (size_t i = 0; i < effects.size(); i++) { + EffectDesc *effect = effects[i]; + sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input); + status_t status = fx->initCheck(); + if (status != NO_ERROR && status != ALREADY_EXISTS) { + ALOGW("Failed to create Fx %s on input %d", effect->mName, input); + // fx goes out of scope and strong ref on AudioEffect is released + continue; + } + for (size_t j = 0; j < effect->mParams.size(); j++) { + fx->setParameter(effect->mParams[j]); + } + inputDesc->mEffects.add(fx); + } + setPreProcessorEnabled(inputDesc, true); + return input; +} + +status_t AudioPolicyService::startInput(audio_io_handle_t input) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + Mutex::Autolock _l(mLock); + + return mpAudioPolicy->start_input(mpAudioPolicy, input); +} + +status_t AudioPolicyService::stopInput(audio_io_handle_t input) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + Mutex::Autolock _l(mLock); + + return mpAudioPolicy->stop_input(mpAudioPolicy, input); +} + +void AudioPolicyService::releaseInput(audio_io_handle_t input) +{ + if (mpAudioPolicy == NULL) { + return; + } + Mutex::Autolock _l(mLock); + mpAudioPolicy->release_input(mpAudioPolicy, input); + + ssize_t index = mInputs.indexOfKey(input); + if (index < 0) { + return; + } + InputDesc *inputDesc = mInputs.valueAt(index); + setPreProcessorEnabled(inputDesc, false); + delete inputDesc; + mInputs.removeItemsAt(index); +} + +status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream, + int 
indexMin, + int indexMax) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (uint32_t(stream) >= AUDIO_STREAM_CNT) { + return BAD_VALUE; + } + Mutex::Autolock _l(mLock); + mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax); + return NO_ERROR; +} + +status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream, + int index, + audio_devices_t device) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + if (!settingsAllowed()) { + return PERMISSION_DENIED; + } + if (uint32_t(stream) >= AUDIO_STREAM_CNT) { + return BAD_VALUE; + } + Mutex::Autolock _l(mLock); + if (mpAudioPolicy->set_stream_volume_index_for_device) { + return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy, + stream, + index, + device); + } else { + return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index); + } +} + +status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream, + int *index, + audio_devices_t device) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + if (uint32_t(stream) >= AUDIO_STREAM_CNT) { + return BAD_VALUE; + } + Mutex::Autolock _l(mLock); + if (mpAudioPolicy->get_stream_volume_index_for_device) { + return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy, + stream, + index, + device); + } else { + return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index); + } +} + +uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream) +{ + if (mpAudioPolicy == NULL) { + return 0; + } + return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream); +} + +//audio policy: use audio_device_t appropriately + +audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream) +{ + if (mpAudioPolicy == NULL) { + return (audio_devices_t)0; + } + return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream); +} + +audio_io_handle_t 
AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc) +{ + // FIXME change return type to status_t, and return NO_INIT here + if (mpAudioPolicy == NULL) { + return 0; + } + Mutex::Autolock _l(mLock); + return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc); +} + +status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc, + audio_io_handle_t io, + uint32_t strategy, + int session, + int id) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id); +} + +status_t AudioPolicyService::unregisterEffect(int id) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + return mpAudioPolicy->unregister_effect(mpAudioPolicy, id); +} + +status_t AudioPolicyService::setEffectEnabled(int id, bool enabled) +{ + if (mpAudioPolicy == NULL) { + return NO_INIT; + } + return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled); +} + +bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const +{ + if (mpAudioPolicy == NULL) { + return 0; + } + Mutex::Autolock _l(mLock); + return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs); +} + +bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const +{ + if (mpAudioPolicy == NULL) { + return 0; + } + Mutex::Autolock _l(mLock); + return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs); +} + +bool AudioPolicyService::isSourceActive(audio_source_t source) const +{ + if (mpAudioPolicy == NULL) { + return false; + } + if (mpAudioPolicy->is_source_active == 0) { + return false; + } + Mutex::Autolock _l(mLock); + return mpAudioPolicy->is_source_active(mpAudioPolicy, source); +} + +status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession, + effect_descriptor_t *descriptors, + uint32_t *count) +{ + + if (mpAudioPolicy == NULL) { + *count = 0; + return NO_INIT; + } + Mutex::Autolock 
_l(mLock); + status_t status = NO_ERROR; + + size_t index; + for (index = 0; index < mInputs.size(); index++) { + if (mInputs.valueAt(index)->mSessionId == audioSession) { + break; + } + } + if (index == mInputs.size()) { + *count = 0; + return BAD_VALUE; + } + Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects; + + for (size_t i = 0; i < effects.size(); i++) { + effect_descriptor_t desc = effects[i]->descriptor(); + if (i < *count) { + descriptors[i] = desc; + } + } + if (effects.size() > *count) { + status = NO_MEMORY; + } + *count = effects.size(); + return status; +} + +bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info) +{ + if (mpAudioPolicy == NULL) { + ALOGV("mpAudioPolicy == NULL"); + return false; + } + + if (mpAudioPolicy->is_offload_supported == NULL) { + ALOGV("HAL does not implement is_offload_supported"); + return false; + } + + return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info); +} + + +}; // namespace android diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp new file mode 100644 index 0000000..45f98d2 --- /dev/null +++ b/services/audiopolicy/AudioPolicyManager.cpp @@ -0,0 +1,4296 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "AudioPolicyManager" +//#define LOG_NDEBUG 0 + +//#define VERY_VERBOSE_LOGGING +#ifdef VERY_VERBOSE_LOGGING +#define ALOGVV ALOGV +#else +#define ALOGVV(a...) do { } while(0) +#endif + +// A device mask for all audio input devices that are considered "virtual" when evaluating +// active inputs in getActiveInput() +#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL AUDIO_DEVICE_IN_REMOTE_SUBMIX +// A device mask for all audio output devices that are considered "remote" when evaluating +// active output devices in isStreamActiveRemotely() +#define APM_AUDIO_OUT_DEVICE_REMOTE_ALL AUDIO_DEVICE_OUT_REMOTE_SUBMIX + +#include <utils/Log.h> +#include "AudioPolicyManager.h" +#include <hardware/audio_effect.h> +#include <hardware/audio.h> +#include <math.h> +#include <hardware_legacy/audio_policy_conf.h> +#include <cutils/properties.h> +#include <media/AudioParameter.h> + +namespace android { + +// ---------------------------------------------------------------------------- +// Definitions for audio_policy.conf file parsing +// ---------------------------------------------------------------------------- + +struct StringToEnum { + const char *name; + uint32_t value; +}; + +#define STRING_TO_ENUM(string) { #string, string } +#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) + +const StringToEnum sDeviceNameToEnumTable[] = { + STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_SCO), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP), + 
STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_USB), + STRING_TO_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX), + STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC), + STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET), + STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO), + STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET), + STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL), + STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL), + STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC), + STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX), + STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET), + STRING_TO_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET), + STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY), +}; + +const StringToEnum sFlagNameToEnumTable[] = { + STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT), + STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY), + STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST), + STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER), + STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD), + STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING), +}; + +const StringToEnum sFormatNameToEnumTable[] = { + STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT), + STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_BIT), + STRING_TO_ENUM(AUDIO_FORMAT_PCM_32_BIT), + STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_24_BIT), + STRING_TO_ENUM(AUDIO_FORMAT_PCM_FLOAT), + STRING_TO_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED), + STRING_TO_ENUM(AUDIO_FORMAT_MP3), + STRING_TO_ENUM(AUDIO_FORMAT_AAC), + STRING_TO_ENUM(AUDIO_FORMAT_VORBIS), +}; + +const StringToEnum sOutChannelsNameToEnumTable[] = { + STRING_TO_ENUM(AUDIO_CHANNEL_OUT_MONO), + STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO), + STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1), + STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1), +}; + +const StringToEnum sInChannelsNameToEnumTable[] = { + STRING_TO_ENUM(AUDIO_CHANNEL_IN_MONO), + 
STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO), + STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK), +}; + + +uint32_t AudioPolicyManager::stringToEnum(const struct StringToEnum *table, + size_t size, + const char *name) +{ + for (size_t i = 0; i < size; i++) { + if (strcmp(table[i].name, name) == 0) { + ALOGV("stringToEnum() found %s", table[i].name); + return table[i].value; + } + } + return 0; +} + +const char *AudioPolicyManager::enumToString(const struct StringToEnum *table, + size_t size, + uint32_t value) +{ + for (size_t i = 0; i < size; i++) { + if (table[i].value == value) { + return table[i].name; + } + } + return ""; +} + +bool AudioPolicyManager::stringToBool(const char *value) +{ + return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0)); +} + + +// ---------------------------------------------------------------------------- +// AudioPolicyInterface implementation +// ---------------------------------------------------------------------------- + + +status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device, + audio_policy_dev_state_t state, + const char *device_address) +{ + SortedVector <audio_io_handle_t> outputs; + String8 address = String8(device_address); + + ALOGV("setDeviceConnectionState() device: %x, state %d, address %s", device, state, device_address); + + // connect/disconnect only 1 device at a time + if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE; + + // handle output devices + if (audio_is_output_device(device)) { + sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device, + address, + 0); + ssize_t index = mAvailableOutputDevices.indexOf(devDesc); + + // save a copy of the opened output descriptors before any output is opened or closed + // by checkOutputsForDevice(). 
This will be needed by checkOutputForAllStrategies() + mPreviousOutputs = mOutputs; + switch (state) + { + // handle output device connection + case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: + if (index >= 0) { + ALOGW("setDeviceConnectionState() device already connected: %x", device); + return INVALID_OPERATION; + } + ALOGV("setDeviceConnectionState() connecting device %x", device); + + if (checkOutputsForDevice(device, state, outputs, address) != NO_ERROR) { + return INVALID_OPERATION; + } + ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %d outputs", + outputs.size()); + // register new device as available + index = mAvailableOutputDevices.add(devDesc); + if (index >= 0) { + mAvailableOutputDevices[index]->mId = nextUniqueId(); + } else { + return NO_MEMORY; + } + + break; + // handle output device disconnection + case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: { + if (index < 0) { + ALOGW("setDeviceConnectionState() device not connected: %x", device); + return INVALID_OPERATION; + } + + ALOGV("setDeviceConnectionState() disconnecting device %x", device); + // remove device from available output devices + mAvailableOutputDevices.remove(devDesc); + + checkOutputsForDevice(device, state, outputs, address); + // not currently handling multiple simultaneous submixes: ignoring remote submix + // case and address + } break; + + default: + ALOGE("setDeviceConnectionState() invalid state: %x", state); + return BAD_VALUE; + } + + // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP + // output is suspended before any tracks are moved to it + checkA2dpSuspend(); + checkOutputForAllStrategies(); + // outputs must be closed after checkOutputForAllStrategies() is executed + if (!outputs.isEmpty()) { + for (size_t i = 0; i < outputs.size(); i++) { + AudioOutputDescriptor *desc = mOutputs.valueFor(outputs[i]); + // close unused outputs after device disconnection or direct outputs that have been + // opened by checkOutputsForDevice() to query 
dynamic parameters + if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) || + (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) && + (desc->mDirectOpenCount == 0))) { + closeOutput(outputs[i]); + } + } + // check again after closing A2DP output to reset mA2dpSuspended if needed + checkA2dpSuspend(); + } + + updateDevicesAndOutputs(); + for (size_t i = 0; i < mOutputs.size(); i++) { + // do not force device change on duplicated output because if device is 0, it will + // also force a device 0 for the two outputs it is duplicated to which may override + // a valid device selection on those outputs. + setOutputDevice(mOutputs.keyAt(i), + getNewDevice(mOutputs.keyAt(i), true /*fromCache*/), + !mOutputs.valueAt(i)->isDuplicated(), + 0); + } + + if (device == AUDIO_DEVICE_OUT_WIRED_HEADSET) { + device = AUDIO_DEVICE_IN_WIRED_HEADSET; + } else if (device == AUDIO_DEVICE_OUT_BLUETOOTH_SCO || + device == AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET || + device == AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT) { + device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET; + } else { + return NO_ERROR; + } + } + // handle input devices + if (audio_is_input_device(device)) { + sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device, + address, + 0); + + ssize_t index = mAvailableInputDevices.indexOf(devDesc); + switch (state) + { + // handle input device connection + case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: { + if (index >= 0) { + ALOGW("setDeviceConnectionState() device already connected: %d", device); + return INVALID_OPERATION; + } + index = mAvailableInputDevices.add(devDesc); + if (index >= 0) { + mAvailableInputDevices[index]->mId = nextUniqueId(); + } else { + return NO_MEMORY; + } + } + break; + + // handle input device disconnection + case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: { + if (index < 0) { + ALOGW("setDeviceConnectionState() device not connected: %d", device); + return INVALID_OPERATION; + } + mAvailableInputDevices.remove(devDesc); + } break; + + default: + 
ALOGE("setDeviceConnectionState() invalid state: %x", state); + return BAD_VALUE; + } + + audio_io_handle_t activeInput = getActiveInput(); + if (activeInput != 0) { + AudioInputDescriptor *inputDesc = mInputs.valueFor(activeInput); + audio_devices_t newDevice = getDeviceForInputSource(inputDesc->mInputSource); + if ((newDevice != AUDIO_DEVICE_NONE) && (newDevice != inputDesc->mDevice)) { + ALOGV("setDeviceConnectionState() changing device from %x to %x for input %d", + inputDesc->mDevice, newDevice, activeInput); + inputDesc->mDevice = newDevice; + AudioParameter param = AudioParameter(); + param.addInt(String8(AudioParameter::keyRouting), (int)newDevice); + mpClientInterface->setParameters(activeInput, param.toString()); + } + } + + return NO_ERROR; + } + + ALOGW("setDeviceConnectionState() invalid device: %x", device); + return BAD_VALUE; +} + +audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device, + const char *device_address) +{ + audio_policy_dev_state_t state = AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; + String8 address = String8(device_address); + sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device, + String8(device_address), + 0); + ssize_t index; + DeviceVector *deviceVector; + + if (audio_is_output_device(device)) { + deviceVector = &mAvailableOutputDevices; + } else if (audio_is_input_device(device)) { + deviceVector = &mAvailableInputDevices; + } else { + ALOGW("getDeviceConnectionState() invalid device type %08x", device); + return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; + } + + index = deviceVector->indexOf(devDesc); + if (index >= 0) { + return AUDIO_POLICY_DEVICE_STATE_AVAILABLE; + } else { + return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; + } +} + +void AudioPolicyManager::setPhoneState(audio_mode_t state) +{ + ALOGV("setPhoneState() state %d", state); + audio_devices_t newDevice = AUDIO_DEVICE_NONE; + if (state < 0 || state >= AUDIO_MODE_CNT) { + ALOGW("setPhoneState() invalid state %d", state); + return; + 
} + + if (state == mPhoneState ) { + ALOGW("setPhoneState() setting same state %d", state); + return; + } + + // if leaving call state, handle special case of active streams + // pertaining to sonification strategy see handleIncallSonification() + if (isInCall()) { + ALOGV("setPhoneState() in call state management: new state is %d", state); + for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) { + handleIncallSonification((audio_stream_type_t)stream, false, true); + } + } + + // store previous phone state for management of sonification strategy below + int oldState = mPhoneState; + mPhoneState = state; + bool force = false; + + // are we entering or starting a call + if (!isStateInCall(oldState) && isStateInCall(state)) { + ALOGV(" Entering call in setPhoneState()"); + // force routing command to audio hardware when starting a call + // even if no device change is needed + force = true; + for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) { + mStreams[AUDIO_STREAM_DTMF].mVolumeCurve[j] = + sVolumeProfiles[AUDIO_STREAM_VOICE_CALL][j]; + } + } else if (isStateInCall(oldState) && !isStateInCall(state)) { + ALOGV(" Exiting call in setPhoneState()"); + // force routing command to audio hardware when exiting a call + // even if no device change is needed + force = true; + for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) { + mStreams[AUDIO_STREAM_DTMF].mVolumeCurve[j] = + sVolumeProfiles[AUDIO_STREAM_DTMF][j]; + } + } else if (isStateInCall(state) && (state != oldState)) { + ALOGV(" Switching between telephony and VoIP in setPhoneState()"); + // force routing command to audio hardware when switching between telephony and VoIP + // even if no device change is needed + force = true; + } + + // check for device and output changes triggered by new phone state + newDevice = getNewDevice(mPrimaryOutput, false /*fromCache*/); + checkA2dpSuspend(); + checkOutputForAllStrategies(); + updateDevicesAndOutputs(); + + AudioOutputDescriptor *hwOutputDesc = 
mOutputs.valueFor(mPrimaryOutput); + + // force routing command to audio hardware when ending call + // even if no device change is needed + if (isStateInCall(oldState) && newDevice == AUDIO_DEVICE_NONE) { + newDevice = hwOutputDesc->device(); + } + + int delayMs = 0; + if (isStateInCall(state)) { + nsecs_t sysTime = systemTime(); + for (size_t i = 0; i < mOutputs.size(); i++) { + AudioOutputDescriptor *desc = mOutputs.valueAt(i); + // mute media and sonification strategies and delay device switch by the largest + // latency of any output where either strategy is active. + // This avoid sending the ring tone or music tail into the earpiece or headset. + if ((desc->isStrategyActive(STRATEGY_MEDIA, + SONIFICATION_HEADSET_MUSIC_DELAY, + sysTime) || + desc->isStrategyActive(STRATEGY_SONIFICATION, + SONIFICATION_HEADSET_MUSIC_DELAY, + sysTime)) && + (delayMs < (int)desc->mLatency*2)) { + delayMs = desc->mLatency*2; + } + setStrategyMute(STRATEGY_MEDIA, true, mOutputs.keyAt(i)); + setStrategyMute(STRATEGY_MEDIA, false, mOutputs.keyAt(i), MUTE_TIME_MS, + getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/)); + setStrategyMute(STRATEGY_SONIFICATION, true, mOutputs.keyAt(i)); + setStrategyMute(STRATEGY_SONIFICATION, false, mOutputs.keyAt(i), MUTE_TIME_MS, + getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/)); + } + } + + // change routing is necessary + setOutputDevice(mPrimaryOutput, newDevice, force, delayMs); + + // if entering in call state, handle special case of active streams + // pertaining to sonification strategy see handleIncallSonification() + if (isStateInCall(state)) { + ALOGV("setPhoneState() in call state management: new state is %d", state); + for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) { + handleIncallSonification((audio_stream_type_t)stream, true, true); + } + } + + // Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE + if (state == AUDIO_MODE_RINGTONE && + 
isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) { + mLimitRingtoneVolume = true; + } else { + mLimitRingtoneVolume = false; + } +} + +void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage, + audio_policy_forced_cfg_t config) +{ + ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mPhoneState); + + bool forceVolumeReeval = false; + switch(usage) { + case AUDIO_POLICY_FORCE_FOR_COMMUNICATION: + if (config != AUDIO_POLICY_FORCE_SPEAKER && config != AUDIO_POLICY_FORCE_BT_SCO && + config != AUDIO_POLICY_FORCE_NONE) { + ALOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config); + return; + } + forceVolumeReeval = true; + mForceUse[usage] = config; + break; + case AUDIO_POLICY_FORCE_FOR_MEDIA: + if (config != AUDIO_POLICY_FORCE_HEADPHONES && config != AUDIO_POLICY_FORCE_BT_A2DP && + config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY && + config != AUDIO_POLICY_FORCE_ANALOG_DOCK && + config != AUDIO_POLICY_FORCE_DIGITAL_DOCK && config != AUDIO_POLICY_FORCE_NONE && + config != AUDIO_POLICY_FORCE_NO_BT_A2DP) { + ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config); + return; + } + mForceUse[usage] = config; + break; + case AUDIO_POLICY_FORCE_FOR_RECORD: + if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY && + config != AUDIO_POLICY_FORCE_NONE) { + ALOGW("setForceUse() invalid config %d for FOR_RECORD", config); + return; + } + mForceUse[usage] = config; + break; + case AUDIO_POLICY_FORCE_FOR_DOCK: + if (config != AUDIO_POLICY_FORCE_NONE && config != AUDIO_POLICY_FORCE_BT_CAR_DOCK && + config != AUDIO_POLICY_FORCE_BT_DESK_DOCK && + config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY && + config != AUDIO_POLICY_FORCE_ANALOG_DOCK && + config != AUDIO_POLICY_FORCE_DIGITAL_DOCK) { + ALOGW("setForceUse() invalid config %d for FOR_DOCK", config); + } + forceVolumeReeval = true; + mForceUse[usage] = config; + break; + case AUDIO_POLICY_FORCE_FOR_SYSTEM: + if (config != 
AUDIO_POLICY_FORCE_NONE && + config != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) { + ALOGW("setForceUse() invalid config %d for FOR_SYSTEM", config); + } + forceVolumeReeval = true; + mForceUse[usage] = config; + break; + default: + ALOGW("setForceUse() invalid usage %d", usage); + break; + } + + // check for device and output changes triggered by new force usage + checkA2dpSuspend(); + checkOutputForAllStrategies(); + updateDevicesAndOutputs(); + for (size_t i = 0; i < mOutputs.size(); i++) { + audio_io_handle_t output = mOutputs.keyAt(i); + audio_devices_t newDevice = getNewDevice(output, true /*fromCache*/); + setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE)); + if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) { + applyStreamVolumes(output, newDevice, 0, true); + } + } + + audio_io_handle_t activeInput = getActiveInput(); + if (activeInput != 0) { + AudioInputDescriptor *inputDesc = mInputs.valueFor(activeInput); + audio_devices_t newDevice = getDeviceForInputSource(inputDesc->mInputSource); + if ((newDevice != AUDIO_DEVICE_NONE) && (newDevice != inputDesc->mDevice)) { + ALOGV("setForceUse() changing device from %x to %x for input %d", + inputDesc->mDevice, newDevice, activeInput); + inputDesc->mDevice = newDevice; + AudioParameter param = AudioParameter(); + param.addInt(String8(AudioParameter::keyRouting), (int)newDevice); + mpClientInterface->setParameters(activeInput, param.toString()); + } + } + +} + +audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage) +{ + return mForceUse[usage]; +} + +void AudioPolicyManager::setSystemProperty(const char* property, const char* value) +{ + ALOGV("setSystemProperty() property %s, value %s", property, value); +} + +// Find a direct output profile compatible with the parameters passed, even if the input flags do +// not explicitly request a direct output +AudioPolicyManager::IOProfile *AudioPolicyManager::getProfileForDirectOutput( + audio_devices_t device, + 
uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags) +{ + for (size_t i = 0; i < mHwModules.size(); i++) { + if (mHwModules[i]->mHandle == 0) { + continue; + } + for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) { + IOProfile *profile = mHwModules[i]->mOutputProfiles[j]; + bool found = false; + if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) { + if (profile->isCompatibleProfile(device, samplingRate, format, + channelMask, + AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) { + found = true; + } + } else { + if (profile->isCompatibleProfile(device, samplingRate, format, + channelMask, + AUDIO_OUTPUT_FLAG_DIRECT)) { + found = true; + } + } + if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) { + return profile; + } + } + } + return 0; +} + +audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo) +{ + audio_io_handle_t output = 0; + uint32_t latency = 0; + routing_strategy strategy = getStrategy(stream); + audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/); + ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x", + device, stream, samplingRate, format, channelMask, flags); + +#ifdef AUDIO_POLICY_TEST + if (mCurOutput != 0) { + ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d", + mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput); + + if (mTestOutputs[mCurOutput] == 0) { + ALOGV("getOutput() opening test output"); + AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(NULL); + outputDesc->mDevice = mTestDevice; + outputDesc->mSamplingRate = mTestSamplingRate; + outputDesc->mFormat = mTestFormat; + outputDesc->mChannelMask = mTestChannels; + 
outputDesc->mLatency = mTestLatencyMs; + outputDesc->mFlags = + (audio_output_flags_t)(mDirectOutput ? AUDIO_OUTPUT_FLAG_DIRECT : 0); + outputDesc->mRefCount[stream] = 0; + mTestOutputs[mCurOutput] = mpClientInterface->openOutput(0, &outputDesc->mDevice, + &outputDesc->mSamplingRate, + &outputDesc->mFormat, + &outputDesc->mChannelMask, + &outputDesc->mLatency, + outputDesc->mFlags, + offloadInfo); + if (mTestOutputs[mCurOutput]) { + AudioParameter outputCmd = AudioParameter(); + outputCmd.addInt(String8("set_id"),mCurOutput); + mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString()); + addOutput(mTestOutputs[mCurOutput], outputDesc); + } + } + return mTestOutputs[mCurOutput]; + } +#endif //AUDIO_POLICY_TEST + + // open a direct output if required by specified parameters + //force direct flag if offload flag is set: offloading implies a direct output stream + // and all common behaviors are driven by checking only the direct flag + // this should normally be set appropriately in the policy configuration file + if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) { + flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT); + } + + // Do not allow offloading if one non offloadable effect is enabled. This prevents from + // creating an offloaded track and tearing it down immediately after start when audioflinger + // detects there is an active non offloadable effect. + // FIXME: We should check the audio session here but we do not have it in this context. + // This may prevent offloading in rare situations where effects are left active by apps + // in the background. 
+ IOProfile *profile = NULL; + if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) || + !isNonOffloadableEffectEnabled()) { + profile = getProfileForDirectOutput(device, + samplingRate, + format, + channelMask, + (audio_output_flags_t)flags); + } + + if (profile != NULL) { + AudioOutputDescriptor *outputDesc = NULL; + + for (size_t i = 0; i < mOutputs.size(); i++) { + AudioOutputDescriptor *desc = mOutputs.valueAt(i); + if (!desc->isDuplicated() && (profile == desc->mProfile)) { + outputDesc = desc; + // reuse direct output if currently open and configured with same parameters + if ((samplingRate == outputDesc->mSamplingRate) && + (format == outputDesc->mFormat) && + (channelMask == outputDesc->mChannelMask)) { + outputDesc->mDirectOpenCount++; + ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i)); + return mOutputs.keyAt(i); + } + } + } + // close direct output if currently open and configured with different parameters + if (outputDesc != NULL) { + closeOutput(outputDesc->mId); + } + outputDesc = new AudioOutputDescriptor(profile); + outputDesc->mDevice = device; + outputDesc->mSamplingRate = samplingRate; + outputDesc->mFormat = format; + outputDesc->mChannelMask = channelMask; + outputDesc->mLatency = 0; + outputDesc->mFlags =(audio_output_flags_t) (outputDesc->mFlags | flags); + outputDesc->mRefCount[stream] = 0; + outputDesc->mStopTime[stream] = 0; + outputDesc->mDirectOpenCount = 1; + output = mpClientInterface->openOutput(profile->mModule->mHandle, + &outputDesc->mDevice, + &outputDesc->mSamplingRate, + &outputDesc->mFormat, + &outputDesc->mChannelMask, + &outputDesc->mLatency, + outputDesc->mFlags, + offloadInfo); + + // only accept an output with the requested parameters + if (output == 0 || + (samplingRate != 0 && samplingRate != outputDesc->mSamplingRate) || + (format != AUDIO_FORMAT_DEFAULT && format != outputDesc->mFormat) || + (channelMask != 0 && channelMask != outputDesc->mChannelMask)) { + ALOGV("getOutput() failed opening direct 
output: output %d samplingRate %d %d," + "format %d %d, channelMask %04x %04x", output, samplingRate, + outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask, + outputDesc->mChannelMask); + if (output != 0) { + mpClientInterface->closeOutput(output); + } + delete outputDesc; + return 0; + } + audio_io_handle_t srcOutput = getOutputForEffect(); + addOutput(output, outputDesc); + audio_io_handle_t dstOutput = getOutputForEffect(); + if (dstOutput == output) { + mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, srcOutput, dstOutput); + } + mPreviousOutputs = mOutputs; + ALOGV("getOutput() returns new direct output %d", output); + return output; + } + + // ignoring channel mask due to downmix capability in mixer + + // open a non direct output + + // for non direct outputs, only PCM is supported + if (audio_is_linear_pcm(format)) { + // get which output is suitable for the specified stream. The actual + // routing change will happen when startOutput() will be called + SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs); + + output = selectOutput(outputs, flags); + } + ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d," + "format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags); + + ALOGV("getOutput() returns output %d", output); + + return output; +} + +audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs, + audio_output_flags_t flags) +{ + // select one output among several that provide a path to a particular device or set of + // devices (the list was previously build by getOutputsForDevice()). 
+ // The priority is as follows: + // 1: the output with the highest number of requested policy flags + // 2: the primary output + // 3: the first output in the list + + if (outputs.size() == 0) { + return 0; + } + if (outputs.size() == 1) { + return outputs[0]; + } + + int maxCommonFlags = 0; + audio_io_handle_t outputFlags = 0; + audio_io_handle_t outputPrimary = 0; + + for (size_t i = 0; i < outputs.size(); i++) { + AudioOutputDescriptor *outputDesc = mOutputs.valueFor(outputs[i]); + if (!outputDesc->isDuplicated()) { + int commonFlags = popcount(outputDesc->mProfile->mFlags & flags); + if (commonFlags > maxCommonFlags) { + outputFlags = outputs[i]; + maxCommonFlags = commonFlags; + ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], commonFlags); + } + if (outputDesc->mProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) { + outputPrimary = outputs[i]; + } + } + } + + if (outputFlags != 0) { + return outputFlags; + } + if (outputPrimary != 0) { + return outputPrimary; + } + + return outputs[0]; +} + +status_t AudioPolicyManager::startOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session) +{ + ALOGV("startOutput() output %d, stream %d, session %d", output, stream, session); + ssize_t index = mOutputs.indexOfKey(output); + if (index < 0) { + ALOGW("startOutput() unknown output %d", output); + return BAD_VALUE; + } + + AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index); + + // increment usage count for this stream on the requested output: + // NOTE that the usage count is the same for duplicated output and hardware output which is + // necessary for a correct control of hardware output routing by startOutput() and stopOutput() + outputDesc->changeRefCount(stream, 1); + + if (outputDesc->mRefCount[stream] == 1) { + audio_devices_t newDevice = getNewDevice(output, false /*fromCache*/); + routing_strategy strategy = getStrategy(stream); + bool shouldWait = (strategy == STRATEGY_SONIFICATION) || + (strategy == 
STRATEGY_SONIFICATION_RESPECTFUL); + uint32_t waitMs = 0; + bool force = false; + for (size_t i = 0; i < mOutputs.size(); i++) { + AudioOutputDescriptor *desc = mOutputs.valueAt(i); + if (desc != outputDesc) { + // force a device change if any other output is managed by the same hw + // module and has a current device selection that differs from selected device. + // In this case, the audio HAL must receive the new device selection so that it can + // change the device currently selected by the other active output. + if (outputDesc->sharesHwModuleWith(desc) && + desc->device() != newDevice) { + force = true; + } + // wait for audio on other active outputs to be presented when starting + // a notification so that audio focus effect can propagate. + uint32_t latency = desc->latency(); + if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) { + waitMs = latency; + } + } + } + uint32_t muteWaitMs = setOutputDevice(output, newDevice, force); + + // handle special case for sonification while in call + if (isInCall()) { + handleIncallSonification(stream, true, false); + } + + // apply volume rules for current stream and device if necessary + checkAndSetVolume(stream, + mStreams[stream].getVolumeIndex(newDevice), + output, + newDevice); + + // update the outputs if starting an output with a stream that can affect notification + // routing + handleNotificationRoutingForStream(stream); + if (waitMs > muteWaitMs) { + usleep((waitMs - muteWaitMs) * 2 * 1000); + } + } + return NO_ERROR; +} + + +status_t AudioPolicyManager::stopOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session) +{ + ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session); + ssize_t index = mOutputs.indexOfKey(output); + if (index < 0) { + ALOGW("stopOutput() unknown output %d", output); + return BAD_VALUE; + } + + AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index); + + // handle special case for sonification while in call + if 
(isInCall()) { + handleIncallSonification(stream, false, false); + } + + if (outputDesc->mRefCount[stream] > 0) { + // decrement usage count of this stream on the output + outputDesc->changeRefCount(stream, -1); + // store time at which the stream was stopped - see isStreamActive() + if (outputDesc->mRefCount[stream] == 0) { + outputDesc->mStopTime[stream] = systemTime(); + audio_devices_t newDevice = getNewDevice(output, false /*fromCache*/); + // delay the device switch by twice the latency because stopOutput() is executed when + // the track stop() command is received and at that time the audio track buffer can + // still contain data that needs to be drained. The latency only covers the audio HAL + // and kernel buffers. Also the latency does not always include additional delay in the + // audio path (audio DSP, CODEC ...) + setOutputDevice(output, newDevice, false, outputDesc->mLatency*2); + + // force restoring the device selection on other active outputs if it differs from the + // one being selected for this output + for (size_t i = 0; i < mOutputs.size(); i++) { + audio_io_handle_t curOutput = mOutputs.keyAt(i); + AudioOutputDescriptor *desc = mOutputs.valueAt(i); + if (curOutput != output && + desc->isActive() && + outputDesc->sharesHwModuleWith(desc) && + (newDevice != desc->device())) { + setOutputDevice(curOutput, + getNewDevice(curOutput, false /*fromCache*/), + true, + outputDesc->mLatency*2); + } + } + // update the outputs if stopping one with a stream that can affect notification routing + handleNotificationRoutingForStream(stream); + } + return NO_ERROR; + } else { + ALOGW("stopOutput() refcount is already 0 for output %d", output); + return INVALID_OPERATION; + } +} + +void AudioPolicyManager::releaseOutput(audio_io_handle_t output) +{ + ALOGV("releaseOutput() %d", output); + ssize_t index = mOutputs.indexOfKey(output); + if (index < 0) { + ALOGW("releaseOutput() releasing unknown output %d", output); + return; + } + +#ifdef AUDIO_POLICY_TEST + 
int testIndex = testOutputIndex(output); + if (testIndex != 0) { + AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index); + if (outputDesc->isActive()) { + mpClientInterface->closeOutput(output); + delete mOutputs.valueAt(index); + mOutputs.removeItem(output); + mTestOutputs[testIndex] = 0; + } + return; + } +#endif //AUDIO_POLICY_TEST + + AudioOutputDescriptor *desc = mOutputs.valueAt(index); + if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) { + if (desc->mDirectOpenCount <= 0) { + ALOGW("releaseOutput() invalid open count %d for output %d", + desc->mDirectOpenCount, output); + return; + } + if (--desc->mDirectOpenCount == 0) { + closeOutput(output); + // If effects where present on the output, audioflinger moved them to the primary + // output by default: move them back to the appropriate output. + audio_io_handle_t dstOutput = getOutputForEffect(); + if (dstOutput != mPrimaryOutput) { + mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, mPrimaryOutput, dstOutput); + } + } + } +} + + +audio_io_handle_t AudioPolicyManager::getInput(audio_source_t inputSource, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_in_acoustics_t acoustics) +{ + audio_io_handle_t input = 0; + audio_devices_t device = getDeviceForInputSource(inputSource); + + ALOGV("getInput() inputSource %d, samplingRate %d, format %d, channelMask %x, acoustics %x", + inputSource, samplingRate, format, channelMask, acoustics); + + if (device == AUDIO_DEVICE_NONE) { + ALOGW("getInput() could not find device for inputSource %d", inputSource); + return 0; + } + + // adapt channel selection to input source + switch(inputSource) { + case AUDIO_SOURCE_VOICE_UPLINK: + channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK; + break; + case AUDIO_SOURCE_VOICE_DOWNLINK: + channelMask = AUDIO_CHANNEL_IN_VOICE_DNLINK; + break; + case AUDIO_SOURCE_VOICE_CALL: + channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK; + break; + default: + break; + } + + 
IOProfile *profile = getInputProfile(device, + samplingRate, + format, + channelMask); + if (profile == NULL) { + ALOGW("getInput() could not find profile for device %04x, samplingRate %d, format %d, " + "channelMask %04x", + device, samplingRate, format, channelMask); + return 0; + } + + if (profile->mModule->mHandle == 0) { + ALOGE("getInput(): HW module %s not opened", profile->mModule->mName); + return 0; + } + + AudioInputDescriptor *inputDesc = new AudioInputDescriptor(profile); + + inputDesc->mInputSource = inputSource; + inputDesc->mDevice = device; + inputDesc->mSamplingRate = samplingRate; + inputDesc->mFormat = format; + inputDesc->mChannelMask = channelMask; + inputDesc->mRefCount = 0; + input = mpClientInterface->openInput(profile->mModule->mHandle, + &inputDesc->mDevice, + &inputDesc->mSamplingRate, + &inputDesc->mFormat, + &inputDesc->mChannelMask); + + // only accept input with the exact requested set of parameters + if (input == 0 || + (samplingRate != inputDesc->mSamplingRate) || + (format != inputDesc->mFormat) || + (channelMask != inputDesc->mChannelMask)) { + ALOGI("getInput() failed opening input: samplingRate %d, format %d, channelMask %x", + samplingRate, format, channelMask); + if (input != 0) { + mpClientInterface->closeInput(input); + } + delete inputDesc; + return 0; + } + mInputs.add(input, inputDesc); + return input; +} + +status_t AudioPolicyManager::startInput(audio_io_handle_t input) +{ + ALOGV("startInput() input %d", input); + ssize_t index = mInputs.indexOfKey(input); + if (index < 0) { + ALOGW("startInput() unknown input %d", input); + return BAD_VALUE; + } + AudioInputDescriptor *inputDesc = mInputs.valueAt(index); + +#ifdef AUDIO_POLICY_TEST + if (mTestInput == 0) +#endif //AUDIO_POLICY_TEST + { + // refuse 2 active AudioRecord clients at the same time except if the active input + // uses AUDIO_SOURCE_HOTWORD in which case it is closed. 
+ audio_io_handle_t activeInput = getActiveInput(); + if (!isVirtualInputDevice(inputDesc->mDevice) && activeInput != 0) { + AudioInputDescriptor *activeDesc = mInputs.valueFor(activeInput); + if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) { + ALOGW("startInput() preempting already started low-priority input %d", activeInput); + stopInput(activeInput); + releaseInput(activeInput); + } else { + ALOGW("startInput() input %d failed: other input already started", input); + return INVALID_OPERATION; + } + } + } + + audio_devices_t newDevice = getDeviceForInputSource(inputDesc->mInputSource); + if ((newDevice != AUDIO_DEVICE_NONE) && (newDevice != inputDesc->mDevice)) { + inputDesc->mDevice = newDevice; + } + + // automatically enable the remote submix output when input is started + if (audio_is_remote_submix_device(inputDesc->mDevice)) { + setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, + AUDIO_POLICY_DEVICE_STATE_AVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS); + } + + AudioParameter param = AudioParameter(); + param.addInt(String8(AudioParameter::keyRouting), (int)inputDesc->mDevice); + + int aliasSource = (inputDesc->mInputSource == AUDIO_SOURCE_HOTWORD) ? 
+ AUDIO_SOURCE_VOICE_RECOGNITION : inputDesc->mInputSource; + + param.addInt(String8(AudioParameter::keyInputSource), aliasSource); + ALOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource); + + mpClientInterface->setParameters(input, param.toString()); + + inputDesc->mRefCount = 1; + return NO_ERROR; +} + +status_t AudioPolicyManager::stopInput(audio_io_handle_t input) +{ + ALOGV("stopInput() input %d", input); + ssize_t index = mInputs.indexOfKey(input); + if (index < 0) { + ALOGW("stopInput() unknown input %d", input); + return BAD_VALUE; + } + AudioInputDescriptor *inputDesc = mInputs.valueAt(index); + + if (inputDesc->mRefCount == 0) { + ALOGW("stopInput() input %d already stopped", input); + return INVALID_OPERATION; + } else { + // automatically disable the remote submix output when input is stopped + if (audio_is_remote_submix_device(inputDesc->mDevice)) { + setDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, + AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE, AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS); + } + + AudioParameter param = AudioParameter(); + param.addInt(String8(AudioParameter::keyRouting), 0); + mpClientInterface->setParameters(input, param.toString()); + inputDesc->mRefCount = 0; + return NO_ERROR; + } +} + +void AudioPolicyManager::releaseInput(audio_io_handle_t input) +{ + ALOGV("releaseInput() %d", input); + ssize_t index = mInputs.indexOfKey(input); + if (index < 0) { + ALOGW("releaseInput() releasing unknown input %d", input); + return; + } + mpClientInterface->closeInput(input); + delete mInputs.valueAt(index); + mInputs.removeItem(input); + ALOGV("releaseInput() exit"); +} + +void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream, + int indexMin, + int indexMax) +{ + ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax); + if (indexMin < 0 || indexMin >= indexMax) { + ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d", stream , indexMin, indexMax); + 
return; + } + mStreams[stream].mIndexMin = indexMin; + mStreams[stream].mIndexMax = indexMax; +} + +status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream, + int index, + audio_devices_t device) +{ + + if ((index < mStreams[stream].mIndexMin) || (index > mStreams[stream].mIndexMax)) { + return BAD_VALUE; + } + if (!audio_is_output_device(device)) { + return BAD_VALUE; + } + + // Force max volume if stream cannot be muted + if (!mStreams[stream].mCanBeMuted) index = mStreams[stream].mIndexMax; + + ALOGV("setStreamVolumeIndex() stream %d, device %04x, index %d", + stream, device, index); + + // if device is AUDIO_DEVICE_OUT_DEFAULT set default value and + // clear all device specific values + if (device == AUDIO_DEVICE_OUT_DEFAULT) { + mStreams[stream].mIndexCur.clear(); + } + mStreams[stream].mIndexCur.add(device, index); + + // compute and apply stream volume on all outputs according to connected device + status_t status = NO_ERROR; + for (size_t i = 0; i < mOutputs.size(); i++) { + audio_devices_t curDevice = + getDeviceForVolume(mOutputs.valueAt(i)->device()); + if ((device == AUDIO_DEVICE_OUT_DEFAULT) || (device == curDevice)) { + status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), curDevice); + if (volStatus != NO_ERROR) { + status = volStatus; + } + } + } + return status; +} + +status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream, + int *index, + audio_devices_t device) +{ + if (index == NULL) { + return BAD_VALUE; + } + if (!audio_is_output_device(device)) { + return BAD_VALUE; + } + // if device is AUDIO_DEVICE_OUT_DEFAULT, return volume for device corresponding to + // the strategy the stream belongs to. 
+ if (device == AUDIO_DEVICE_OUT_DEFAULT) { + device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/); + } + device = getDeviceForVolume(device); + + *index = mStreams[stream].getVolumeIndex(device); + ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index); + return NO_ERROR; +} + +audio_io_handle_t AudioPolicyManager::selectOutputForEffects( + const SortedVector<audio_io_handle_t>& outputs) +{ + // select one output among several suitable for global effects. + // The priority is as follows: + // 1: An offloaded output. If the effect ends up not being offloadable, + // AudioFlinger will invalidate the track and the offloaded output + // will be closed causing the effect to be moved to a PCM output. + // 2: A deep buffer output + // 3: the first output in the list + + if (outputs.size() == 0) { + return 0; + } + + audio_io_handle_t outputOffloaded = 0; + audio_io_handle_t outputDeepBuffer = 0; + + for (size_t i = 0; i < outputs.size(); i++) { + AudioOutputDescriptor *desc = mOutputs.valueFor(outputs[i]); + ALOGV("selectOutputForEffects outputs[%d] flags %x", i, desc->mFlags); + if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) { + outputOffloaded = outputs[i]; + } + if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) { + outputDeepBuffer = outputs[i]; + } + } + + ALOGV("selectOutputForEffects outputOffloaded %d outputDeepBuffer %d", + outputOffloaded, outputDeepBuffer); + if (outputOffloaded != 0) { + return outputOffloaded; + } + if (outputDeepBuffer != 0) { + return outputDeepBuffer; + } + + return outputs[0]; +} + +audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc) +{ + // apply simple rule where global effects are attached to the same output as MUSIC streams + + routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC); + audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/); + SortedVector<audio_io_handle_t> dstOutputs = 
getOutputsForDevice(device, mOutputs); + + audio_io_handle_t output = selectOutputForEffects(dstOutputs); + ALOGV("getOutputForEffect() got output %d for fx %s flags %x", + output, (desc == NULL) ? "unspecified" : desc->name, (desc == NULL) ? 0 : desc->flags); + + return output; +} + +status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc, + audio_io_handle_t io, + uint32_t strategy, + int session, + int id) +{ + ssize_t index = mOutputs.indexOfKey(io); + if (index < 0) { + index = mInputs.indexOfKey(io); + if (index < 0) { + ALOGW("registerEffect() unknown io %d", io); + return INVALID_OPERATION; + } + } + + if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) { + ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB", + desc->name, desc->memoryUsage); + return INVALID_OPERATION; + } + mTotalEffectsMemory += desc->memoryUsage; + ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d", + desc->name, io, strategy, session, id); + ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory); + + EffectDescriptor *pDesc = new EffectDescriptor(); + memcpy (&pDesc->mDesc, desc, sizeof(effect_descriptor_t)); + pDesc->mIo = io; + pDesc->mStrategy = (routing_strategy)strategy; + pDesc->mSession = session; + pDesc->mEnabled = false; + + mEffects.add(id, pDesc); + + return NO_ERROR; +} + +status_t AudioPolicyManager::unregisterEffect(int id) +{ + ssize_t index = mEffects.indexOfKey(id); + if (index < 0) { + ALOGW("unregisterEffect() unknown effect ID %d", id); + return INVALID_OPERATION; + } + + EffectDescriptor *pDesc = mEffects.valueAt(index); + + setEffectEnabled(pDesc, false); + + if (mTotalEffectsMemory < pDesc->mDesc.memoryUsage) { + ALOGW("unregisterEffect() memory %d too big for total %d", + pDesc->mDesc.memoryUsage, mTotalEffectsMemory); + pDesc->mDesc.memoryUsage = mTotalEffectsMemory; + } + mTotalEffectsMemory -= pDesc->mDesc.memoryUsage; + 
ALOGV("unregisterEffect() effect %s, ID %d, memory %d total memory %d", + pDesc->mDesc.name, id, pDesc->mDesc.memoryUsage, mTotalEffectsMemory); + + mEffects.removeItem(id); + delete pDesc; + + return NO_ERROR; +} + +status_t AudioPolicyManager::setEffectEnabled(int id, bool enabled) +{ + ssize_t index = mEffects.indexOfKey(id); + if (index < 0) { + ALOGW("unregisterEffect() unknown effect ID %d", id); + return INVALID_OPERATION; + } + + return setEffectEnabled(mEffects.valueAt(index), enabled); +} + +status_t AudioPolicyManager::setEffectEnabled(EffectDescriptor *pDesc, bool enabled) +{ + if (enabled == pDesc->mEnabled) { + ALOGV("setEffectEnabled(%s) effect already %s", + enabled?"true":"false", enabled?"enabled":"disabled"); + return INVALID_OPERATION; + } + + if (enabled) { + if (mTotalEffectsCpuLoad + pDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) { + ALOGW("setEffectEnabled(true) CPU Load limit exceeded for Fx %s, CPU %f MIPS", + pDesc->mDesc.name, (float)pDesc->mDesc.cpuLoad/10); + return INVALID_OPERATION; + } + mTotalEffectsCpuLoad += pDesc->mDesc.cpuLoad; + ALOGV("setEffectEnabled(true) total CPU %d", mTotalEffectsCpuLoad); + } else { + if (mTotalEffectsCpuLoad < pDesc->mDesc.cpuLoad) { + ALOGW("setEffectEnabled(false) CPU load %d too high for total %d", + pDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad); + pDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad; + } + mTotalEffectsCpuLoad -= pDesc->mDesc.cpuLoad; + ALOGV("setEffectEnabled(false) total CPU %d", mTotalEffectsCpuLoad); + } + pDesc->mEnabled = enabled; + return NO_ERROR; +} + +bool AudioPolicyManager::isNonOffloadableEffectEnabled() +{ + for (size_t i = 0; i < mEffects.size(); i++) { + const EffectDescriptor * const pDesc = mEffects.valueAt(i); + if (pDesc->mEnabled && (pDesc->mStrategy == STRATEGY_MEDIA) && + ((pDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) { + ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d", + pDesc->mDesc.name, pDesc->mSession); + return 
true; + } + } + return false; +} + +bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const +{ + nsecs_t sysTime = systemTime(); + for (size_t i = 0; i < mOutputs.size(); i++) { + const AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i); + if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) { + return true; + } + } + return false; +} + +bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, + uint32_t inPastMs) const +{ + nsecs_t sysTime = systemTime(); + for (size_t i = 0; i < mOutputs.size(); i++) { + const AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i); + if (((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) && + outputDesc->isStreamActive(stream, inPastMs, sysTime)) { + return true; + } + } + return false; +} + +bool AudioPolicyManager::isSourceActive(audio_source_t source) const +{ + for (size_t i = 0; i < mInputs.size(); i++) { + const AudioInputDescriptor * inputDescriptor = mInputs.valueAt(i); + if ((inputDescriptor->mInputSource == (int)source || + (source == AUDIO_SOURCE_VOICE_RECOGNITION && + inputDescriptor->mInputSource == AUDIO_SOURCE_HOTWORD)) + && (inputDescriptor->mRefCount > 0)) { + return true; + } + } + return false; +} + + +status_t AudioPolicyManager::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this); + result.append(buffer); + + snprintf(buffer, SIZE, " Primary Output: %d\n", mPrimaryOutput); + result.append(buffer); + snprintf(buffer, SIZE, " Phone state: %d\n", mPhoneState); + result.append(buffer); + snprintf(buffer, SIZE, " Force use for communications %d\n", + mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]); + result.append(buffer); + snprintf(buffer, SIZE, " Force use for media %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA]); + result.append(buffer); + snprintf(buffer, SIZE, " Force use for record %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD]); 
+ result.append(buffer); + snprintf(buffer, SIZE, " Force use for dock %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK]); + result.append(buffer); + snprintf(buffer, SIZE, " Force use for system %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM]); + result.append(buffer); + + snprintf(buffer, SIZE, " Available output devices:\n"); + result.append(buffer); + write(fd, result.string(), result.size()); + DeviceDescriptor::dumpHeader(fd, 2); + for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) { + mAvailableOutputDevices[i]->dump(fd, 2); + } + snprintf(buffer, SIZE, "\n Available input devices:\n"); + write(fd, buffer, strlen(buffer)); + DeviceDescriptor::dumpHeader(fd, 2); + for (size_t i = 0; i < mAvailableInputDevices.size(); i++) { + mAvailableInputDevices[i]->dump(fd, 2); + } + + snprintf(buffer, SIZE, "\nHW Modules dump:\n"); + write(fd, buffer, strlen(buffer)); + for (size_t i = 0; i < mHwModules.size(); i++) { + snprintf(buffer, SIZE, "- HW Module %d:\n", i + 1); + write(fd, buffer, strlen(buffer)); + mHwModules[i]->dump(fd); + } + + snprintf(buffer, SIZE, "\nOutputs dump:\n"); + write(fd, buffer, strlen(buffer)); + for (size_t i = 0; i < mOutputs.size(); i++) { + snprintf(buffer, SIZE, "- Output %d dump:\n", mOutputs.keyAt(i)); + write(fd, buffer, strlen(buffer)); + mOutputs.valueAt(i)->dump(fd); + } + + snprintf(buffer, SIZE, "\nInputs dump:\n"); + write(fd, buffer, strlen(buffer)); + for (size_t i = 0; i < mInputs.size(); i++) { + snprintf(buffer, SIZE, "- Input %d dump:\n", mInputs.keyAt(i)); + write(fd, buffer, strlen(buffer)); + mInputs.valueAt(i)->dump(fd); + } + + snprintf(buffer, SIZE, "\nStreams dump:\n"); + write(fd, buffer, strlen(buffer)); + snprintf(buffer, SIZE, + " Stream Can be muted Index Min Index Max Index Cur [device : index]...\n"); + write(fd, buffer, strlen(buffer)); + for (int i = 0; i < AUDIO_STREAM_CNT; i++) { + snprintf(buffer, SIZE, " %02d ", i); + write(fd, buffer, strlen(buffer)); + mStreams[i].dump(fd); + } + + 
snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n", + (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory); + write(fd, buffer, strlen(buffer)); + + snprintf(buffer, SIZE, "Registered effects:\n"); + write(fd, buffer, strlen(buffer)); + for (size_t i = 0; i < mEffects.size(); i++) { + snprintf(buffer, SIZE, "- Effect %d dump:\n", mEffects.keyAt(i)); + write(fd, buffer, strlen(buffer)); + mEffects.valueAt(i)->dump(fd); + } + + + return NO_ERROR; +} + +// This function checks for the parameters which can be offloaded. +// This can be enhanced depending on the capability of the DSP and policy +// of the system. +bool AudioPolicyManager::isOffloadSupported(const audio_offload_info_t& offloadInfo) +{ + ALOGV("isOffloadSupported: SR=%u, CM=0x%x, Format=0x%x, StreamType=%d," + " BitRate=%u, duration=%lld us, has_video=%d", + offloadInfo.sample_rate, offloadInfo.channel_mask, + offloadInfo.format, + offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us, + offloadInfo.has_video); + + // Check if offload has been disabled + char propValue[PROPERTY_VALUE_MAX]; + if (property_get("audio.offload.disable", propValue, "0")) { + if (atoi(propValue) != 0) { + ALOGV("offload disabled by audio.offload.disable=%s", propValue ); + return false; + } + } + + // Check if stream type is music, then only allow offload as of now. 
+ if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC) + { + ALOGV("isOffloadSupported: stream_type != MUSIC, returning false"); + return false; + } + + //TODO: enable audio offloading with video when ready + if (offloadInfo.has_video) + { + ALOGV("isOffloadSupported: has_video == true, returning false"); + return false; + } + + //If duration is less than minimum value defined in property, return false + if (property_get("audio.offload.min.duration.secs", propValue, NULL)) { + if (offloadInfo.duration_us < (atoi(propValue) * 1000000 )) { + ALOGV("Offload denied by duration < audio.offload.min.duration.secs(=%s)", propValue); + return false; + } + } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) { + ALOGV("Offload denied by duration < default min(=%u)", OFFLOAD_DEFAULT_MIN_DURATION_SECS); + return false; + } + + // Do not allow offloading if one non offloadable effect is enabled. This prevents from + // creating an offloaded track and tearing it down immediately after start when audioflinger + // detects there is an active non offloadable effect. + // FIXME: We should check the audio session here but we do not have it in this context. + // This may prevent offloading in rare situations where effects are left active by apps + // in the background. + if (isNonOffloadableEffectEnabled()) { + return false; + } + + // See if there is a profile to support this. + // AUDIO_DEVICE_NONE + IOProfile *profile = getProfileForDirectOutput(AUDIO_DEVICE_NONE /*ignore device */, + offloadInfo.sample_rate, + offloadInfo.format, + offloadInfo.channel_mask, + AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD); + ALOGV("isOffloadSupported() profile %sfound", profile != NULL ? 
"" : "NOT "); + return (profile != NULL); +} + +// ---------------------------------------------------------------------------- +// AudioPolicyManager +// ---------------------------------------------------------------------------- + +uint32_t AudioPolicyManager::nextUniqueId() +{ + return android_atomic_inc(&mNextUniqueId); +} + +AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface) + : +#ifdef AUDIO_POLICY_TEST + Thread(false), +#endif //AUDIO_POLICY_TEST + mPrimaryOutput((audio_io_handle_t)0), + mPhoneState(AUDIO_MODE_NORMAL), + mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f), + mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0), + mA2dpSuspended(false), + mSpeakerDrcEnabled(false), mNextUniqueId(0) +{ + mpClientInterface = clientInterface; + + for (int i = 0; i < AUDIO_POLICY_FORCE_USE_CNT; i++) { + mForceUse[i] = AUDIO_POLICY_FORCE_NONE; + } + + mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER); + if (loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE) != NO_ERROR) { + if (loadAudioPolicyConfig(AUDIO_POLICY_CONFIG_FILE) != NO_ERROR) { + ALOGE("could not load audio policy configuration file, setting defaults"); + defaultAudioPolicyConfig(); + } + } + // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices + + // must be done after reading the policy + initializeVolumeCurves(); + + // open all output streams needed to access attached devices + audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types(); + audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN; + for (size_t i = 0; i < mHwModules.size(); i++) { + mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->mName); + if (mHwModules[i]->mHandle == 0) { + ALOGW("could not open HW module %s", mHwModules[i]->mName); + continue; + } + // open all output streams needed to access attached devices + // except for direct output streams that are only opened when they are 
actually + // required by an app. + // This also validates mAvailableOutputDevices list + for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) + { + const IOProfile *outProfile = mHwModules[i]->mOutputProfiles[j]; + + if (outProfile->mSupportedDevices.isEmpty()) { + ALOGW("Output profile contains no device on module %s", mHwModules[i]->mName); + continue; + } + + audio_devices_t profileTypes = outProfile->mSupportedDevices.types(); + if ((profileTypes & outputDeviceTypes) && + ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0)) { + AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(outProfile); + + outputDesc->mDevice = (audio_devices_t)(mDefaultOutputDevice->mType & profileTypes); + audio_io_handle_t output = mpClientInterface->openOutput( + outProfile->mModule->mHandle, + &outputDesc->mDevice, + &outputDesc->mSamplingRate, + &outputDesc->mFormat, + &outputDesc->mChannelMask, + &outputDesc->mLatency, + outputDesc->mFlags); + if (output == 0) { + ALOGW("Cannot open output stream for device %08x on hw module %s", + outputDesc->mDevice, + mHwModules[i]->mName); + delete outputDesc; + } else { + for (size_t i = 0; i < outProfile->mSupportedDevices.size(); i++) { + audio_devices_t type = outProfile->mSupportedDevices[i]->mType; + ssize_t index = + mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[i]); + // give a valid ID to an attached device once confirmed it is reachable + if ((index >= 0) && (mAvailableOutputDevices[index]->mId == 0)) { + mAvailableOutputDevices[index]->mId = nextUniqueId(); + } + } + if (mPrimaryOutput == 0 && + outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) { + mPrimaryOutput = output; + } + addOutput(output, outputDesc); + setOutputDevice(output, + outputDesc->mDevice, + true); + } + } + } + // open input streams needed to access attached devices to validate + // mAvailableInputDevices list + for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++) + { + const IOProfile *inProfile = 
mHwModules[i]->mInputProfiles[j]; + + if (inProfile->mSupportedDevices.isEmpty()) { + ALOGW("Input profile contains no device on module %s", mHwModules[i]->mName); + continue; + } + + audio_devices_t profileTypes = inProfile->mSupportedDevices.types(); + if (profileTypes & inputDeviceTypes) { + AudioInputDescriptor *inputDesc = new AudioInputDescriptor(inProfile); + + inputDesc->mInputSource = AUDIO_SOURCE_MIC; + inputDesc->mDevice = inProfile->mSupportedDevices[0]->mType; + audio_io_handle_t input = mpClientInterface->openInput( + inProfile->mModule->mHandle, + &inputDesc->mDevice, + &inputDesc->mSamplingRate, + &inputDesc->mFormat, + &inputDesc->mChannelMask); + + if (input != 0) { + for (size_t i = 0; i < inProfile->mSupportedDevices.size(); i++) { + audio_devices_t type = inProfile->mSupportedDevices[i]->mType; + ssize_t index = + mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[i]); + // give a valid ID to an attached device once confirmed it is reachable + if ((index >= 0) && (mAvailableInputDevices[index]->mId == 0)) { + mAvailableInputDevices[index]->mId = nextUniqueId(); + } + } + mpClientInterface->closeInput(input); + } else { + ALOGW("Cannot open input stream for device %08x on hw module %s", + inputDesc->mDevice, + mHwModules[i]->mName); + } + delete inputDesc; + } + } + } + // make sure all attached devices have been allocated a unique ID + for (size_t i = 0; i < mAvailableOutputDevices.size();) { + if (mAvailableOutputDevices[i]->mId == 0) { + ALOGW("Input device %08x unreachable", mAvailableOutputDevices[i]->mType); + mAvailableOutputDevices.remove(mAvailableOutputDevices[i]); + continue; + } + i++; + } + for (size_t i = 0; i < mAvailableInputDevices.size();) { + if (mAvailableInputDevices[i]->mId == 0) { + ALOGW("Input device %08x unreachable", mAvailableInputDevices[i]->mType); + mAvailableInputDevices.remove(mAvailableInputDevices[i]); + continue; + } + i++; + } + // make sure default device is reachable + if 
(mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) { + ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->mType); + } + + ALOGE_IF((mPrimaryOutput == 0), "Failed to open primary output"); + + updateDevicesAndOutputs(); + +#ifdef AUDIO_POLICY_TEST + if (mPrimaryOutput != 0) { + AudioParameter outputCmd = AudioParameter(); + outputCmd.addInt(String8("set_id"), 0); + mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString()); + + mTestDevice = AUDIO_DEVICE_OUT_SPEAKER; + mTestSamplingRate = 44100; + mTestFormat = AUDIO_FORMAT_PCM_16_BIT; + mTestChannels = AUDIO_CHANNEL_OUT_STEREO; + mTestLatencyMs = 0; + mCurOutput = 0; + mDirectOutput = false; + for (int i = 0; i < NUM_TEST_OUTPUTS; i++) { + mTestOutputs[i] = 0; + } + + const size_t SIZE = 256; + char buffer[SIZE]; + snprintf(buffer, SIZE, "AudioPolicyManagerTest"); + run(buffer, ANDROID_PRIORITY_AUDIO); + } +#endif //AUDIO_POLICY_TEST +} + +AudioPolicyManager::~AudioPolicyManager() +{ +#ifdef AUDIO_POLICY_TEST + exit(); +#endif //AUDIO_POLICY_TEST + for (size_t i = 0; i < mOutputs.size(); i++) { + mpClientInterface->closeOutput(mOutputs.keyAt(i)); + delete mOutputs.valueAt(i); + } + for (size_t i = 0; i < mInputs.size(); i++) { + mpClientInterface->closeInput(mInputs.keyAt(i)); + delete mInputs.valueAt(i); + } + for (size_t i = 0; i < mHwModules.size(); i++) { + delete mHwModules[i]; + } + mAvailableOutputDevices.clear(); + mAvailableInputDevices.clear(); +} + +status_t AudioPolicyManager::initCheck() +{ + return (mPrimaryOutput == 0) ? 
NO_INIT : NO_ERROR; +} + +#ifdef AUDIO_POLICY_TEST +bool AudioPolicyManager::threadLoop() +{ + ALOGV("entering threadLoop()"); + while (!exitPending()) + { + String8 command; + int valueInt; + String8 value; + + Mutex::Autolock _l(mLock); + mWaitWorkCV.waitRelative(mLock, milliseconds(50)); + + command = mpClientInterface->getParameters(0, String8("test_cmd_policy")); + AudioParameter param = AudioParameter(command); + + if (param.getInt(String8("test_cmd_policy"), valueInt) == NO_ERROR && + valueInt != 0) { + ALOGV("Test command %s received", command.string()); + String8 target; + if (param.get(String8("target"), target) != NO_ERROR) { + target = "Manager"; + } + if (param.getInt(String8("test_cmd_policy_output"), valueInt) == NO_ERROR) { + param.remove(String8("test_cmd_policy_output")); + mCurOutput = valueInt; + } + if (param.get(String8("test_cmd_policy_direct"), value) == NO_ERROR) { + param.remove(String8("test_cmd_policy_direct")); + if (value == "false") { + mDirectOutput = false; + } else if (value == "true") { + mDirectOutput = true; + } + } + if (param.getInt(String8("test_cmd_policy_input"), valueInt) == NO_ERROR) { + param.remove(String8("test_cmd_policy_input")); + mTestInput = valueInt; + } + + if (param.get(String8("test_cmd_policy_format"), value) == NO_ERROR) { + param.remove(String8("test_cmd_policy_format")); + int format = AUDIO_FORMAT_INVALID; + if (value == "PCM 16 bits") { + format = AUDIO_FORMAT_PCM_16_BIT; + } else if (value == "PCM 8 bits") { + format = AUDIO_FORMAT_PCM_8_BIT; + } else if (value == "Compressed MP3") { + format = AUDIO_FORMAT_MP3; + } + if (format != AUDIO_FORMAT_INVALID) { + if (target == "Manager") { + mTestFormat = format; + } else if (mTestOutputs[mCurOutput] != 0) { + AudioParameter outputParam = AudioParameter(); + outputParam.addInt(String8("format"), format); + mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString()); + } + } + } + if (param.get(String8("test_cmd_policy_channels"), value) 
== NO_ERROR) { + param.remove(String8("test_cmd_policy_channels")); + int channels = 0; + + if (value == "Channels Stereo") { + channels = AUDIO_CHANNEL_OUT_STEREO; + } else if (value == "Channels Mono") { + channels = AUDIO_CHANNEL_OUT_MONO; + } + if (channels != 0) { + if (target == "Manager") { + mTestChannels = channels; + } else if (mTestOutputs[mCurOutput] != 0) { + AudioParameter outputParam = AudioParameter(); + outputParam.addInt(String8("channels"), channels); + mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString()); + } + } + } + if (param.getInt(String8("test_cmd_policy_sampleRate"), valueInt) == NO_ERROR) { + param.remove(String8("test_cmd_policy_sampleRate")); + if (valueInt >= 0 && valueInt <= 96000) { + int samplingRate = valueInt; + if (target == "Manager") { + mTestSamplingRate = samplingRate; + } else if (mTestOutputs[mCurOutput] != 0) { + AudioParameter outputParam = AudioParameter(); + outputParam.addInt(String8("sampling_rate"), samplingRate); + mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString()); + } + } + } + + if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) { + param.remove(String8("test_cmd_policy_reopen")); + + AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mPrimaryOutput); + mpClientInterface->closeOutput(mPrimaryOutput); + + audio_module_handle_t moduleHandle = outputDesc->mModule->mHandle; + + delete mOutputs.valueFor(mPrimaryOutput); + mOutputs.removeItem(mPrimaryOutput); + + AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(NULL); + outputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER; + mPrimaryOutput = mpClientInterface->openOutput(moduleHandle, + &outputDesc->mDevice, + &outputDesc->mSamplingRate, + &outputDesc->mFormat, + &outputDesc->mChannelMask, + &outputDesc->mLatency, + outputDesc->mFlags); + if (mPrimaryOutput == 0) { + ALOGE("Failed to reopen hardware output stream, samplingRate: %d, format %d, channels %d", + 
outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannelMask); + } else { + AudioParameter outputCmd = AudioParameter(); + outputCmd.addInt(String8("set_id"), 0); + mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString()); + addOutput(mPrimaryOutput, outputDesc); + } + } + + + mpClientInterface->setParameters(0, String8("test_cmd_policy=")); + } + } + return false; +} + +void AudioPolicyManager::exit() +{ + { + AutoMutex _l(mLock); + requestExit(); + mWaitWorkCV.signal(); + } + requestExitAndWait(); +} + +int AudioPolicyManager::testOutputIndex(audio_io_handle_t output) +{ + for (int i = 0; i < NUM_TEST_OUTPUTS; i++) { + if (output == mTestOutputs[i]) return i; + } + return 0; +} +#endif //AUDIO_POLICY_TEST + +// --- + +void AudioPolicyManager::addOutput(audio_io_handle_t id, AudioOutputDescriptor *outputDesc) +{ + outputDesc->mId = id; + mOutputs.add(id, outputDesc); +} + + +String8 AudioPolicyManager::addressToParameter(audio_devices_t device, const String8 address) +{ + if (device & AUDIO_DEVICE_OUT_ALL_A2DP) { + return String8("a2dp_sink_address=")+address; + } + return address; +} + +status_t AudioPolicyManager::checkOutputsForDevice(audio_devices_t device, + audio_policy_dev_state_t state, + SortedVector<audio_io_handle_t>& outputs, + const String8 address) +{ + AudioOutputDescriptor *desc; + + if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) { + // first list already open outputs that can be routed to this device + for (size_t i = 0; i < mOutputs.size(); i++) { + desc = mOutputs.valueAt(i); + if (!desc->isDuplicated() && (desc->mProfile->mSupportedDevices.types() & device)) { + ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i)); + outputs.add(mOutputs.keyAt(i)); + } + } + // then look for output profiles that can be routed to this device + SortedVector<IOProfile *> profiles; + for (size_t i = 0; i < mHwModules.size(); i++) + { + if (mHwModules[i]->mHandle == 0) { + continue; + } + for (size_t j = 0; j < 
mHwModules[i]->mOutputProfiles.size(); j++) + { + if (mHwModules[i]->mOutputProfiles[j]->mSupportedDevices.types() & device) { + ALOGV("checkOutputsForDevice(): adding profile %d from module %d", j, i); + profiles.add(mHwModules[i]->mOutputProfiles[j]); + } + } + } + + if (profiles.isEmpty() && outputs.isEmpty()) { + ALOGW("checkOutputsForDevice(): No output available for device %04x", device); + return BAD_VALUE; + } + + // open outputs for matching profiles if needed. Direct outputs are also opened to + // query for dynamic parameters and will be closed later by setDeviceConnectionState() + for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) { + IOProfile *profile = profiles[profile_index]; + + // nothing to do if one output is already opened for this profile + size_t j; + for (j = 0; j < mOutputs.size(); j++) { + desc = mOutputs.valueAt(j); + if (!desc->isDuplicated() && desc->mProfile == profile) { + break; + } + } + if (j != mOutputs.size()) { + continue; + } + + ALOGV("opening output for device %08x with params %s", device, address.string()); + desc = new AudioOutputDescriptor(profile); + desc->mDevice = device; + audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER; + offloadInfo.sample_rate = desc->mSamplingRate; + offloadInfo.format = desc->mFormat; + offloadInfo.channel_mask = desc->mChannelMask; + + audio_io_handle_t output = mpClientInterface->openOutput(profile->mModule->mHandle, + &desc->mDevice, + &desc->mSamplingRate, + &desc->mFormat, + &desc->mChannelMask, + &desc->mLatency, + desc->mFlags, + &offloadInfo); + if (output != 0) { + if (!address.isEmpty()) { + mpClientInterface->setParameters(output, addressToParameter(device, address)); + } + + if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) { + String8 reply; + char *value; + if (profile->mSamplingRates[0] == 0) { + reply = mpClientInterface->getParameters(output, + String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES)); + ALOGV("checkOutputsForDevice() direct 
output sup sampling rates %s", + reply.string()); + value = strpbrk((char *)reply.string(), "="); + if (value != NULL) { + loadSamplingRates(value + 1, profile); + } + } + if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) { + reply = mpClientInterface->getParameters(output, + String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS)); + ALOGV("checkOutputsForDevice() direct output sup formats %s", + reply.string()); + value = strpbrk((char *)reply.string(), "="); + if (value != NULL) { + loadFormats(value + 1, profile); + } + } + if (profile->mChannelMasks[0] == 0) { + reply = mpClientInterface->getParameters(output, + String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS)); + ALOGV("checkOutputsForDevice() direct output sup channel masks %s", + reply.string()); + value = strpbrk((char *)reply.string(), "="); + if (value != NULL) { + loadOutChannels(value + 1, profile); + } + } + if (((profile->mSamplingRates[0] == 0) && + (profile->mSamplingRates.size() < 2)) || + ((profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) && + (profile->mFormats.size() < 2)) || + ((profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) && + (profile->mChannelMasks.size() < 2))) { + ALOGW("checkOutputsForDevice() direct output missing param"); + mpClientInterface->closeOutput(output); + output = 0; + } else { + addOutput(output, desc); + } + } else { + audio_io_handle_t duplicatedOutput = 0; + // add output descriptor + addOutput(output, desc); + // set initial stream volume for device + applyStreamVolumes(output, device, 0, true); + + //TODO: configure audio effect output stage here + + // open a duplicating output thread for the new output and the primary output + duplicatedOutput = mpClientInterface->openDuplicateOutput(output, + mPrimaryOutput); + if (duplicatedOutput != 0) { + // add duplicated output descriptor + AudioOutputDescriptor *dupOutputDesc = new AudioOutputDescriptor(NULL); + dupOutputDesc->mOutput1 = mOutputs.valueFor(mPrimaryOutput); + dupOutputDesc->mOutput2 = mOutputs.valueFor(output); + 
dupOutputDesc->mSamplingRate = desc->mSamplingRate; + dupOutputDesc->mFormat = desc->mFormat; + dupOutputDesc->mChannelMask = desc->mChannelMask; + dupOutputDesc->mLatency = desc->mLatency; + addOutput(duplicatedOutput, dupOutputDesc); + applyStreamVolumes(duplicatedOutput, device, 0, true); + } else { + ALOGW("checkOutputsForDevice() could not open dup output for %d and %d", + mPrimaryOutput, output); + mpClientInterface->closeOutput(output); + mOutputs.removeItem(output); + output = 0; + } + } + } + if (output == 0) { + ALOGW("checkOutputsForDevice() could not open output for device %x", device); + delete desc; + profiles.removeAt(profile_index); + profile_index--; + } else { + outputs.add(output); + ALOGV("checkOutputsForDevice(): adding output %d", output); + } + } + + if (profiles.isEmpty()) { + ALOGW("checkOutputsForDevice(): No output available for device %04x", device); + return BAD_VALUE; + } + } else { + // check if one opened output is not needed any more after disconnecting one device + for (size_t i = 0; i < mOutputs.size(); i++) { + desc = mOutputs.valueAt(i); + if (!desc->isDuplicated() && + !(desc->mProfile->mSupportedDevices.types() & + mAvailableOutputDevices.types())) { + ALOGV("checkOutputsForDevice(): disconnecting adding output %d", mOutputs.keyAt(i)); + outputs.add(mOutputs.keyAt(i)); + } + } + for (size_t i = 0; i < mHwModules.size(); i++) + { + if (mHwModules[i]->mHandle == 0) { + continue; + } + for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) + { + IOProfile *profile = mHwModules[i]->mOutputProfiles[j]; + if ((profile->mSupportedDevices.types() & device) && + (profile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) { + ALOGV("checkOutputsForDevice(): clearing direct output profile %d on module %d", + j, i); + if (profile->mSamplingRates[0] == 0) { + profile->mSamplingRates.clear(); + profile->mSamplingRates.add(0); + } + if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) { + profile->mFormats.clear(); + 
profile->mFormats.add(AUDIO_FORMAT_DEFAULT); + } + if (profile->mChannelMasks[0] == 0) { + profile->mChannelMasks.clear(); + profile->mChannelMasks.add(0); + } + } + } + } + } + return NO_ERROR; +} + +void AudioPolicyManager::closeOutput(audio_io_handle_t output) +{ + ALOGV("closeOutput(%d)", output); + + AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + if (outputDesc == NULL) { + ALOGW("closeOutput() unknown output %d", output); + return; + } + + // look for duplicated outputs connected to the output being removed. + for (size_t i = 0; i < mOutputs.size(); i++) { + AudioOutputDescriptor *dupOutputDesc = mOutputs.valueAt(i); + if (dupOutputDesc->isDuplicated() && + (dupOutputDesc->mOutput1 == outputDesc || + dupOutputDesc->mOutput2 == outputDesc)) { + AudioOutputDescriptor *outputDesc2; + if (dupOutputDesc->mOutput1 == outputDesc) { + outputDesc2 = dupOutputDesc->mOutput2; + } else { + outputDesc2 = dupOutputDesc->mOutput1; + } + // As all active tracks on duplicated output will be deleted, + // and as they were also referenced on the other output, the reference + // count for their stream type must be adjusted accordingly on + // the other output. 
+ for (int j = 0; j < AUDIO_STREAM_CNT; j++) { + int refCount = dupOutputDesc->mRefCount[j]; + outputDesc2->changeRefCount((audio_stream_type_t)j,-refCount); + } + audio_io_handle_t duplicatedOutput = mOutputs.keyAt(i); + ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput); + + mpClientInterface->closeOutput(duplicatedOutput); + delete mOutputs.valueFor(duplicatedOutput); + mOutputs.removeItem(duplicatedOutput); + } + } + + AudioParameter param; + param.add(String8("closing"), String8("true")); + mpClientInterface->setParameters(output, param.toString()); + + mpClientInterface->closeOutput(output); + delete outputDesc; + mOutputs.removeItem(output); + mPreviousOutputs = mOutputs; +} + +SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(audio_devices_t device, + DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> openOutputs) +{ + SortedVector<audio_io_handle_t> outputs; + + ALOGVV("getOutputsForDevice() device %04x", device); + for (size_t i = 0; i < openOutputs.size(); i++) { + ALOGVV("output %d isDuplicated=%d device=%04x", + i, openOutputs.valueAt(i)->isDuplicated(), openOutputs.valueAt(i)->supportedDevices()); + if ((device & openOutputs.valueAt(i)->supportedDevices()) == device) { + ALOGVV("getOutputsForDevice() found output %d", openOutputs.keyAt(i)); + outputs.add(openOutputs.keyAt(i)); + } + } + return outputs; +} + +bool AudioPolicyManager::vectorsEqual(SortedVector<audio_io_handle_t>& outputs1, + SortedVector<audio_io_handle_t>& outputs2) +{ + if (outputs1.size() != outputs2.size()) { + return false; + } + for (size_t i = 0; i < outputs1.size(); i++) { + if (outputs1[i] != outputs2[i]) { + return false; + } + } + return true; +} + +void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy) +{ + audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/); + audio_devices_t newDevice = getDeviceForStrategy(strategy, false /*fromCache*/); + 
SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevice(oldDevice, mPreviousOutputs); + SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(newDevice, mOutputs); + + if (!vectorsEqual(srcOutputs,dstOutputs)) { + ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d", + strategy, srcOutputs[0], dstOutputs[0]); + // mute strategy while moving tracks from one output to another + for (size_t i = 0; i < srcOutputs.size(); i++) { + AudioOutputDescriptor *desc = mOutputs.valueFor(srcOutputs[i]); + if (desc->isStrategyActive(strategy)) { + setStrategyMute(strategy, true, srcOutputs[i]); + setStrategyMute(strategy, false, srcOutputs[i], MUTE_TIME_MS, newDevice); + } + } + + // Move effects associated to this strategy from previous output to new output + if (strategy == STRATEGY_MEDIA) { + audio_io_handle_t fxOutput = selectOutputForEffects(dstOutputs); + SortedVector<audio_io_handle_t> moved; + for (size_t i = 0; i < mEffects.size(); i++) { + EffectDescriptor *desc = mEffects.valueAt(i); + if (desc->mSession == AUDIO_SESSION_OUTPUT_MIX && + desc->mIo != fxOutput) { + if (moved.indexOf(desc->mIo) < 0) { + ALOGV("checkOutputForStrategy() moving effect %d to output %d", + mEffects.keyAt(i), fxOutput); + mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, desc->mIo, + fxOutput); + moved.add(desc->mIo); + } + desc->mIo = fxOutput; + } + } + } + // Move tracks associated to this strategy from previous output to new output + for (int i = 0; i < AUDIO_STREAM_CNT; i++) { + if (getStrategy((audio_stream_type_t)i) == strategy) { + mpClientInterface->invalidateStream((audio_stream_type_t)i); + } + } + } +} + +void AudioPolicyManager::checkOutputForAllStrategies() +{ + checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE); + checkOutputForStrategy(STRATEGY_PHONE); + checkOutputForStrategy(STRATEGY_SONIFICATION); + checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL); + checkOutputForStrategy(STRATEGY_MEDIA); + 
checkOutputForStrategy(STRATEGY_DTMF); +} + +audio_io_handle_t AudioPolicyManager::getA2dpOutput() +{ + for (size_t i = 0; i < mOutputs.size(); i++) { + AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i); + if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) { + return mOutputs.keyAt(i); + } + } + + return 0; +} + +void AudioPolicyManager::checkA2dpSuspend() +{ + audio_io_handle_t a2dpOutput = getA2dpOutput(); + if (a2dpOutput == 0) { + mA2dpSuspended = false; + return; + } + + bool isScoConnected = + (mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) != 0; + // suspend A2DP output if: + // (NOT already suspended) && + // ((SCO device is connected && + // (forced usage for communication || for record is SCO))) || + // (phone state is ringing || in call) + // + // restore A2DP output if: + // (Already suspended) && + // ((SCO device is NOT connected || + // (forced usage NOT for communication && NOT for record is SCO))) && + // (phone state is NOT ringing && NOT in call) + // + if (mA2dpSuspended) { + if ((!isScoConnected || + ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO) && + (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] != AUDIO_POLICY_FORCE_BT_SCO))) && + ((mPhoneState != AUDIO_MODE_IN_CALL) && + (mPhoneState != AUDIO_MODE_RINGTONE))) { + + mpClientInterface->restoreOutput(a2dpOutput); + mA2dpSuspended = false; + } + } else { + if ((isScoConnected && + ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) || + (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO))) || + ((mPhoneState == AUDIO_MODE_IN_CALL) || + (mPhoneState == AUDIO_MODE_RINGTONE))) { + + mpClientInterface->suspendOutput(a2dpOutput); + mA2dpSuspended = true; + } + } +} + +audio_devices_t AudioPolicyManager::getNewDevice(audio_io_handle_t output, bool fromCache) +{ + audio_devices_t device = AUDIO_DEVICE_NONE; + + AudioOutputDescriptor *outputDesc = 
mOutputs.valueFor(output); + // check the following by order of priority to request a routing change if necessary: + // 1: the strategy enforced audible is active on the output: + // use device for strategy enforced audible + // 2: we are in call or the strategy phone is active on the output: + // use device for strategy phone + // 3: the strategy sonification is active on the output: + // use device for strategy sonification + // 4: the strategy "respectful" sonification is active on the output: + // use device for strategy "respectful" sonification + // 5: the strategy media is active on the output: + // use device for strategy media + // 6: the strategy DTMF is active on the output: + // use device for strategy DTMF + if (outputDesc->isStrategyActive(STRATEGY_ENFORCED_AUDIBLE)) { + device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache); + } else if (isInCall() || + outputDesc->isStrategyActive(STRATEGY_PHONE)) { + device = getDeviceForStrategy(STRATEGY_PHONE, fromCache); + } else if (outputDesc->isStrategyActive(STRATEGY_SONIFICATION)) { + device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache); + } else if (outputDesc->isStrategyActive(STRATEGY_SONIFICATION_RESPECTFUL)) { + device = getDeviceForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache); + } else if (outputDesc->isStrategyActive(STRATEGY_MEDIA)) { + device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache); + } else if (outputDesc->isStrategyActive(STRATEGY_DTMF)) { + device = getDeviceForStrategy(STRATEGY_DTMF, fromCache); + } + + ALOGV("getNewDevice() selected device %x", device); + return device; +} + +uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) { + return (uint32_t)getStrategy(stream); +} + +audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) { + audio_devices_t devices; + // By checking the range of stream before calling getStrategy, we avoid + // getStrategy's behavior for invalid streams. 
getStrategy would do a ALOGE + // and then return STRATEGY_MEDIA, but we want to return the empty set. + if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_CNT) { + devices = AUDIO_DEVICE_NONE; + } else { + AudioPolicyManager::routing_strategy strategy = getStrategy(stream); + devices = getDeviceForStrategy(strategy, true /*fromCache*/); + } + return devices; +} + +AudioPolicyManager::routing_strategy AudioPolicyManager::getStrategy( + audio_stream_type_t stream) { + // stream to strategy mapping + switch (stream) { + case AUDIO_STREAM_VOICE_CALL: + case AUDIO_STREAM_BLUETOOTH_SCO: + return STRATEGY_PHONE; + case AUDIO_STREAM_RING: + case AUDIO_STREAM_ALARM: + return STRATEGY_SONIFICATION; + case AUDIO_STREAM_NOTIFICATION: + return STRATEGY_SONIFICATION_RESPECTFUL; + case AUDIO_STREAM_DTMF: + return STRATEGY_DTMF; + default: + ALOGE("unknown stream type"); + case AUDIO_STREAM_SYSTEM: + // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs + // while key clicks are played produces a poor result + case AUDIO_STREAM_TTS: + case AUDIO_STREAM_MUSIC: + return STRATEGY_MEDIA; + case AUDIO_STREAM_ENFORCED_AUDIBLE: + return STRATEGY_ENFORCED_AUDIBLE; + } +} + +void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) { + switch(stream) { + case AUDIO_STREAM_MUSIC: + checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL); + updateDevicesAndOutputs(); + break; + default: + break; + } +} + +audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy, + bool fromCache) +{ + uint32_t device = AUDIO_DEVICE_NONE; + + if (fromCache) { + ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x", + strategy, mDeviceForStrategy[strategy]); + return mDeviceForStrategy[strategy]; + } + audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types(); + switch (strategy) { + + case STRATEGY_SONIFICATION_RESPECTFUL: + if (isInCall()) { + device = 
getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/); + } else if (isStreamActiveRemotely(AUDIO_STREAM_MUSIC, + SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) { + // while media is playing on a remote device, use the the sonification behavior. + // Note that we test this usecase before testing if media is playing because + // the isStreamActive() method only informs about the activity of a stream, not + // if it's for local playback. Note also that we use the same delay between both tests + device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/); + } else if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) { + // while media is playing (or has recently played), use the same device + device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/); + } else { + // when media is not playing anymore, fall back on the sonification behavior + device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/); + } + + break; + + case STRATEGY_DTMF: + if (!isInCall()) { + // when off call, DTMF strategy follows the same rules as MEDIA strategy + device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/); + break; + } + // when in call, DTMF and PHONE strategies follow the same rules + // FALL THROUGH + + case STRATEGY_PHONE: + // for phone strategy, we first consider the forced use and then the available devices by order + // of priority + switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) { + case AUDIO_POLICY_FORCE_BT_SCO: + if (!isInCall() || strategy != STRATEGY_DTMF) { + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT; + if (device) break; + } + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO; + if (device) break; + // if SCO device is requested but no SCO device is available, fall back to default case + // FALL THROUGH + + default: // 
FORCE_NONE + // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP + if (!isInCall() && + (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) && + (getA2dpOutput() != 0) && !mA2dpSuspended) { + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES; + if (device) break; + } + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET; + if (device) break; + if (mPhoneState != AUDIO_MODE_IN_CALL) { + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET; + if (device) break; + } + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_EARPIECE; + if (device) break; + device = mDefaultOutputDevice->mType; + if (device == AUDIO_DEVICE_NONE) { + ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE"); + } + break; + + case AUDIO_POLICY_FORCE_SPEAKER: + // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to + // A2DP speaker when forcing to speaker output + if (!isInCall() && + (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) && + (getA2dpOutput() != 0) && !mA2dpSuspended) { + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER; + if (device) break; + } + if (mPhoneState != AUDIO_MODE_IN_CALL) { + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY; + if (device) break; + device = availableOutputDeviceTypes & 
AUDIO_DEVICE_OUT_USB_DEVICE; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL; + if (device) break; + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET; + if (device) break; + } + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER; + if (device) break; + device = mDefaultOutputDevice->mType; + if (device == AUDIO_DEVICE_NONE) { + ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER"); + } + break; + } + break; + + case STRATEGY_SONIFICATION: + + // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by + // handleIncallSonification(). + if (isInCall()) { + device = getDeviceForStrategy(STRATEGY_PHONE, false /*fromCache*/); + break; + } + // FALL THROUGH + + case STRATEGY_ENFORCED_AUDIBLE: + // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION + // except: + // - when in call where it doesn't default to STRATEGY_PHONE behavior + // - in countries where not enforced in which case it follows STRATEGY_MEDIA + + if ((strategy == STRATEGY_SONIFICATION) || + (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) { + device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER; + if (device == AUDIO_DEVICE_NONE) { + ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION"); + } + } + // The second device used for sonification is the same as the device used by media strategy + // FALL THROUGH + + case STRATEGY_MEDIA: { + uint32_t device2 = AUDIO_DEVICE_NONE; + if (strategy != STRATEGY_SONIFICATION) { + // no sonification on remote submix (e.g. 
WFD) + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_REMOTE_SUBMIX; + } + if ((device2 == AUDIO_DEVICE_NONE) && + (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) && + (getA2dpOutput() != 0) && !mA2dpSuspended) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP; + if (device2 == AUDIO_DEVICE_NONE) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES; + } + if (device2 == AUDIO_DEVICE_NONE) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER; + } + } + if (device2 == AUDIO_DEVICE_NONE) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE; + } + if (device2 == AUDIO_DEVICE_NONE) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET; + } + if (device2 == AUDIO_DEVICE_NONE) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY; + } + if (device2 == AUDIO_DEVICE_NONE) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE; + } + if (device2 == AUDIO_DEVICE_NONE) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET; + } + if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) { + // no sonification on aux digital (e.g. 
HDMI) + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL; + } + if ((device2 == AUDIO_DEVICE_NONE) && + (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET; + } + if (device2 == AUDIO_DEVICE_NONE) { + device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER; + } + + // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or + // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise + device |= device2; + if (device) break; + device = mDefaultOutputDevice->mType; + if (device == AUDIO_DEVICE_NONE) { + ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA"); + } + } break; + + default: + ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy); + break; + } + + ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device); + return device; +} + +void AudioPolicyManager::updateDevicesAndOutputs() +{ + for (int i = 0; i < NUM_STRATEGIES; i++) { + mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/); + } + mPreviousOutputs = mOutputs; +} + +uint32_t AudioPolicyManager::checkDeviceMuteStrategies(AudioOutputDescriptor *outputDesc, + audio_devices_t prevDevice, + uint32_t delayMs) +{ + // mute/unmute strategies using an incompatible device combination + // if muting, wait for the audio in pcm buffer to be drained before proceeding + // if unmuting, unmute only after the specified delay + if (outputDesc->isDuplicated()) { + return 0; + } + + uint32_t muteWaitMs = 0; + audio_devices_t device = outputDesc->device(); + bool shouldMute = outputDesc->isActive() && (popcount(device) >= 2); + // temporary mute output if device selection changes to avoid volume bursts due to + // different per device volumes + bool tempMute = outputDesc->isActive() && (device != prevDevice); + + for (size_t i = 0; i < NUM_STRATEGIES; i++) { + audio_devices_t curDevice = 
getDeviceForStrategy((routing_strategy)i, false /*fromCache*/); + bool mute = shouldMute && (curDevice & device) && (curDevice != device); + bool doMute = false; + + if (mute && !outputDesc->mStrategyMutedByDevice[i]) { + doMute = true; + outputDesc->mStrategyMutedByDevice[i] = true; + } else if (!mute && outputDesc->mStrategyMutedByDevice[i]){ + doMute = true; + outputDesc->mStrategyMutedByDevice[i] = false; + } + if (doMute || tempMute) { + for (size_t j = 0; j < mOutputs.size(); j++) { + AudioOutputDescriptor *desc = mOutputs.valueAt(j); + // skip output if it does not share any device with current output + if ((desc->supportedDevices() & outputDesc->supportedDevices()) + == AUDIO_DEVICE_NONE) { + continue; + } + audio_io_handle_t curOutput = mOutputs.keyAt(j); + ALOGVV("checkDeviceMuteStrategies() %s strategy %d (curDevice %04x) on output %d", + mute ? "muting" : "unmuting", i, curDevice, curOutput); + setStrategyMute((routing_strategy)i, mute, curOutput, mute ? 0 : delayMs); + if (desc->isStrategyActive((routing_strategy)i)) { + // do tempMute only for current output + if (tempMute && (desc == outputDesc)) { + setStrategyMute((routing_strategy)i, true, curOutput); + setStrategyMute((routing_strategy)i, false, curOutput, + desc->latency() * 2, device); + } + if ((tempMute && (desc == outputDesc)) || mute) { + if (muteWaitMs < desc->latency()) { + muteWaitMs = desc->latency(); + } + } + } + } + } + } + + // FIXME: should not need to double latency if volume could be applied immediately by the + // audioflinger mixer. We must account for the delay between now and the next time + // the audioflinger thread for this output will process a buffer (which corresponds to + // one buffer size, usually 1/2 or 1/4 of the latency). 
+ muteWaitMs *= 2; + // wait for the PCM output buffers to empty before proceeding with the rest of the command + if (muteWaitMs > delayMs) { + muteWaitMs -= delayMs; + usleep(muteWaitMs * 1000); + return muteWaitMs; + } + return 0; +} + +uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output, + audio_devices_t device, + bool force, + int delayMs) +{ + ALOGV("setOutputDevice() output %d device %04x delayMs %d", output, device, delayMs); + AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + AudioParameter param; + uint32_t muteWaitMs; + + if (outputDesc->isDuplicated()) { + muteWaitMs = setOutputDevice(outputDesc->mOutput1->mId, device, force, delayMs); + muteWaitMs += setOutputDevice(outputDesc->mOutput2->mId, device, force, delayMs); + return muteWaitMs; + } + // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current + // output profile + if ((device != AUDIO_DEVICE_NONE) && + ((device & outputDesc->mProfile->mSupportedDevices.types()) == 0)) { + return 0; + } + + // filter devices according to output selected + device = (audio_devices_t)(device & outputDesc->mProfile->mSupportedDevices.types()); + + audio_devices_t prevDevice = outputDesc->mDevice; + + ALOGV("setOutputDevice() prevDevice %04x", prevDevice); + + if (device != AUDIO_DEVICE_NONE) { + outputDesc->mDevice = device; + } + muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs); + + // Do not change the routing if: + // - the requested device is AUDIO_DEVICE_NONE + // - the requested device is the same as current device and force is not specified. 
+ // Doing this check here allows the caller to call setOutputDevice() without conditions + if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && !force) { + ALOGV("setOutputDevice() setting same device %04x or null device for output %d", device, output); + return muteWaitMs; + } + + ALOGV("setOutputDevice() changing device"); + // do the routing + param.addInt(String8(AudioParameter::keyRouting), (int)device); + mpClientInterface->setParameters(output, param.toString(), delayMs); + + // update stream volumes according to new device + applyStreamVolumes(output, device, delayMs); + + return muteWaitMs; +} + +AudioPolicyManager::IOProfile *AudioPolicyManager::getInputProfile(audio_devices_t device, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask) +{ + // Choose an input profile based on the requested capture parameters: select the first available + // profile supporting all requested parameters. + + for (size_t i = 0; i < mHwModules.size(); i++) + { + if (mHwModules[i]->mHandle == 0) { + continue; + } + for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++) + { + IOProfile *profile = mHwModules[i]->mInputProfiles[j]; + if (profile->isCompatibleProfile(device, samplingRate, format, + channelMask, AUDIO_OUTPUT_FLAG_NONE)) { + return profile; + } + } + } + return NULL; +} + +audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource) +{ + uint32_t device = AUDIO_DEVICE_NONE; + audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() & + ~AUDIO_DEVICE_BIT_IN; + switch (inputSource) { + case AUDIO_SOURCE_VOICE_UPLINK: + if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) { + device = AUDIO_DEVICE_IN_VOICE_CALL; + break; + } + // FALL THROUGH + + case AUDIO_SOURCE_DEFAULT: + case AUDIO_SOURCE_MIC: + case AUDIO_SOURCE_VOICE_RECOGNITION: + case AUDIO_SOURCE_HOTWORD: + case AUDIO_SOURCE_VOICE_COMMUNICATION: + if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == 
AUDIO_POLICY_FORCE_BT_SCO && + availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) { + device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET; + } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) { + device = AUDIO_DEVICE_IN_WIRED_HEADSET; + } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) { + device = AUDIO_DEVICE_IN_BUILTIN_MIC; + } + break; + case AUDIO_SOURCE_CAMCORDER: + if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) { + device = AUDIO_DEVICE_IN_BACK_MIC; + } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) { + device = AUDIO_DEVICE_IN_BUILTIN_MIC; + } + break; + case AUDIO_SOURCE_VOICE_DOWNLINK: + case AUDIO_SOURCE_VOICE_CALL: + if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) { + device = AUDIO_DEVICE_IN_VOICE_CALL; + } + break; + case AUDIO_SOURCE_REMOTE_SUBMIX: + if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) { + device = AUDIO_DEVICE_IN_REMOTE_SUBMIX; + } + break; + default: + ALOGW("getDeviceForInputSource() invalid input source %d", inputSource); + break; + } + ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device); + return device; +} + +bool AudioPolicyManager::isVirtualInputDevice(audio_devices_t device) +{ + if ((device & AUDIO_DEVICE_BIT_IN) != 0) { + device &= ~AUDIO_DEVICE_BIT_IN; + if ((popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0)) + return true; + } + return false; +} + +audio_io_handle_t AudioPolicyManager::getActiveInput(bool ignoreVirtualInputs) +{ + for (size_t i = 0; i < mInputs.size(); i++) { + const AudioInputDescriptor * input_descriptor = mInputs.valueAt(i); + if ((input_descriptor->mRefCount > 0) + && (!ignoreVirtualInputs || !isVirtualInputDevice(input_descriptor->mDevice))) { + return mInputs.keyAt(i); + } + } + return 0; +} + + +audio_devices_t AudioPolicyManager::getDeviceForVolume(audio_devices_t device) +{ + if (device == AUDIO_DEVICE_NONE) { + // this happens when forcing a route update and no track is 
active on an output. + // In this case the returned category is not important. + device = AUDIO_DEVICE_OUT_SPEAKER; + } else if (popcount(device) > 1) { + // Multiple device selection is either: + // - speaker + one other device: give priority to speaker in this case. + // - one A2DP device + another device: happens with duplicated output. In this case + // retain the device on the A2DP output as the other must not correspond to an active + // selection if not the speaker. + if (device & AUDIO_DEVICE_OUT_SPEAKER) { + device = AUDIO_DEVICE_OUT_SPEAKER; + } else { + device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP); + } + } + + ALOGW_IF(popcount(device) != 1, + "getDeviceForVolume() invalid device combination: %08x", + device); + + return device; +} + +AudioPolicyManager::device_category AudioPolicyManager::getDeviceCategory(audio_devices_t device) +{ + switch(getDeviceForVolume(device)) { + case AUDIO_DEVICE_OUT_EARPIECE: + return DEVICE_CATEGORY_EARPIECE; + case AUDIO_DEVICE_OUT_WIRED_HEADSET: + case AUDIO_DEVICE_OUT_WIRED_HEADPHONE: + case AUDIO_DEVICE_OUT_BLUETOOTH_SCO: + case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET: + case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP: + case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES: + return DEVICE_CATEGORY_HEADSET; + case AUDIO_DEVICE_OUT_SPEAKER: + case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT: + case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER: + case AUDIO_DEVICE_OUT_AUX_DIGITAL: + case AUDIO_DEVICE_OUT_USB_ACCESSORY: + case AUDIO_DEVICE_OUT_USB_DEVICE: + case AUDIO_DEVICE_OUT_REMOTE_SUBMIX: + default: + return DEVICE_CATEGORY_SPEAKER; + } +} + +float AudioPolicyManager::volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc, + int indexInUi) +{ + device_category deviceCategory = getDeviceCategory(device); + const VolumeCurvePoint *curve = streamDesc.mVolumeCurve[deviceCategory]; + + // the volume index in the UI is relative to the min and max volume indices for this stream type + int nbSteps = 1 + 
curve[VOLMAX].mIndex - + curve[VOLMIN].mIndex; + int volIdx = (nbSteps * (indexInUi - streamDesc.mIndexMin)) / + (streamDesc.mIndexMax - streamDesc.mIndexMin); + + // find what part of the curve this index volume belongs to, or if it's out of bounds + int segment = 0; + if (volIdx < curve[VOLMIN].mIndex) { // out of bounds + return 0.0f; + } else if (volIdx < curve[VOLKNEE1].mIndex) { + segment = 0; + } else if (volIdx < curve[VOLKNEE2].mIndex) { + segment = 1; + } else if (volIdx <= curve[VOLMAX].mIndex) { + segment = 2; + } else { // out of bounds + return 1.0f; + } + + // linear interpolation in the attenuation table in dB + float decibels = curve[segment].mDBAttenuation + + ((float)(volIdx - curve[segment].mIndex)) * + ( (curve[segment+1].mDBAttenuation - + curve[segment].mDBAttenuation) / + ((float)(curve[segment+1].mIndex - + curve[segment].mIndex)) ); + + float amplification = exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 ) + + ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f] ampl=%.5f", + curve[segment].mIndex, volIdx, + curve[segment+1].mIndex, + curve[segment].mDBAttenuation, + decibels, + curve[segment+1].mDBAttenuation, + amplification); + + return amplification; +} + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sDefaultVolumeCurve[AudioPolicyManager::VOLCNT] = { + {1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT] = { + {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT] = { + {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sSpeakerSonificationVolumeCurve[AudioPolicyManager::VOLCNT] = { + {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + 
AudioPolicyManager::sSpeakerSonificationVolumeCurveDrc[AudioPolicyManager::VOLCNT] = { + {1, -35.7f}, {33, -26.1f}, {66, -13.2f}, {100, 0.0f} +}; + +// AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE and AUDIO_STREAM_DTMF volume tracks +// AUDIO_STREAM_RING on phones and AUDIO_STREAM_MUSIC on tablets. +// AUDIO_STREAM_DTMF tracks AUDIO_STREAM_VOICE_CALL while in call (See AudioService.java). +// The range is constrained between -24dB and -6dB over speaker and -30dB and -18dB over headset. + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sDefaultSystemVolumeCurve[AudioPolicyManager::VOLCNT] = { + {1, -24.0f}, {33, -18.0f}, {66, -12.0f}, {100, -6.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sDefaultSystemVolumeCurveDrc[AudioPolicyManager::VOLCNT] = { + {1, -34.0f}, {33, -24.0f}, {66, -15.0f}, {100, -6.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT] = { + {1, -30.0f}, {33, -26.0f}, {66, -22.0f}, {100, -18.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT] = { + {0, -42.0f}, {33, -28.0f}, {66, -14.0f}, {100, 0.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + AudioPolicyManager::sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT] = { + {0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f} +}; + +const AudioPolicyManager::VolumeCurvePoint + *AudioPolicyManager::sVolumeProfiles[AUDIO_STREAM_CNT] + [AudioPolicyManager::DEVICE_CATEGORY_CNT] = { + { // AUDIO_STREAM_VOICE_CALL + sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET + sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultVoiceVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // AUDIO_STREAM_SYSTEM + sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET + sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultSystemVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // 
AUDIO_STREAM_RING + sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET + sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // AUDIO_STREAM_MUSIC + sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET + sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // AUDIO_STREAM_ALARM + sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET + sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // AUDIO_STREAM_NOTIFICATION + sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET + sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // AUDIO_STREAM_BLUETOOTH_SCO + sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET + sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultVoiceVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // AUDIO_STREAM_ENFORCED_AUDIBLE + sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET + sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultSystemVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // AUDIO_STREAM_DTMF + sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET + sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultSystemVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, + { // AUDIO_STREAM_TTS + sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET + sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER + sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EARPIECE + }, +}; + +void AudioPolicyManager::initializeVolumeCurves() +{ + for (int i = 0; i < AUDIO_STREAM_CNT; i++) { + for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) { + mStreams[i].mVolumeCurve[j] = + sVolumeProfiles[i][j]; + } + } + + // Check availability of DRC on speaker path: if available, override some of the speaker curves + if (mSpeakerDrcEnabled) { + mStreams[AUDIO_STREAM_SYSTEM].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] 
= + sDefaultSystemVolumeCurveDrc; + mStreams[AUDIO_STREAM_RING].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] = + sSpeakerSonificationVolumeCurveDrc; + mStreams[AUDIO_STREAM_ALARM].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] = + sSpeakerSonificationVolumeCurveDrc; + mStreams[AUDIO_STREAM_NOTIFICATION].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] = + sSpeakerSonificationVolumeCurveDrc; + } +} + +float AudioPolicyManager::computeVolume(audio_stream_type_t stream, + int index, + audio_io_handle_t output, + audio_devices_t device) +{ + float volume = 1.0; + AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + StreamDescriptor &streamDesc = mStreams[stream]; + + if (device == AUDIO_DEVICE_NONE) { + device = outputDesc->device(); + } + + // if volume is not 0 (not muted), force media volume to max on digital output + if (stream == AUDIO_STREAM_MUSIC && + index != mStreams[stream].mIndexMin && + (device == AUDIO_DEVICE_OUT_AUX_DIGITAL || + device == AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET || + device == AUDIO_DEVICE_OUT_USB_ACCESSORY || + device == AUDIO_DEVICE_OUT_USB_DEVICE)) { + return 1.0; + } + + volume = volIndexToAmpl(device, streamDesc, index); + + // if a headset is connected, apply the following rules to ring tones and notifications + // to avoid sound level bursts in user's ears: + // - always attenuate ring tones and notifications volume by 6dB + // - if music is playing, always limit the volume to current music volume, + // with a minimum threshold at -36dB so that notification is always perceived. 
+ const routing_strategy stream_strategy = getStrategy(stream); + if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP | + AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES | + AUDIO_DEVICE_OUT_WIRED_HEADSET | + AUDIO_DEVICE_OUT_WIRED_HEADPHONE)) && + ((stream_strategy == STRATEGY_SONIFICATION) + || (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL) + || (stream == AUDIO_STREAM_SYSTEM) + || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) && + (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_NONE))) && + streamDesc.mCanBeMuted) { + volume *= SONIFICATION_HEADSET_VOLUME_FACTOR; + // when the phone is ringing we must consider that music could have been paused just before + // by the music application and behave as if music was active if the last music track was + // just stopped + if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) || + mLimitRingtoneVolume) { + audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/); + float musicVol = computeVolume(AUDIO_STREAM_MUSIC, + mStreams[AUDIO_STREAM_MUSIC].getVolumeIndex(musicDevice), + output, + musicDevice); + float minVol = (musicVol > SONIFICATION_HEADSET_VOLUME_MIN) ? 
+ musicVol : SONIFICATION_HEADSET_VOLUME_MIN; + if (volume > minVol) { + volume = minVol; + ALOGV("computeVolume limiting volume to %f musicVol %f", minVol, musicVol); + } + } + } + + return volume; +} + +status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream, + int index, + audio_io_handle_t output, + audio_devices_t device, + int delayMs, + bool force) +{ + + // do not change actual stream volume if the stream is muted + if (mOutputs.valueFor(output)->mMuteCount[stream] != 0) { + ALOGVV("checkAndSetVolume() stream %d muted count %d", + stream, mOutputs.valueFor(output)->mMuteCount[stream]); + return NO_ERROR; + } + + // do not change in call volume if bluetooth is connected and vice versa + if ((stream == AUDIO_STREAM_VOICE_CALL && + mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) || + (stream == AUDIO_STREAM_BLUETOOTH_SCO && + mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO)) { + ALOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm", + stream, mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]); + return INVALID_OPERATION; + } + + float volume = computeVolume(stream, index, output, device); + // We actually change the volume if: + // - the float value returned by computeVolume() changed + // - the force flag is set + if (volume != mOutputs.valueFor(output)->mCurVolume[stream] || + force) { + mOutputs.valueFor(output)->mCurVolume[stream] = volume; + ALOGVV("checkAndSetVolume() for output %d stream %d, volume %f, delay %d", output, stream, volume, delayMs); + // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is + // enabled + if (stream == AUDIO_STREAM_BLUETOOTH_SCO) { + mpClientInterface->setStreamVolume(AUDIO_STREAM_VOICE_CALL, volume, output, delayMs); + } + mpClientInterface->setStreamVolume(stream, volume, output, delayMs); + } + + if (stream == AUDIO_STREAM_VOICE_CALL || + stream == AUDIO_STREAM_BLUETOOTH_SCO) { + float 
voiceVolume; + // Force voice volume to max for bluetooth SCO as volume is managed by the headset + if (stream == AUDIO_STREAM_VOICE_CALL) { + voiceVolume = (float)index/(float)mStreams[stream].mIndexMax; + } else { + voiceVolume = 1.0; + } + + if (voiceVolume != mLastVoiceVolume && output == mPrimaryOutput) { + mpClientInterface->setVoiceVolume(voiceVolume, delayMs); + mLastVoiceVolume = voiceVolume; + } + } + + return NO_ERROR; +} + +void AudioPolicyManager::applyStreamVolumes(audio_io_handle_t output, + audio_devices_t device, + int delayMs, + bool force) +{ + ALOGVV("applyStreamVolumes() for output %d and device %x", output, device); + + for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) { + checkAndSetVolume((audio_stream_type_t)stream, + mStreams[stream].getVolumeIndex(device), + output, + device, + delayMs, + force); + } +} + +void AudioPolicyManager::setStrategyMute(routing_strategy strategy, + bool on, + audio_io_handle_t output, + int delayMs, + audio_devices_t device) +{ + ALOGVV("setStrategyMute() strategy %d, mute %d, output %d", strategy, on, output); + for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) { + if (getStrategy((audio_stream_type_t)stream) == strategy) { + setStreamMute((audio_stream_type_t)stream, on, output, delayMs, device); + } + } +} + +void AudioPolicyManager::setStreamMute(audio_stream_type_t stream, + bool on, + audio_io_handle_t output, + int delayMs, + audio_devices_t device) +{ + StreamDescriptor &streamDesc = mStreams[stream]; + AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output); + if (device == AUDIO_DEVICE_NONE) { + device = outputDesc->device(); + } + + ALOGVV("setStreamMute() stream %d, mute %d, output %d, mMuteCount %d device %04x", + stream, on, output, outputDesc->mMuteCount[stream], device); + + if (on) { + if (outputDesc->mMuteCount[stream] == 0) { + if (streamDesc.mCanBeMuted && + ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) || + (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == 
AUDIO_POLICY_FORCE_NONE))) { + checkAndSetVolume(stream, 0, output, device, delayMs); + } + } + // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored + outputDesc->mMuteCount[stream]++; + } else { + if (outputDesc->mMuteCount[stream] == 0) { + ALOGV("setStreamMute() unmuting non muted stream!"); + return; + } + if (--outputDesc->mMuteCount[stream] == 0) { + checkAndSetVolume(stream, + streamDesc.getVolumeIndex(device), + output, + device, + delayMs); + } + } +} + +void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream, + bool starting, bool stateChange) +{ + // if the stream pertains to sonification strategy and we are in call we must + // mute the stream if it is low visibility. If it is high visibility, we must play a tone + // in the device used for phone strategy and play the tone if the selected device does not + // interfere with the device used for phone strategy + // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as + // many times as there are active tracks on the output + const routing_strategy stream_strategy = getStrategy(stream); + if ((stream_strategy == STRATEGY_SONIFICATION) || + ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) { + AudioOutputDescriptor *outputDesc = mOutputs.valueFor(mPrimaryOutput); + ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d", + stream, starting, outputDesc->mDevice, stateChange); + if (outputDesc->mRefCount[stream]) { + int muteCount = 1; + if (stateChange) { + muteCount = outputDesc->mRefCount[stream]; + } + if (audio_is_low_visibility(stream)) { + ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount); + for (int i = 0; i < muteCount; i++) { + setStreamMute(stream, starting, mPrimaryOutput); + } + } else { + ALOGV("handleIncallSonification() high visibility"); + if (outputDesc->device() & + getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) { + 
ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount); + for (int i = 0; i < muteCount; i++) { + setStreamMute(stream, starting, mPrimaryOutput); + } + } + if (starting) { + mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION, + AUDIO_STREAM_VOICE_CALL); + } else { + mpClientInterface->stopTone(); + } + } + } + } +} + +bool AudioPolicyManager::isInCall() +{ + return isStateInCall(mPhoneState); +} + +bool AudioPolicyManager::isStateInCall(int state) { + return ((state == AUDIO_MODE_IN_CALL) || + (state == AUDIO_MODE_IN_COMMUNICATION)); +} + +uint32_t AudioPolicyManager::getMaxEffectsCpuLoad() +{ + return MAX_EFFECTS_CPU_LOAD; +} + +uint32_t AudioPolicyManager::getMaxEffectsMemory() +{ + return MAX_EFFECTS_MEMORY; +} + +// --- AudioOutputDescriptor class implementation + +AudioPolicyManager::AudioOutputDescriptor::AudioOutputDescriptor( + const IOProfile *profile) + : mId(0), mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), + mChannelMask(0), mLatency(0), + mFlags((audio_output_flags_t)0), mDevice(AUDIO_DEVICE_NONE), + mOutput1(0), mOutput2(0), mProfile(profile), mDirectOpenCount(0) +{ + // clear usage count for all stream types + for (int i = 0; i < AUDIO_STREAM_CNT; i++) { + mRefCount[i] = 0; + mCurVolume[i] = -1.0; + mMuteCount[i] = 0; + mStopTime[i] = 0; + } + for (int i = 0; i < NUM_STRATEGIES; i++) { + mStrategyMutedByDevice[i] = false; + } + if (profile != NULL) { + mSamplingRate = profile->mSamplingRates[0]; + mFormat = profile->mFormats[0]; + mChannelMask = profile->mChannelMasks[0]; + mFlags = profile->mFlags; + } +} + +audio_devices_t AudioPolicyManager::AudioOutputDescriptor::device() const +{ + if (isDuplicated()) { + return (audio_devices_t)(mOutput1->mDevice | mOutput2->mDevice); + } else { + return mDevice; + } +} + +uint32_t AudioPolicyManager::AudioOutputDescriptor::latency() +{ + if (isDuplicated()) { + return (mOutput1->mLatency > mOutput2->mLatency) ? 
mOutput1->mLatency : mOutput2->mLatency; + } else { + return mLatency; + } +} + +bool AudioPolicyManager::AudioOutputDescriptor::sharesHwModuleWith( + const AudioOutputDescriptor *outputDesc) +{ + if (isDuplicated()) { + return mOutput1->sharesHwModuleWith(outputDesc) || mOutput2->sharesHwModuleWith(outputDesc); + } else if (outputDesc->isDuplicated()){ + return sharesHwModuleWith(outputDesc->mOutput1) || sharesHwModuleWith(outputDesc->mOutput2); + } else { + return (mProfile->mModule == outputDesc->mProfile->mModule); + } +} + +void AudioPolicyManager::AudioOutputDescriptor::changeRefCount(audio_stream_type_t stream, + int delta) +{ + // forward usage count change to attached outputs + if (isDuplicated()) { + mOutput1->changeRefCount(stream, delta); + mOutput2->changeRefCount(stream, delta); + } + if ((delta + (int)mRefCount[stream]) < 0) { + ALOGW("changeRefCount() invalid delta %d for stream %d, refCount %d", + delta, stream, mRefCount[stream]); + mRefCount[stream] = 0; + return; + } + mRefCount[stream] += delta; + ALOGV("changeRefCount() stream %d, count %d", stream, mRefCount[stream]); +} + +audio_devices_t AudioPolicyManager::AudioOutputDescriptor::supportedDevices() +{ + if (isDuplicated()) { + return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices()); + } else { + return mProfile->mSupportedDevices.types() ; + } +} + +bool AudioPolicyManager::AudioOutputDescriptor::isActive(uint32_t inPastMs) const +{ + return isStrategyActive(NUM_STRATEGIES, inPastMs); +} + +bool AudioPolicyManager::AudioOutputDescriptor::isStrategyActive(routing_strategy strategy, + uint32_t inPastMs, + nsecs_t sysTime) const +{ + if ((sysTime == 0) && (inPastMs != 0)) { + sysTime = systemTime(); + } + for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) { + if (((getStrategy((audio_stream_type_t)i) == strategy) || + (NUM_STRATEGIES == strategy)) && + isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) { + return true; + } + } + return false; +} + +bool 
AudioPolicyManager::AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream, + uint32_t inPastMs, + nsecs_t sysTime) const +{ + if (mRefCount[stream] != 0) { + return true; + } + if (inPastMs == 0) { + return false; + } + if (sysTime == 0) { + sysTime = systemTime(); + } + if (ns2ms(sysTime - mStopTime[stream]) < inPastMs) { + return true; + } + return false; +} + + +status_t AudioPolicyManager::AudioOutputDescriptor::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate); + result.append(buffer); + snprintf(buffer, SIZE, " Format: %08x\n", mFormat); + result.append(buffer); + snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask); + result.append(buffer); + snprintf(buffer, SIZE, " Latency: %d\n", mLatency); + result.append(buffer); + snprintf(buffer, SIZE, " Flags %08x\n", mFlags); + result.append(buffer); + snprintf(buffer, SIZE, " Devices %08x\n", device()); + result.append(buffer); + snprintf(buffer, SIZE, " Stream volume refCount muteCount\n"); + result.append(buffer); + for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) { + snprintf(buffer, SIZE, " %02d %.03f %02d %02d\n", + i, mCurVolume[i], mRefCount[i], mMuteCount[i]); + result.append(buffer); + } + write(fd, result.string(), result.size()); + + return NO_ERROR; +} + +// --- AudioInputDescriptor class implementation + +AudioPolicyManager::AudioInputDescriptor::AudioInputDescriptor(const IOProfile *profile) + : mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(0), + mDevice(AUDIO_DEVICE_NONE), mRefCount(0), + mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile) +{ + if (profile != NULL) { + mSamplingRate = profile->mSamplingRates[0]; + mFormat = profile->mFormats[0]; + mChannelMask = profile->mChannelMasks[0]; + } +} + +status_t AudioPolicyManager::AudioInputDescriptor::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, " Sampling rate: 
%d\n", mSamplingRate); + result.append(buffer); + snprintf(buffer, SIZE, " Format: %d\n", mFormat); + result.append(buffer); + snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask); + result.append(buffer); + snprintf(buffer, SIZE, " Devices %08x\n", mDevice); + result.append(buffer); + snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount); + result.append(buffer); + write(fd, result.string(), result.size()); + + return NO_ERROR; +} + +// --- StreamDescriptor class implementation + +AudioPolicyManager::StreamDescriptor::StreamDescriptor() + : mIndexMin(0), mIndexMax(1), mCanBeMuted(true) +{ + mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0); +} + +int AudioPolicyManager::StreamDescriptor::getVolumeIndex(audio_devices_t device) +{ + device = AudioPolicyManager::getDeviceForVolume(device); + // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT + if (mIndexCur.indexOfKey(device) < 0) { + device = AUDIO_DEVICE_OUT_DEFAULT; + } + return mIndexCur.valueFor(device); +} + +void AudioPolicyManager::StreamDescriptor::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, "%s %02d %02d ", + mCanBeMuted ? 
"true " : "false", mIndexMin, mIndexMax); + result.append(buffer); + for (size_t i = 0; i < mIndexCur.size(); i++) { + snprintf(buffer, SIZE, "%04x : %02d, ", + mIndexCur.keyAt(i), + mIndexCur.valueAt(i)); + result.append(buffer); + } + result.append("\n"); + + write(fd, result.string(), result.size()); +} + +// --- EffectDescriptor class implementation + +status_t AudioPolicyManager::EffectDescriptor::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, " I/O: %d\n", mIo); + result.append(buffer); + snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy); + result.append(buffer); + snprintf(buffer, SIZE, " Session: %d\n", mSession); + result.append(buffer); + snprintf(buffer, SIZE, " Name: %s\n", mDesc.name); + result.append(buffer); + snprintf(buffer, SIZE, " %s\n", mEnabled ? "Enabled" : "Disabled"); + result.append(buffer); + write(fd, result.string(), result.size()); + + return NO_ERROR; +} + +// --- IOProfile class implementation + +AudioPolicyManager::HwModule::HwModule(const char *name) + : mName(strndup(name, AUDIO_HARDWARE_MODULE_ID_MAX_LEN)), mHandle(0) +{ +} + +AudioPolicyManager::HwModule::~HwModule() +{ + for (size_t i = 0; i < mOutputProfiles.size(); i++) { + mOutputProfiles[i]->mSupportedDevices.clear(); + delete mOutputProfiles[i]; + } + for (size_t i = 0; i < mInputProfiles.size(); i++) { + mInputProfiles[i]->mSupportedDevices.clear(); + delete mInputProfiles[i]; + } + free((void *)mName); +} + +void AudioPolicyManager::HwModule::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, " - name: %s\n", mName); + result.append(buffer); + snprintf(buffer, SIZE, " - handle: %d\n", mHandle); + result.append(buffer); + write(fd, result.string(), result.size()); + if (mOutputProfiles.size()) { + write(fd, " - outputs:\n", strlen(" - outputs:\n")); + for (size_t i = 0; i < mOutputProfiles.size(); i++) { + snprintf(buffer, SIZE, " output %d:\n", i); + 
write(fd, buffer, strlen(buffer)); + mOutputProfiles[i]->dump(fd); + } + } + if (mInputProfiles.size()) { + write(fd, " - inputs:\n", strlen(" - inputs:\n")); + for (size_t i = 0; i < mInputProfiles.size(); i++) { + snprintf(buffer, SIZE, " input %d:\n", i); + write(fd, buffer, strlen(buffer)); + mInputProfiles[i]->dump(fd); + } + } +} + +AudioPolicyManager::IOProfile::IOProfile(HwModule *module) + : mFlags((audio_output_flags_t)0), mModule(module) +{ +} + +AudioPolicyManager::IOProfile::~IOProfile() +{ +} + +// checks if the IO profile is compatible with specified parameters. +// Sampling rate, format and channel mask must be specified in order to +// get a valid a match +bool AudioPolicyManager::IOProfile::isCompatibleProfile(audio_devices_t device, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags) const +{ + if (samplingRate == 0 || !audio_is_valid_format(format) || channelMask == 0) { + return false; + } + + if ((mSupportedDevices.types() & device) != device) { + return false; + } + if ((mFlags & flags) != flags) { + return false; + } + size_t i; + for (i = 0; i < mSamplingRates.size(); i++) + { + if (mSamplingRates[i] == samplingRate) { + break; + } + } + if (i == mSamplingRates.size()) { + return false; + } + for (i = 0; i < mFormats.size(); i++) + { + if (mFormats[i] == format) { + break; + } + } + if (i == mFormats.size()) { + return false; + } + for (i = 0; i < mChannelMasks.size(); i++) + { + if (mChannelMasks[i] == channelMask) { + break; + } + } + if (i == mChannelMasks.size()) { + return false; + } + return true; +} + +void AudioPolicyManager::IOProfile::dump(int fd) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + String8 result; + + snprintf(buffer, SIZE, " - sampling rates: "); + result.append(buffer); + for (size_t i = 0; i < mSamplingRates.size(); i++) { + snprintf(buffer, SIZE, "%d", mSamplingRates[i]); + result.append(buffer); + result.append(i == (mSamplingRates.size() - 1) ? 
"\n" : ", "); + } + + snprintf(buffer, SIZE, " - channel masks: "); + result.append(buffer); + for (size_t i = 0; i < mChannelMasks.size(); i++) { + snprintf(buffer, SIZE, "0x%04x", mChannelMasks[i]); + result.append(buffer); + result.append(i == (mChannelMasks.size() - 1) ? "\n" : ", "); + } + + snprintf(buffer, SIZE, " - formats: "); + result.append(buffer); + for (size_t i = 0; i < mFormats.size(); i++) { + snprintf(buffer, SIZE, "0x%08x", mFormats[i]); + result.append(buffer); + result.append(i == (mFormats.size() - 1) ? "\n" : ", "); + } + + snprintf(buffer, SIZE, " - devices:\n"); + result.append(buffer); + write(fd, result.string(), result.size()); + DeviceDescriptor::dumpHeader(fd, 6); + for (size_t i = 0; i < mSupportedDevices.size(); i++) { + mSupportedDevices[i]->dump(fd, 6); + } + + snprintf(buffer, SIZE, " - flags: 0x%04x\n", mFlags); + result.append(buffer); + + write(fd, result.string(), result.size()); +} + +// --- DeviceDescriptor implementation + +bool AudioPolicyManager::DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const +{ + // Devices are considered equal if they: + // - are of the same type (a device type cannot be AUDIO_DEVICE_NONE) + // - have the same address or one device does not specify the address + // - have the same channel mask or one device does not specify the channel mask + return (mType == other->mType) && + (mAddress == "" || other->mAddress == "" || mAddress == other->mAddress) && + (mChannelMask == 0 || other->mChannelMask == 0 || + mChannelMask == other->mChannelMask); +} + +void AudioPolicyManager::DeviceVector::refreshTypes() +{ + mTypes = AUDIO_DEVICE_NONE; + for(size_t i = 0; i < size(); i++) { + mTypes |= itemAt(i)->mType; + } + ALOGV("DeviceVector::refreshTypes() mTypes %08x", mTypes); +} + +ssize_t AudioPolicyManager::DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const +{ + for(size_t i = 0; i < size(); i++) { + if (item->equals(itemAt(i))) { + return i; + } + } + return -1; +} + +ssize_t 
AudioPolicyManager::DeviceVector::add(const sp<DeviceDescriptor>& item) +{ + ssize_t ret = indexOf(item); + + if (ret < 0) { + ret = SortedVector::add(item); + if (ret >= 0) { + refreshTypes(); + } + } else { + ALOGW("DeviceVector::add device %08x already in", item->mType); + ret = -1; + } + return ret; +} + +ssize_t AudioPolicyManager::DeviceVector::remove(const sp<DeviceDescriptor>& item) +{ + size_t i; + ssize_t ret = indexOf(item); + + if (ret < 0) { + ALOGW("DeviceVector::remove device %08x not in", item->mType); + } else { + ret = SortedVector::removeAt(ret); + if (ret >= 0) { + refreshTypes(); + } + } + return ret; +} + +void AudioPolicyManager::DeviceVector::loadDevicesFromType(audio_devices_t types) +{ + DeviceVector deviceList; + + uint32_t role_bit = AUDIO_DEVICE_BIT_IN & types; + types &= ~role_bit; + + while (types) { + uint32_t i = 31 - __builtin_clz(types); + uint32_t type = 1 << i; + types &= ~type; + add(new DeviceDescriptor(type | role_bit)); + } +} + +void AudioPolicyManager::DeviceDescriptor::dumpHeader(int fd, int spaces) +{ + const size_t SIZE = 256; + char buffer[SIZE]; + + snprintf(buffer, SIZE, "%*s%-48s %-2s %-8s %-32s \n", + spaces, "", "Type", "ID", "Cnl Mask", "Address"); + write(fd, buffer, strlen(buffer)); +} + +status_t AudioPolicyManager::DeviceDescriptor::dump(int fd, int spaces) const +{ + const size_t SIZE = 256; + char buffer[SIZE]; + + snprintf(buffer, SIZE, "%*s%-48s %2d %08x %-32s \n", + spaces, "", + enumToString(sDeviceNameToEnumTable, + ARRAY_SIZE(sDeviceNameToEnumTable), + mType), + mId, mChannelMask, mAddress.string()); + write(fd, buffer, strlen(buffer)); + + return NO_ERROR; +} + + +// --- audio_policy.conf file parsing + +audio_output_flags_t AudioPolicyManager::parseFlagNames(char *name) +{ + uint32_t flag = 0; + + // it is OK to cast name to non const here as we are not going to use it after + // strtok() modifies it + char *flagName = strtok(name, "|"); + while (flagName != NULL) { + if (strlen(flagName) != 0) { + 
flag |= stringToEnum(sFlagNameToEnumTable, + ARRAY_SIZE(sFlagNameToEnumTable), + flagName); + } + flagName = strtok(NULL, "|"); + } + //force direct flag if offload flag is set: offloading implies a direct output stream + // and all common behaviors are driven by checking only the direct flag + // this should normally be set appropriately in the policy configuration file + if ((flag & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) { + flag |= AUDIO_OUTPUT_FLAG_DIRECT; + } + + return (audio_output_flags_t)flag; +} + +audio_devices_t AudioPolicyManager::parseDeviceNames(char *name) +{ + uint32_t device = 0; + + char *devName = strtok(name, "|"); + while (devName != NULL) { + if (strlen(devName) != 0) { + device |= stringToEnum(sDeviceNameToEnumTable, + ARRAY_SIZE(sDeviceNameToEnumTable), + devName); + } + devName = strtok(NULL, "|"); + } + return device; +} + +void AudioPolicyManager::loadSamplingRates(char *name, IOProfile *profile) +{ + char *str = strtok(name, "|"); + + // by convention, "0' in the first entry in mSamplingRates indicates the supported sampling + // rates should be read from the output stream after it is opened for the first time + if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) { + profile->mSamplingRates.add(0); + return; + } + + while (str != NULL) { + uint32_t rate = atoi(str); + if (rate != 0) { + ALOGV("loadSamplingRates() adding rate %d", rate); + profile->mSamplingRates.add(rate); + } + str = strtok(NULL, "|"); + } + return; +} + +void AudioPolicyManager::loadFormats(char *name, IOProfile *profile) +{ + char *str = strtok(name, "|"); + + // by convention, "0' in the first entry in mFormats indicates the supported formats + // should be read from the output stream after it is opened for the first time + if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) { + profile->mFormats.add(AUDIO_FORMAT_DEFAULT); + return; + } + + while (str != NULL) { + audio_format_t format = (audio_format_t)stringToEnum(sFormatNameToEnumTable, + 
ARRAY_SIZE(sFormatNameToEnumTable), + str); + if (format != AUDIO_FORMAT_DEFAULT) { + profile->mFormats.add(format); + } + str = strtok(NULL, "|"); + } + return; +} + +void AudioPolicyManager::loadInChannels(char *name, IOProfile *profile) +{ + const char *str = strtok(name, "|"); + + ALOGV("loadInChannels() %s", name); + + if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) { + profile->mChannelMasks.add(0); + return; + } + + while (str != NULL) { + audio_channel_mask_t channelMask = + (audio_channel_mask_t)stringToEnum(sInChannelsNameToEnumTable, + ARRAY_SIZE(sInChannelsNameToEnumTable), + str); + if (channelMask != 0) { + ALOGV("loadInChannels() adding channelMask %04x", channelMask); + profile->mChannelMasks.add(channelMask); + } + str = strtok(NULL, "|"); + } + return; +} + +void AudioPolicyManager::loadOutChannels(char *name, IOProfile *profile) +{ + const char *str = strtok(name, "|"); + + ALOGV("loadOutChannels() %s", name); + + // by convention, "0' in the first entry in mChannelMasks indicates the supported channel + // masks should be read from the output stream after it is opened for the first time + if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) { + profile->mChannelMasks.add(0); + return; + } + + while (str != NULL) { + audio_channel_mask_t channelMask = + (audio_channel_mask_t)stringToEnum(sOutChannelsNameToEnumTable, + ARRAY_SIZE(sOutChannelsNameToEnumTable), + str); + if (channelMask != 0) { + profile->mChannelMasks.add(channelMask); + } + str = strtok(NULL, "|"); + } + return; +} + +status_t AudioPolicyManager::loadInput(cnode *root, HwModule *module) +{ + cnode *node = root->first_child; + + IOProfile *profile = new IOProfile(module); + + while (node) { + if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) { + loadSamplingRates((char *)node->value, profile); + } else if (strcmp(node->name, FORMATS_TAG) == 0) { + loadFormats((char *)node->value, profile); + } else if (strcmp(node->name, CHANNELS_TAG) == 0) { + loadInChannels((char 
*)node->value, profile); + } else if (strcmp(node->name, DEVICES_TAG) == 0) { + profile->mSupportedDevices.loadDevicesFromType(parseDeviceNames((char *)node->value)); + } + node = node->next; + } + ALOGW_IF(profile->mSupportedDevices.isEmpty(), + "loadInput() invalid supported devices"); + ALOGW_IF(profile->mChannelMasks.size() == 0, + "loadInput() invalid supported channel masks"); + ALOGW_IF(profile->mSamplingRates.size() == 0, + "loadInput() invalid supported sampling rates"); + ALOGW_IF(profile->mFormats.size() == 0, + "loadInput() invalid supported formats"); + if (!profile->mSupportedDevices.isEmpty() && + (profile->mChannelMasks.size() != 0) && + (profile->mSamplingRates.size() != 0) && + (profile->mFormats.size() != 0)) { + + ALOGV("loadInput() adding input Supported Devices %04x", + profile->mSupportedDevices.types()); + + module->mInputProfiles.add(profile); + return NO_ERROR; + } else { + delete profile; + return BAD_VALUE; + } +} + +status_t AudioPolicyManager::loadOutput(cnode *root, HwModule *module) +{ + cnode *node = root->first_child; + + IOProfile *profile = new IOProfile(module); + + while (node) { + if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) { + loadSamplingRates((char *)node->value, profile); + } else if (strcmp(node->name, FORMATS_TAG) == 0) { + loadFormats((char *)node->value, profile); + } else if (strcmp(node->name, CHANNELS_TAG) == 0) { + loadOutChannels((char *)node->value, profile); + } else if (strcmp(node->name, DEVICES_TAG) == 0) { + profile->mSupportedDevices.loadDevicesFromType(parseDeviceNames((char *)node->value)); + } else if (strcmp(node->name, FLAGS_TAG) == 0) { + profile->mFlags = parseFlagNames((char *)node->value); + } + node = node->next; + } + ALOGW_IF(profile->mSupportedDevices.isEmpty(), + "loadOutput() invalid supported devices"); + ALOGW_IF(profile->mChannelMasks.size() == 0, + "loadOutput() invalid supported channel masks"); + ALOGW_IF(profile->mSamplingRates.size() == 0, + "loadOutput() invalid supported 
sampling rates"); + ALOGW_IF(profile->mFormats.size() == 0, + "loadOutput() invalid supported formats"); + if (!profile->mSupportedDevices.isEmpty() && + (profile->mChannelMasks.size() != 0) && + (profile->mSamplingRates.size() != 0) && + (profile->mFormats.size() != 0)) { + + ALOGV("loadOutput() adding output Supported Devices %04x, mFlags %04x", + profile->mSupportedDevices.types(), profile->mFlags); + + module->mOutputProfiles.add(profile); + return NO_ERROR; + } else { + delete profile; + return BAD_VALUE; + } +} + +void AudioPolicyManager::loadHwModule(cnode *root) +{ + cnode *node = config_find(root, OUTPUTS_TAG); + status_t status = NAME_NOT_FOUND; + + HwModule *module = new HwModule(root->name); + + if (node != NULL) { + node = node->first_child; + while (node) { + ALOGV("loadHwModule() loading output %s", node->name); + status_t tmpStatus = loadOutput(node, module); + if (status == NAME_NOT_FOUND || status == NO_ERROR) { + status = tmpStatus; + } + node = node->next; + } + } + node = config_find(root, INPUTS_TAG); + if (node != NULL) { + node = node->first_child; + while (node) { + ALOGV("loadHwModule() loading input %s", node->name); + status_t tmpStatus = loadInput(node, module); + if (status == NAME_NOT_FOUND || status == NO_ERROR) { + status = tmpStatus; + } + node = node->next; + } + } + if (status == NO_ERROR) { + mHwModules.add(module); + } else { + delete module; + } +} + +void AudioPolicyManager::loadHwModules(cnode *root) +{ + cnode *node = config_find(root, AUDIO_HW_MODULE_TAG); + if (node == NULL) { + return; + } + + node = node->first_child; + while (node) { + ALOGV("loadHwModules() loading module %s", node->name); + loadHwModule(node); + node = node->next; + } +} + +void AudioPolicyManager::loadGlobalConfig(cnode *root) +{ + cnode *node = config_find(root, GLOBAL_CONFIG_TAG); + if (node == NULL) { + return; + } + node = node->first_child; + while (node) { + if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) { + 
mAvailableOutputDevices.loadDevicesFromType(parseDeviceNames((char *)node->value)); + ALOGV("loadGlobalConfig() Attached Output Devices %08x", + mAvailableOutputDevices.types()); + } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) { + audio_devices_t device = (audio_devices_t)stringToEnum(sDeviceNameToEnumTable, + ARRAY_SIZE(sDeviceNameToEnumTable), + (char *)node->value); + if (device != AUDIO_DEVICE_NONE) { + mDefaultOutputDevice = new DeviceDescriptor(device); + } else { + ALOGW("loadGlobalConfig() default device not specified"); + } + ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", mDefaultOutputDevice->mType); + } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) { + mAvailableInputDevices.loadDevicesFromType(parseDeviceNames((char *)node->value)); + ALOGV("loadGlobalConfig() Available InputDevices %08x", mAvailableInputDevices.types()); + } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) { + mSpeakerDrcEnabled = stringToBool((char *)node->value); + ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", mSpeakerDrcEnabled); + } + node = node->next; + } +} + +status_t AudioPolicyManager::loadAudioPolicyConfig(const char *path) +{ + cnode *root; + char *data; + + data = (char *)load_file(path, NULL); + if (data == NULL) { + return -ENODEV; + } + root = config_node("", ""); + config_load(root, data); + + loadGlobalConfig(root); + loadHwModules(root); + + config_free(root); + free(root); + free(data); + + ALOGI("loadAudioPolicyConfig() loaded %s\n", path); + + return NO_ERROR; +} + +void AudioPolicyManager::defaultAudioPolicyConfig(void) +{ + HwModule *module; + IOProfile *profile; + sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC); + mAvailableOutputDevices.add(mDefaultOutputDevice); + mAvailableInputDevices.add(defaultInputDevice); + + module = new HwModule("primary"); + + profile = new IOProfile(module); + profile->mSamplingRates.add(44100); + 
profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT); + profile->mChannelMasks.add(AUDIO_CHANNEL_OUT_STEREO); + profile->mSupportedDevices.add(mDefaultOutputDevice); + profile->mFlags = AUDIO_OUTPUT_FLAG_PRIMARY; + module->mOutputProfiles.add(profile); + + profile = new IOProfile(module); + profile->mSamplingRates.add(8000); + profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT); + profile->mChannelMasks.add(AUDIO_CHANNEL_IN_MONO); + profile->mSupportedDevices.add(defaultInputDevice); + module->mInputProfiles.add(profile); + + mHwModules.add(module); +} + +}; // namespace android diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h new file mode 100644 index 0000000..8a631ba --- /dev/null +++ b/services/audiopolicy/AudioPolicyManager.h @@ -0,0 +1,620 @@ +/* + * Copyright (C) 2009 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#include <stdint.h> +#include <sys/types.h> +#include <cutils/config_utils.h> +#include <cutils/misc.h> +#include <utils/Timers.h> +#include <utils/Errors.h> +#include <utils/KeyedVector.h> +#include <utils/SortedVector.h> +#include "AudioPolicyInterface.h" + + +namespace android { + +// ---------------------------------------------------------------------------- + +// Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB +#define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5 +// Min volume for STRATEGY_SONIFICATION streams when limited by music volume: -36dB +#define SONIFICATION_HEADSET_VOLUME_MIN 0.016 +// Time in milliseconds during which we consider that music is still active after a music +// track was stopped - see computeVolume() +#define SONIFICATION_HEADSET_MUSIC_DELAY 5000 +// Time in milliseconds after media stopped playing during which we consider that the +// sonification should be as unobtrusive as during the time media was playing. +#define SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY 5000 +// Time in milliseconds during witch some streams are muted while the audio path +// is switched +#define MUTE_TIME_MS 2000 + +#define NUM_TEST_OUTPUTS 5 + +#define NUM_VOL_CURVE_KNEES 2 + +// Default minimum length allowed for offloading a compressed track +// Can be overridden by the audio.offload.min.duration.secs property +#define OFFLOAD_DEFAULT_MIN_DURATION_SECS 60 + +// ---------------------------------------------------------------------------- +// AudioPolicyManager implements audio policy manager behavior common to all platforms. 
+// ---------------------------------------------------------------------------- + +class AudioPolicyManager: public AudioPolicyInterface +#ifdef AUDIO_POLICY_TEST + , public Thread +#endif //AUDIO_POLICY_TEST +{ + +public: + AudioPolicyManager(AudioPolicyClientInterface *clientInterface); + virtual ~AudioPolicyManager(); + + // AudioPolicyInterface + virtual status_t setDeviceConnectionState(audio_devices_t device, + audio_policy_dev_state_t state, + const char *device_address); + virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, + const char *device_address); + virtual void setPhoneState(audio_mode_t state); + virtual void setForceUse(audio_policy_force_use_t usage, + audio_policy_forced_cfg_t config); + virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage); + virtual void setSystemProperty(const char* property, const char* value); + virtual status_t initCheck(); + virtual audio_io_handle_t getOutput(audio_stream_type_t stream, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo); + virtual status_t startOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session = 0); + virtual status_t stopOutput(audio_io_handle_t output, + audio_stream_type_t stream, + int session = 0); + virtual void releaseOutput(audio_io_handle_t output); + virtual audio_io_handle_t getInput(audio_source_t inputSource, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_in_acoustics_t acoustics); + + // indicates to the audio policy manager that the input starts being used. + virtual status_t startInput(audio_io_handle_t input); + + // indicates to the audio policy manager that the input stops being used. 
+ virtual status_t stopInput(audio_io_handle_t input); + virtual void releaseInput(audio_io_handle_t input); + virtual void initStreamVolume(audio_stream_type_t stream, + int indexMin, + int indexMax); + virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, + int index, + audio_devices_t device); + virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, + int *index, + audio_devices_t device); + + // return the strategy corresponding to a given stream type + virtual uint32_t getStrategyForStream(audio_stream_type_t stream); + + // return the enabled output devices for the given stream type + virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream); + + virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc = NULL); + virtual status_t registerEffect(const effect_descriptor_t *desc, + audio_io_handle_t io, + uint32_t strategy, + int session, + int id); + virtual status_t unregisterEffect(int id); + virtual status_t setEffectEnabled(int id, bool enabled); + + virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const; + // return whether a stream is playing remotely, override to change the definition of + // local/remote playback, used for instance by notification manager to not make + // media players lose audio focus when not playing locally + virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const; + virtual bool isSourceActive(audio_source_t source) const; + + virtual status_t dump(int fd); + + virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo); + +protected: + + enum routing_strategy { + STRATEGY_MEDIA, + STRATEGY_PHONE, + STRATEGY_SONIFICATION, + STRATEGY_SONIFICATION_RESPECTFUL, + STRATEGY_DTMF, + STRATEGY_ENFORCED_AUDIBLE, + NUM_STRATEGIES + }; + + // 4 points to define the volume attenuation curve, each characterized by the volume + // index (from 0 to 100) at which they apply, and the attenuation in dB at that index. 
+ // we use 100 steps to avoid rounding errors when computing the volume in volIndexToAmpl() + + enum { VOLMIN = 0, VOLKNEE1 = 1, VOLKNEE2 = 2, VOLMAX = 3, VOLCNT = 4}; + + class VolumeCurvePoint + { + public: + int mIndex; + float mDBAttenuation; + }; + + // device categories used for volume curve management. + enum device_category { + DEVICE_CATEGORY_HEADSET, + DEVICE_CATEGORY_SPEAKER, + DEVICE_CATEGORY_EARPIECE, + DEVICE_CATEGORY_CNT + }; + + class IOProfile; + + class DeviceDescriptor: public RefBase + { + public: + DeviceDescriptor(audio_devices_t type, String8 address, + audio_channel_mask_t channelMask) : + mType(type), mAddress(address), + mChannelMask(channelMask), mId(0) {} + + DeviceDescriptor(audio_devices_t type) : + mType(type), mAddress(""), + mChannelMask(0), mId(0) {} + + status_t dump(int fd, int spaces) const; + static void dumpHeader(int fd, int spaces); + + bool equals(const sp<DeviceDescriptor>& other) const; + + audio_devices_t mType; + String8 mAddress; + audio_channel_mask_t mChannelMask; + uint32_t mId; + }; + + class DeviceVector : public SortedVector< sp<DeviceDescriptor> > + { + public: + DeviceVector() : SortedVector(), mTypes(AUDIO_DEVICE_NONE) {} + + ssize_t add(const sp<DeviceDescriptor>& item); + ssize_t remove(const sp<DeviceDescriptor>& item); + ssize_t indexOf(const sp<DeviceDescriptor>& item) const; + + audio_devices_t types() const { return mTypes; } + + void loadDevicesFromType(audio_devices_t types); + + private: + void refreshTypes(); + audio_devices_t mTypes; + }; + + class HwModule { + public: + HwModule(const char *name); + ~HwModule(); + + void dump(int fd); + + const char *const mName; // base name of the audio HW module (primary, a2dp ...) + audio_module_handle_t mHandle; + Vector <IOProfile *> mOutputProfiles; // output profiles exposed by this module + Vector <IOProfile *> mInputProfiles; // input profiles exposed by this module + }; + + // the IOProfile class describes the capabilities of an output or input stream. 
+ // It is currently assumed that all combination of listed parameters are supported. + // It is used by the policy manager to determine if an output or input is suitable for + // a given use case, open/close it accordingly and connect/disconnect audio tracks + // to/from it. + class IOProfile + { + public: + IOProfile(HwModule *module); + ~IOProfile(); + + bool isCompatibleProfile(audio_devices_t device, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags) const; + + void dump(int fd); + + // by convention, "0' in the first entry in mSamplingRates, mChannelMasks or mFormats + // indicates the supported parameters should be read from the output stream + // after it is opened for the first time + Vector <uint32_t> mSamplingRates; // supported sampling rates + Vector <audio_channel_mask_t> mChannelMasks; // supported channel masks + Vector <audio_format_t> mFormats; // supported audio formats + DeviceVector mSupportedDevices; // supported devices + // (devices this output can be routed to) + audio_output_flags_t mFlags; // attribute flags (e.g primary output, + // direct output...). For outputs only. 
+ HwModule *mModule; // audio HW module exposing this I/O stream + }; + + // default volume curve + static const VolumeCurvePoint sDefaultVolumeCurve[AudioPolicyManager::VOLCNT]; + // default volume curve for media strategy + static const VolumeCurvePoint sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT]; + // volume curve for media strategy on speakers + static const VolumeCurvePoint sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT]; + // volume curve for sonification strategy on speakers + static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[AudioPolicyManager::VOLCNT]; + static const VolumeCurvePoint sSpeakerSonificationVolumeCurveDrc[AudioPolicyManager::VOLCNT]; + static const VolumeCurvePoint sDefaultSystemVolumeCurve[AudioPolicyManager::VOLCNT]; + static const VolumeCurvePoint sDefaultSystemVolumeCurveDrc[AudioPolicyManager::VOLCNT]; + static const VolumeCurvePoint sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT]; + static const VolumeCurvePoint sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT]; + static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT]; + // default volume curves per stream and device category. See initializeVolumeCurves() + static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT]; + + // descriptor for audio outputs. Used to maintain current configuration of each opened audio output + // and keep track of the usage of this output by each audio stream type. 
+ class AudioOutputDescriptor + { + public: + AudioOutputDescriptor(const IOProfile *profile); + + status_t dump(int fd); + + audio_devices_t device() const; + void changeRefCount(audio_stream_type_t stream, int delta); + + bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); } + audio_devices_t supportedDevices(); + uint32_t latency(); + bool sharesHwModuleWith(const AudioOutputDescriptor *outputDesc); + bool isActive(uint32_t inPastMs = 0) const; + bool isStreamActive(audio_stream_type_t stream, + uint32_t inPastMs = 0, + nsecs_t sysTime = 0) const; + bool isStrategyActive(routing_strategy strategy, + uint32_t inPastMs = 0, + nsecs_t sysTime = 0) const; + + audio_io_handle_t mId; // output handle + uint32_t mSamplingRate; // + audio_format_t mFormat; // + audio_channel_mask_t mChannelMask; // output configuration + uint32_t mLatency; // + audio_output_flags_t mFlags; // + audio_devices_t mDevice; // current device this output is routed to + uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output + nsecs_t mStopTime[AUDIO_STREAM_CNT]; + AudioOutputDescriptor *mOutput1; // used by duplicated outputs: first output + AudioOutputDescriptor *mOutput2; // used by duplicated outputs: second output + float mCurVolume[AUDIO_STREAM_CNT]; // current stream volume + int mMuteCount[AUDIO_STREAM_CNT]; // mute request counter + const IOProfile *mProfile; // I/O profile this output derives from + bool mStrategyMutedByDevice[NUM_STRATEGIES]; // strategies muted because of incompatible + // device selection. See checkDeviceMuteStrategies() + uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only) + }; + + // descriptor for audio inputs. Used to maintain current configuration of each opened audio input + // and keep track of the usage of this input. 
+ class AudioInputDescriptor + { + public: + AudioInputDescriptor(const IOProfile *profile); + + status_t dump(int fd); + + uint32_t mSamplingRate; // + audio_format_t mFormat; // input configuration + audio_channel_mask_t mChannelMask; // + audio_devices_t mDevice; // current device this input is routed to + uint32_t mRefCount; // number of AudioRecord clients using this output + audio_source_t mInputSource; // input source selected by application (mediarecorder.h) + const IOProfile *mProfile; // I/O profile this output derives from + }; + + // stream descriptor used for volume control + class StreamDescriptor + { + public: + StreamDescriptor(); + + int getVolumeIndex(audio_devices_t device); + void dump(int fd); + + int mIndexMin; // min volume index + int mIndexMax; // max volume index + KeyedVector<audio_devices_t, int> mIndexCur; // current volume index per device + bool mCanBeMuted; // true is the stream can be muted + + const VolumeCurvePoint *mVolumeCurve[DEVICE_CATEGORY_CNT]; + }; + + // stream descriptor used for volume control + class EffectDescriptor + { + public: + + status_t dump(int fd); + + int mIo; // io the effect is attached to + routing_strategy mStrategy; // routing strategy the effect is associated to + int mSession; // audio session the effect is on + effect_descriptor_t mDesc; // effect descriptor + bool mEnabled; // enabled state: CPU load being used or not + }; + + void addOutput(audio_io_handle_t id, AudioOutputDescriptor *outputDesc); + + // return the strategy corresponding to a given stream type + static routing_strategy getStrategy(audio_stream_type_t stream); + + // return appropriate device for streams handled by the specified strategy according to current + // phone state, connected devices... + // if fromCache is true, the device is returned from mDeviceForStrategy[], + // otherwise it is determine by current state + // (device connected,phone state, force use, a2dp output...) 
+ // This allows to: + // 1 speed up process when the state is stable (when starting or stopping an output) + // 2 access to either current device selection (fromCache == true) or + // "future" device selection (fromCache == false) when called from a context + // where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND + // before updateDevicesAndOutputs() is called. + virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy, + bool fromCache); + + // change the route of the specified output. Returns the number of ms we have slept to + // allow new routing to take effect in certain cases. + uint32_t setOutputDevice(audio_io_handle_t output, + audio_devices_t device, + bool force = false, + int delayMs = 0); + + // select input device corresponding to requested audio source + virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource); + + // return io handle of active input or 0 if no input is active + // Only considers inputs from physical devices (e.g. main mic, headset mic) when + // ignoreVirtualInputs is true. 
+ audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true); + + // initialize volume curves for each strategy and device category + void initializeVolumeCurves(); + + // compute the actual volume for a given stream according to the requested index and a particular + // device + virtual float computeVolume(audio_stream_type_t stream, int index, + audio_io_handle_t output, audio_devices_t device); + + // check that volume change is permitted, compute and send new volume to audio hardware + status_t checkAndSetVolume(audio_stream_type_t stream, int index, audio_io_handle_t output, + audio_devices_t device, int delayMs = 0, bool force = false); + + // apply all stream volumes to the specified output and device + void applyStreamVolumes(audio_io_handle_t output, audio_devices_t device, int delayMs = 0, bool force = false); + + // Mute or unmute all streams handled by the specified strategy on the specified output + void setStrategyMute(routing_strategy strategy, + bool on, + audio_io_handle_t output, + int delayMs = 0, + audio_devices_t device = (audio_devices_t)0); + + // Mute or unmute the stream on the specified output + void setStreamMute(audio_stream_type_t stream, + bool on, + audio_io_handle_t output, + int delayMs = 0, + audio_devices_t device = (audio_devices_t)0); + + // handle special cases for sonification strategy while in call: mute streams or replace by + // a special tone in the device used for communication + void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange); + + // true if device is in a telephony or VoIP call + virtual bool isInCall(); + + // true if given state represents a device in a telephony or VoIP call + virtual bool isStateInCall(int state); + + // when a device is connected, checks if an open output can be routed + // to this device. If none is open, tries to open one of the available outputs. + // Returns an output suitable to this device or 0. 
+ // when a device is disconnected, checks if an output is not used any more and + // returns its handle if any. + // transfers the audio tracks and effects from one output thread to another accordingly. + status_t checkOutputsForDevice(audio_devices_t device, + audio_policy_dev_state_t state, + SortedVector<audio_io_handle_t>& outputs, + const String8 address); + + // close an output and its companion duplicating output. + void closeOutput(audio_io_handle_t output); + + // checks and if necessary changes outputs used for all strategies. + // must be called every time a condition that affects the output choice for a given strategy + // changes: connected device, phone state, force use... + // Must be called before updateDevicesAndOutputs() + void checkOutputForStrategy(routing_strategy strategy); + + // Same as checkOutputForStrategy() but for a all strategies in order of priority + void checkOutputForAllStrategies(); + + // manages A2DP output suspend/restore according to phone state and BT SCO usage + void checkA2dpSuspend(); + + // returns the A2DP output handle if it is open or 0 otherwise + audio_io_handle_t getA2dpOutput(); + + // selects the most appropriate device on output for current state + // must be called every time a condition that affects the device choice for a given output is + // changed: connected device, phone state, force use, output start, output stop.. + // see getDeviceForStrategy() for the use of fromCache parameter + + audio_devices_t getNewDevice(audio_io_handle_t output, bool fromCache); + // updates cache of device used by all strategies (mDeviceForStrategy[]) + // must be called every time a condition that affects the device choice for a given strategy is + // changed: connected device, phone state, force use... + // cached values are used by getDeviceForStrategy() if parameter fromCache is true. 
+ // Must be called after checkOutputForAllStrategies() + + void updateDevicesAndOutputs(); + + virtual uint32_t getMaxEffectsCpuLoad(); + virtual uint32_t getMaxEffectsMemory(); +#ifdef AUDIO_POLICY_TEST + virtual bool threadLoop(); + void exit(); + int testOutputIndex(audio_io_handle_t output); +#endif //AUDIO_POLICY_TEST + + status_t setEffectEnabled(EffectDescriptor *pDesc, bool enabled); + + // returns the category the device belongs to with regard to volume curve management + static device_category getDeviceCategory(audio_devices_t device); + + // extract one device relevant for volume control from multiple device selection + static audio_devices_t getDeviceForVolume(audio_devices_t device); + + SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device, + DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> openOutputs); + bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1, + SortedVector<audio_io_handle_t>& outputs2); + + // mute/unmute strategies using an incompatible device combination + // if muting, wait for the audio in pcm buffer to be drained before proceeding + // if unmuting, unmute only after the specified delay + // Returns the number of ms waited + uint32_t checkDeviceMuteStrategies(AudioOutputDescriptor *outputDesc, + audio_devices_t prevDevice, + uint32_t delayMs); + + audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs, + audio_output_flags_t flags); + IOProfile *getInputProfile(audio_devices_t device, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask); + IOProfile *getProfileForDirectOutput(audio_devices_t device, + uint32_t samplingRate, + audio_format_t format, + audio_channel_mask_t channelMask, + audio_output_flags_t flags); + + audio_io_handle_t selectOutputForEffects(const SortedVector<audio_io_handle_t>& outputs); + + bool isNonOffloadableEffectEnabled(); + + // + // Audio policy configuration file parsing (audio_policy.conf) + // + static 
uint32_t stringToEnum(const struct StringToEnum *table, + size_t size, + const char *name); + static const char *enumToString(const struct StringToEnum *table, + size_t size, + uint32_t value); + static bool stringToBool(const char *value); + static audio_output_flags_t parseFlagNames(char *name); + static audio_devices_t parseDeviceNames(char *name); + void loadSamplingRates(char *name, IOProfile *profile); + void loadFormats(char *name, IOProfile *profile); + void loadOutChannels(char *name, IOProfile *profile); + void loadInChannels(char *name, IOProfile *profile); + status_t loadOutput(cnode *root, HwModule *module); + status_t loadInput(cnode *root, HwModule *module); + void loadHwModule(cnode *root); + void loadHwModules(cnode *root); + void loadGlobalConfig(cnode *root); + status_t loadAudioPolicyConfig(const char *path); + void defaultAudioPolicyConfig(void); + + + AudioPolicyClientInterface *mpClientInterface; // audio policy client interface + audio_io_handle_t mPrimaryOutput; // primary output handle + // list of descriptors for outputs currently opened + DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mOutputs; + // copy of mOutputs before setDeviceConnectionState() opens new outputs + // reset to mOutputs when updateDevicesAndOutputs() is called. 
+ DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mPreviousOutputs; + DefaultKeyedVector<audio_io_handle_t, AudioInputDescriptor *> mInputs; // list of input descriptors + DeviceVector mAvailableOutputDevices; // bit field of all available output devices + DeviceVector mAvailableInputDevices; // bit field of all available input devices + // without AUDIO_DEVICE_BIT_IN to allow direct bit + // field comparisons + int mPhoneState; // current phone state + audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT]; // current forced use configuration + + StreamDescriptor mStreams[AUDIO_STREAM_CNT]; // stream descriptors for volume control + bool mLimitRingtoneVolume; // limit ringtone volume to music volume if headset connected + audio_devices_t mDeviceForStrategy[NUM_STRATEGIES]; + float mLastVoiceVolume; // last voice volume value sent to audio HAL + + // Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units + static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000; + // Maximum memory allocated to audio effects in KB + static const uint32_t MAX_EFFECTS_MEMORY = 512; + uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects + uint32_t mTotalEffectsMemory; // current memory used by effects + KeyedVector<int, EffectDescriptor *> mEffects; // list of registered audio effects + bool mA2dpSuspended; // true if A2DP output is suspended + sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time + bool mSpeakerDrcEnabled;// true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER path + // to boost soft sounds, used to adjust volume curves accordingly + + Vector <HwModule *> mHwModules; + volatile int32_t mNextUniqueId; + +#ifdef AUDIO_POLICY_TEST + Mutex mLock; + Condition mWaitWorkCV; + + int mCurOutput; + bool mDirectOutput; + audio_io_handle_t mTestOutputs[NUM_TEST_OUTPUTS]; + int mTestInput; + uint32_t mTestDevice; + uint32_t mTestSamplingRate; + uint32_t mTestFormat; + 
uint32_t mTestChannels; + uint32_t mTestLatencyMs; +#endif //AUDIO_POLICY_TEST + +private: + static float volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc, + int indexInUi); + // updates device caching and output for streams that can influence the + // routing of notifications + void handleNotificationRoutingForStream(audio_stream_type_t stream); + static bool isVirtualInputDevice(audio_devices_t device); + uint32_t nextUniqueId(); + // converts device address to string sent to audio HAL via setParameters + static String8 addressToParameter(audio_devices_t device, const String8 address); +}; + +}; diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp index a37272d..4a708a0 100644 --- a/services/audioflinger/AudioPolicyService.cpp +++ b/services/audiopolicy/AudioPolicyService.cpp @@ -60,7 +60,8 @@ namespace { // ---------------------------------------------------------------------------- AudioPolicyService::AudioPolicyService() - : BnAudioPolicyService() , mpAudioPolicyDev(NULL) , mpAudioPolicy(NULL) + : BnAudioPolicyService(), mpAudioPolicyDev(NULL), mpAudioPolicy(NULL), + mAudioPolicyManager(NULL), mAudioPolicyClient(NULL) { char value[PROPERTY_VALUE_MAX]; const struct hw_module_t *module; @@ -75,28 +76,40 @@ AudioPolicyService::AudioPolicyService() mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this); // start output activity command thread mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this); + +#ifdef USE_LEGACY_AUDIO_POLICY + ALOGI("AudioPolicyService CSTOR in legacy mode"); + /* instantiate the audio policy manager */ rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module); - if (rc) + if (rc) { return; - + } rc = audio_policy_dev_open(module, &mpAudioPolicyDev); ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc)); - if (rc) + if (rc) { return; + } rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this, 
&mpAudioPolicy); ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc)); - if (rc) + if (rc) { return; + } rc = mpAudioPolicy->init_check(mpAudioPolicy); ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc)); - if (rc) + if (rc) { return; - + } ALOGI("Loaded audio policy from %s (%s)", module->name, module->id); +#else + ALOGI("AudioPolicyService CSTOR in new mode"); + + mAudioPolicyClient = new AudioPolicyClient(this); + mAudioPolicyManager = new AudioPolicyManager(mAudioPolicyClient); +#endif // load audio pre processing modules if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) { @@ -126,450 +139,19 @@ AudioPolicyService::~AudioPolicyService() } mInputs.clear(); - if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) +#ifdef USE_LEGACY_AUDIO_POLICY + if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) { mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy); - if (mpAudioPolicyDev != NULL) - audio_policy_dev_close(mpAudioPolicyDev); -} - -status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device, - audio_policy_dev_state_t state, - const char *device_address) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (!audio_is_output_device(device) && !audio_is_input_device(device)) { - return BAD_VALUE; - } - if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE && - state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) { - return BAD_VALUE; - } - - ALOGV("setDeviceConnectionState()"); - Mutex::Autolock _l(mLock); - return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device, - state, device_address); -} - -audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState( - audio_devices_t device, - const char *device_address) -{ - if (mpAudioPolicy == NULL) { - return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; - } - return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device, - device_address); -} - -status_t 
AudioPolicyService::setPhoneState(audio_mode_t state) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (uint32_t(state) >= AUDIO_MODE_CNT) { - return BAD_VALUE; - } - - ALOGV("setPhoneState()"); - - // TODO: check if it is more appropriate to do it in platform specific policy manager - AudioSystem::setMode(state); - - Mutex::Autolock _l(mLock); - mpAudioPolicy->set_phone_state(mpAudioPolicy, state); - return NO_ERROR; -} - -status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage, - audio_policy_forced_cfg_t config) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) { - return BAD_VALUE; - } - if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) { - return BAD_VALUE; - } - ALOGV("setForceUse()"); - Mutex::Autolock _l(mLock); - mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config); - return NO_ERROR; -} - -audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage) -{ - if (mpAudioPolicy == NULL) { - return AUDIO_POLICY_FORCE_NONE; - } - if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) { - return AUDIO_POLICY_FORCE_NONE; - } - return mpAudioPolicy->get_force_use(mpAudioPolicy, usage); -} - -audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream, - uint32_t samplingRate, - audio_format_t format, - audio_channel_mask_t channelMask, - audio_output_flags_t flags, - const audio_offload_info_t *offloadInfo) -{ - if (mpAudioPolicy == NULL) { - return 0; - } - ALOGV("getOutput()"); - Mutex::Autolock _l(mLock); - return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate, - format, channelMask, flags, offloadInfo); -} - -status_t AudioPolicyService::startOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int session) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - 
ALOGV("startOutput()"); - Mutex::Autolock _l(mLock); - return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session); -} - -status_t AudioPolicyService::stopOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int session) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - ALOGV("stopOutput()"); - mOutputCommandThread->stopOutputCommand(output, stream, session); - return NO_ERROR; -} - -status_t AudioPolicyService::doStopOutput(audio_io_handle_t output, - audio_stream_type_t stream, - int session) -{ - ALOGV("doStopOutput from tid %d", gettid()); - Mutex::Autolock _l(mLock); - return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session); -} - -void AudioPolicyService::releaseOutput(audio_io_handle_t output) -{ - if (mpAudioPolicy == NULL) { - return; - } - ALOGV("releaseOutput()"); - mOutputCommandThread->releaseOutputCommand(output); -} - -void AudioPolicyService::doReleaseOutput(audio_io_handle_t output) -{ - ALOGV("doReleaseOutput from tid %d", gettid()); - Mutex::Autolock _l(mLock); - mpAudioPolicy->release_output(mpAudioPolicy, output); -} - -audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource, - uint32_t samplingRate, - audio_format_t format, - audio_channel_mask_t channelMask, - int audioSession) -{ - if (mpAudioPolicy == NULL) { - return 0; - } - // already checked by client, but double-check in case the client wrapper is bypassed - if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) { - return 0; - } - - if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) { - return 0; - } - - Mutex::Autolock _l(mLock); - // the audio_in_acoustics_t parameter is ignored by get_input() - audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate, - format, channelMask, (audio_in_acoustics_t) 0); - - if (input == 0) { - return input; - } - // create audio pre processors according to input source - audio_source_t aliasSource = 
(inputSource == AUDIO_SOURCE_HOTWORD) ? - AUDIO_SOURCE_VOICE_RECOGNITION : inputSource; - - ssize_t index = mInputSources.indexOfKey(aliasSource); - if (index < 0) { - return input; - } - ssize_t idx = mInputs.indexOfKey(input); - InputDesc *inputDesc; - if (idx < 0) { - inputDesc = new InputDesc(audioSession); - mInputs.add(input, inputDesc); - } else { - inputDesc = mInputs.valueAt(idx); - } - - Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects; - for (size_t i = 0; i < effects.size(); i++) { - EffectDesc *effect = effects[i]; - sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input); - status_t status = fx->initCheck(); - if (status != NO_ERROR && status != ALREADY_EXISTS) { - ALOGW("Failed to create Fx %s on input %d", effect->mName, input); - // fx goes out of scope and strong ref on AudioEffect is released - continue; - } - for (size_t j = 0; j < effect->mParams.size(); j++) { - fx->setParameter(effect->mParams[j]); - } - inputDesc->mEffects.add(fx); - } - setPreProcessorEnabled(inputDesc, true); - return input; -} - -status_t AudioPolicyService::startInput(audio_io_handle_t input) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - Mutex::Autolock _l(mLock); - - return mpAudioPolicy->start_input(mpAudioPolicy, input); -} - -status_t AudioPolicyService::stopInput(audio_io_handle_t input) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - Mutex::Autolock _l(mLock); - - return mpAudioPolicy->stop_input(mpAudioPolicy, input); -} - -void AudioPolicyService::releaseInput(audio_io_handle_t input) -{ - if (mpAudioPolicy == NULL) { - return; - } - Mutex::Autolock _l(mLock); - mpAudioPolicy->release_input(mpAudioPolicy, input); - - ssize_t index = mInputs.indexOfKey(input); - if (index < 0) { - return; - } - InputDesc *inputDesc = mInputs.valueAt(index); - setPreProcessorEnabled(inputDesc, false); - delete inputDesc; - mInputs.removeItemsAt(index); -} - -status_t 
AudioPolicyService::initStreamVolume(audio_stream_type_t stream, - int indexMin, - int indexMax) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (uint32_t(stream) >= AUDIO_STREAM_CNT) { - return BAD_VALUE; - } - Mutex::Autolock _l(mLock); - mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax); - return NO_ERROR; -} - -status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream, - int index, - audio_devices_t device) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (!settingsAllowed()) { - return PERMISSION_DENIED; - } - if (uint32_t(stream) >= AUDIO_STREAM_CNT) { - return BAD_VALUE; - } - Mutex::Autolock _l(mLock); - if (mpAudioPolicy->set_stream_volume_index_for_device) { - return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy, - stream, - index, - device); - } else { - return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index); - } -} - -status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream, - int *index, - audio_devices_t device) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - if (uint32_t(stream) >= AUDIO_STREAM_CNT) { - return BAD_VALUE; - } - Mutex::Autolock _l(mLock); - if (mpAudioPolicy->get_stream_volume_index_for_device) { - return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy, - stream, - index, - device); - } else { - return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index); - } -} - -uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream) -{ - if (mpAudioPolicy == NULL) { - return 0; - } - return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream); -} - -//audio policy: use audio_device_t appropriately - -audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream) -{ - if (mpAudioPolicy == NULL) { - return (audio_devices_t)0; - } - return 
mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream); -} - -audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - Mutex::Autolock _l(mLock); - return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc); -} - -status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc, - audio_io_handle_t io, - uint32_t strategy, - int session, - int id) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id); -} - -status_t AudioPolicyService::unregisterEffect(int id) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - return mpAudioPolicy->unregister_effect(mpAudioPolicy, id); -} - -status_t AudioPolicyService::setEffectEnabled(int id, bool enabled) -{ - if (mpAudioPolicy == NULL) { - return NO_INIT; - } - return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled); -} - -bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const -{ - if (mpAudioPolicy == NULL) { - return 0; - } - Mutex::Autolock _l(mLock); - return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs); -} - -bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const -{ - if (mpAudioPolicy == NULL) { - return 0; - } - Mutex::Autolock _l(mLock); - return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs); -} - -bool AudioPolicyService::isSourceActive(audio_source_t source) const -{ - if (mpAudioPolicy == NULL) { - return false; } - if (mpAudioPolicy->is_source_active == 0) { - return false; + if (mpAudioPolicyDev != NULL) { + audio_policy_dev_close(mpAudioPolicyDev); } - Mutex::Autolock _l(mLock); - return mpAudioPolicy->is_source_active(mpAudioPolicy, source); +#else + delete mAudioPolicyManager; + delete mAudioPolicyClient; +#endif } -status_t 
AudioPolicyService::queryDefaultPreProcessing(int audioSession, - effect_descriptor_t *descriptors, - uint32_t *count) -{ - - if (mpAudioPolicy == NULL) { - *count = 0; - return NO_INIT; - } - Mutex::Autolock _l(mLock); - status_t status = NO_ERROR; - - size_t index; - for (index = 0; index < mInputs.size(); index++) { - if (mInputs.valueAt(index)->mSessionId == audioSession) { - break; - } - } - if (index == mInputs.size()) { - *count = 0; - return BAD_VALUE; - } - Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects; - - for (size_t i = 0; i < effects.size(); i++) { - effect_descriptor_t desc = effects[i]->descriptor(); - if (i < *count) { - descriptors[i] = desc; - } - } - if (effects.size() > *count) { - status = NO_MEMORY; - } - *count = effects.size(); - return status; -} void AudioPolicyService::binderDied(const wp<IBinder>& who) { ALOGW("binderDied() %p, calling pid %d", who.unsafe_get(), @@ -595,7 +177,11 @@ status_t AudioPolicyService::dumpInternals(int fd) char buffer[SIZE]; String8 result; +#ifdef USE_LEGACY_AUDIO_POLICY snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpAudioPolicy); +#else + snprintf(buffer, SIZE, "AudioPolicyManager: %p\n", mAudioPolicyManager); +#endif result.append(buffer); snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get()); result.append(buffer); @@ -606,7 +192,7 @@ status_t AudioPolicyService::dumpInternals(int fd) return NO_ERROR; } -status_t AudioPolicyService::dump(int fd, const Vector<String16>& args) +status_t AudioPolicyService::dump(int fd, const Vector<String16>& args __unused) { if (!dumpAllowed()) { dumpPermissionDenial(fd); @@ -625,9 +211,15 @@ status_t AudioPolicyService::dump(int fd, const Vector<String16>& args) mTonePlaybackThread->dump(fd); } +#ifdef USE_LEGACY_AUDIO_POLICY if (mpAudioPolicy) { mpAudioPolicy->dump(mpAudioPolicy, fd); } +#else + if (mAudioPolicyManager) { + mAudioPolicyManager->dump(fd); + } +#endif if (locked) mLock.unlock(); } @@ -1114,11 +706,13 @@ 
int AudioPolicyService::setStreamVolume(audio_stream_type_t stream, int AudioPolicyService::startTone(audio_policy_tone_t tone, audio_stream_type_t stream) { - if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) + if (tone != AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION) { ALOGE("startTone: illegal tone requested (%d)", tone); - if (stream != AUDIO_STREAM_VOICE_CALL) + } + if (stream != AUDIO_STREAM_VOICE_CALL) { ALOGE("startTone: illegal stream (%d) requested for tone %d", stream, tone); + } mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING, AUDIO_STREAM_VOICE_CALL); return 0; @@ -1135,21 +729,6 @@ int AudioPolicyService::setVoiceVolume(float volume, int delayMs) return (int)mAudioCommandThread->voiceVolumeCommand(volume, delayMs); } -bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info) -{ - if (mpAudioPolicy == NULL) { - ALOGV("mpAudioPolicy == NULL"); - return false; - } - - if (mpAudioPolicy->is_offload_supported == NULL) { - ALOGV("HAL does not implement is_offload_supported"); - return false; - } - - return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info); -} - // ---------------------------------------------------------------------------- // Audio pre-processing configuration // ---------------------------------------------------------------------------- @@ -1448,42 +1027,18 @@ status_t AudioPolicyService::loadPreProcessorConfig(const char *path) return NO_ERROR; } -/* implementation of the interface to the policy manager */ extern "C" { - - -static audio_module_handle_t aps_load_hw_module(void *service, - const char *name) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - - return af->loadHwModule(name); -} - -// deprecated: replaced by aps_open_output_on_module() -static audio_io_handle_t aps_open_output(void *service, +audio_module_handle_t aps_load_hw_module(void *service __unused, + const char *name); 
+audio_io_handle_t aps_open_output(void *service __unused, audio_devices_t *pDevices, uint32_t *pSamplingRate, audio_format_t *pFormat, audio_channel_mask_t *pChannelMask, uint32_t *pLatencyMs, - audio_output_flags_t flags) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } + audio_output_flags_t flags); - return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask, - pLatencyMs, flags); -} - -static audio_io_handle_t aps_open_output_on_module(void *service, +audio_io_handle_t aps_open_output_on_module(void *service __unused, audio_module_handle_t module, audio_devices_t *pDevices, uint32_t *pSamplingRate, @@ -1491,192 +1046,63 @@ static audio_io_handle_t aps_open_output_on_module(void *service, audio_channel_mask_t *pChannelMask, uint32_t *pLatencyMs, audio_output_flags_t flags, - const audio_offload_info_t *offloadInfo) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask, - pLatencyMs, flags, offloadInfo); -} - -static audio_io_handle_t aps_open_dup_output(void *service, + const audio_offload_info_t *offloadInfo); +audio_io_handle_t aps_open_dup_output(void *service __unused, audio_io_handle_t output1, - audio_io_handle_t output2) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - return af->openDuplicateOutput(output1, output2); -} - -static int aps_close_output(void *service, audio_io_handle_t output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) - return PERMISSION_DENIED; - - return af->closeOutput(output); -} - -static int aps_suspend_output(void *service, audio_io_handle_t output) -{ - sp<IAudioFlinger> af = 
AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return PERMISSION_DENIED; - } - - return af->suspendOutput(output); -} - -static int aps_restore_output(void *service, audio_io_handle_t output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return PERMISSION_DENIED; - } - - return af->restoreOutput(output); -} - -// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored -static audio_io_handle_t aps_open_input(void *service, + audio_io_handle_t output2); +int aps_close_output(void *service __unused, audio_io_handle_t output); +int aps_suspend_output(void *service __unused, audio_io_handle_t output); +int aps_restore_output(void *service __unused, audio_io_handle_t output); +audio_io_handle_t aps_open_input(void *service __unused, audio_devices_t *pDevices, uint32_t *pSamplingRate, audio_format_t *pFormat, audio_channel_mask_t *pChannelMask, - audio_in_acoustics_t acoustics) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - - return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask); -} - -static audio_io_handle_t aps_open_input_on_module(void *service, + audio_in_acoustics_t acoustics __unused); +audio_io_handle_t aps_open_input_on_module(void *service __unused, audio_module_handle_t module, audio_devices_t *pDevices, uint32_t *pSamplingRate, audio_format_t *pFormat, - audio_channel_mask_t *pChannelMask) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) { - ALOGW("%s: could not get AudioFlinger", __func__); - return 0; - } - - return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask); -} - -static int aps_close_input(void *service, audio_io_handle_t input) -{ - sp<IAudioFlinger> af = 
AudioSystem::get_audio_flinger(); - if (af == 0) - return PERMISSION_DENIED; - - return af->closeInput(input); -} - -static int aps_set_stream_output(void *service, audio_stream_type_t stream, - audio_io_handle_t output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) - return PERMISSION_DENIED; - - return af->setStreamOutput(stream, output); -} - -static int aps_move_effects(void *service, int session, + audio_channel_mask_t *pChannelMask); +int aps_close_input(void *service __unused, audio_io_handle_t input); +int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream); +int aps_move_effects(void *service __unused, int session, audio_io_handle_t src_output, - audio_io_handle_t dst_output) -{ - sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); - if (af == 0) - return PERMISSION_DENIED; - - return af->moveEffects(session, src_output, dst_output); -} - -static char * aps_get_parameters(void *service, audio_io_handle_t io_handle, - const char *keys) -{ - String8 result = AudioSystem::getParameters(io_handle, String8(keys)); - return strdup(result.string()); -} - -static void aps_set_parameters(void *service, audio_io_handle_t io_handle, - const char *kv_pairs, int delay_ms) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms); -} - -static int aps_set_stream_volume(void *service, audio_stream_type_t stream, + audio_io_handle_t dst_output); +char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle, + const char *keys); +void aps_set_parameters(void *service, audio_io_handle_t io_handle, + const char *kv_pairs, int delay_ms); +int aps_set_stream_volume(void *service, audio_stream_type_t stream, float volume, audio_io_handle_t output, - int delay_ms) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - return audioPolicyService->setStreamVolume(stream, volume, output, - 
delay_ms); -} - -static int aps_start_tone(void *service, audio_policy_tone_t tone, - audio_stream_type_t stream) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - return audioPolicyService->startTone(tone, stream); -} - -static int aps_stop_tone(void *service) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - return audioPolicyService->stopTone(); -} - -static int aps_set_voice_volume(void *service, float volume, int delay_ms) -{ - AudioPolicyService *audioPolicyService = (AudioPolicyService *)service; - - return audioPolicyService->setVoiceVolume(volume, delay_ms); -} - -}; // extern "C" + int delay_ms); +int aps_start_tone(void *service, audio_policy_tone_t tone, + audio_stream_type_t stream); +int aps_stop_tone(void *service); +int aps_set_voice_volume(void *service, float volume, int delay_ms); +}; namespace { struct audio_policy_service_ops aps_ops = { - open_output : aps_open_output, - open_duplicate_output : aps_open_dup_output, - close_output : aps_close_output, - suspend_output : aps_suspend_output, - restore_output : aps_restore_output, - open_input : aps_open_input, - close_input : aps_close_input, - set_stream_volume : aps_set_stream_volume, - set_stream_output : aps_set_stream_output, - set_parameters : aps_set_parameters, - get_parameters : aps_get_parameters, - start_tone : aps_start_tone, - stop_tone : aps_stop_tone, - set_voice_volume : aps_set_voice_volume, - move_effects : aps_move_effects, - load_hw_module : aps_load_hw_module, - open_output_on_module : aps_open_output_on_module, - open_input_on_module : aps_open_input_on_module, + .open_output = aps_open_output, + .open_duplicate_output = aps_open_dup_output, + .close_output = aps_close_output, + .suspend_output = aps_suspend_output, + .restore_output = aps_restore_output, + .open_input = aps_open_input, + .close_input = aps_close_input, + .set_stream_volume = aps_set_stream_volume, + .invalidate_stream = aps_invalidate_stream, + 
.set_parameters = aps_set_parameters, + .get_parameters = aps_get_parameters, + .start_tone = aps_start_tone, + .stop_tone = aps_stop_tone, + .set_voice_volume = aps_set_voice_volume, + .move_effects = aps_move_effects, + .load_hw_module = aps_load_hw_module, + .open_output_on_module = aps_open_output_on_module, + .open_input_on_module = aps_open_input_on_module, }; }; // namespace <unnamed> diff --git a/services/audioflinger/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h index ae053a9..cdc90d0 100644 --- a/services/audioflinger/AudioPolicyService.h +++ b/services/audiopolicy/AudioPolicyService.h @@ -30,6 +30,8 @@ #include <media/IAudioPolicyService.h> #include <media/ToneGenerator.h> #include <media/AudioEffect.h> +#include <hardware_legacy/AudioPolicyInterface.h> +#include "AudioPolicyManager.h" namespace android { @@ -38,7 +40,6 @@ namespace android { class AudioPolicyService : public BinderService<AudioPolicyService>, public BnAudioPolicyService, -// public AudioPolicyClientInterface, public IBinder::DeathRecipient { friend class BinderService<AudioPolicyService>; @@ -313,6 +314,91 @@ private: Vector< sp<AudioEffect> >mEffects; }; + class AudioPolicyClient : public AudioPolicyClientInterface + { + public: + AudioPolicyClient(AudioPolicyService *service) : mAudioPolicyService(service) {} + virtual ~AudioPolicyClient() {} + + // + // Audio HW module functions + // + + // loads a HW module. + virtual audio_module_handle_t loadHwModule(const char *name); + + // + // Audio output Control functions + // + + // opens an audio output with the requested parameters. The parameter values can indicate to use the default values + // in case the audio policy manager has no specific requirements for the output being opened. + // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream. + // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly. 
+ virtual audio_io_handle_t openOutput(audio_module_handle_t module, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask, + uint32_t *pLatencyMs, + audio_output_flags_t flags, + const audio_offload_info_t *offloadInfo = NULL); + // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by + // a special mixer thread in the AudioFlinger. + virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2); + // closes the output stream + virtual status_t closeOutput(audio_io_handle_t output); + // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in + // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded. + virtual status_t suspendOutput(audio_io_handle_t output); + // restores a suspended output. + virtual status_t restoreOutput(audio_io_handle_t output); + + // + // Audio input Control functions + // + + // opens an audio input + virtual audio_io_handle_t openInput(audio_module_handle_t module, + audio_devices_t *pDevices, + uint32_t *pSamplingRate, + audio_format_t *pFormat, + audio_channel_mask_t *pChannelMask); + // closes an audio input + virtual status_t closeInput(audio_io_handle_t input); + // + // misc control functions + // + + // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes + // for each output (destination device) it is attached to. 
+ virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0); + + // invalidate a stream type, causing a reroute to an unspecified new output + virtual status_t invalidateStream(audio_stream_type_t stream); + + // function enabling to send proprietary informations directly from audio policy manager to audio hardware interface. + virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0); + // function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager. + virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys); + + // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing + // over a telephony device during a phone call. + virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream); + virtual status_t stopTone(); + + // set down link audio volume. 
+ virtual status_t setVoiceVolume(float volume, int delayMs = 0); + + // move effect to the specified output + virtual status_t moveEffects(int session, + audio_io_handle_t srcOutput, + audio_io_handle_t dstOutput); + + private: + AudioPolicyService *mAudioPolicyService; + }; + static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1]; void setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled); @@ -344,6 +430,9 @@ private: sp<AudioCommandThread> mOutputCommandThread; // process stop and release output struct audio_policy_device *mpAudioPolicyDev; struct audio_policy *mpAudioPolicy; + AudioPolicyManager *mAudioPolicyManager; + AudioPolicyClient *mAudioPolicyClient; + KeyedVector< audio_source_t, InputSourceDesc* > mInputSources; KeyedVector< audio_io_handle_t, InputDesc* > mInputs; }; diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk index 51ba698..2f485b9 100644 --- a/services/camera/libcameraservice/Android.mk +++ b/services/camera/libcameraservice/Android.mk @@ -1,3 +1,17 @@ +# Copyright 2010 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ LOCAL_PATH:= $(call my-dir) # @@ -53,6 +67,7 @@ LOCAL_SHARED_LIBRARIES:= \ LOCAL_C_INCLUDES += \ system/media/camera/include \ + system/media/private/camera/include \ external/jpeg diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp index 7fdf304..bfef50e 100644 --- a/services/camera/libcameraservice/CameraDeviceFactory.cpp +++ b/services/camera/libcameraservice/CameraDeviceFactory.cpp @@ -46,6 +46,8 @@ sp<CameraDeviceBase> CameraDeviceFactory::createDevice(int cameraId) { device = new Camera2Device(cameraId); break; case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + case CAMERA_DEVICE_API_VERSION_3_2: device = new Camera3Device(cameraId); break; default: diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp index 9ce7daf..02bca1f 100644 --- a/services/camera/libcameraservice/CameraService.cpp +++ b/services/camera/libcameraservice/CameraService.cpp @@ -1,24 +1,24 @@ /* -** -** Copyright (C) 2008, The Android Open Source Project -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -*/ + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #define LOG_TAG "CameraService" //#define LOG_NDEBUG 0 #include <stdio.h> +#include <string.h> #include <sys/types.h> #include <pthread.h> @@ -32,10 +32,13 @@ #include <gui/Surface.h> #include <hardware/hardware.h> #include <media/AudioSystem.h> +#include <media/IMediaHTTPService.h> #include <media/mediaplayer.h> #include <utils/Errors.h> #include <utils/Log.h> #include <utils/String16.h> +#include <utils/Trace.h> +#include <system/camera_vendor_tags.h> #include "CameraService.h" #include "api1/CameraClient.h" @@ -130,6 +133,12 @@ void CameraService::onFirstRef() mModule->set_callbacks(this); } + VendorTagDescriptor::clearGlobalVendorTagDescriptor(); + + if (mModule->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_2) { + setUpVendorTags(); + } + CameraDeviceFactory::registerService(this); } } @@ -141,6 +150,7 @@ CameraService::~CameraService() { } } + VendorTagDescriptor::clearGlobalVendorTagDescriptor(); gCameraService = NULL; } @@ -269,6 +279,22 @@ status_t CameraService::getCameraCharacteristics(int cameraId, return ret; } +status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) { + if (!mModule) { + ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__); + return -ENODEV; + } + + if (mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_2) { + // TODO: Remove this check once HAL1 shim is in place. 
+ ALOGW("%s: Only HAL module version V2.2 or higher supports vendor tags", __FUNCTION__); + return -EOPNOTSUPP; + } + + desc = VendorTagDescriptor::getGlobalVendorTagDescriptor(); + return OK; +} + int CameraService::getDeviceVersion(int cameraId, int* facing) { struct camera_info info; if (mModule->get_camera_info(cameraId, &info) != OK) { @@ -298,6 +324,8 @@ bool CameraService::isValidCameraId(int cameraId) { case CAMERA_DEVICE_API_VERSION_2_0: case CAMERA_DEVICE_API_VERSION_2_1: case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + case CAMERA_DEVICE_API_VERSION_3_2: return true; default: return false; @@ -306,6 +334,44 @@ bool CameraService::isValidCameraId(int cameraId) { return false; } +bool CameraService::setUpVendorTags() { + vendor_tag_ops_t vOps = vendor_tag_ops_t(); + + // Check if vendor operations have been implemented + if (mModule->get_vendor_tag_ops == NULL) { + ALOGI("%s: No vendor tags defined for this device.", __FUNCTION__); + return false; + } + + ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops"); + mModule->get_vendor_tag_ops(&vOps); + ATRACE_END(); + + // Ensure all vendor operations are present + if (vOps.get_tag_count == NULL || vOps.get_all_tags == NULL || + vOps.get_section_name == NULL || vOps.get_tag_name == NULL || + vOps.get_tag_type == NULL) { + ALOGE("%s: Vendor tag operations not fully defined. Ignoring definitions." + , __FUNCTION__); + return false; + } + + // Read all vendor tag definitions into a descriptor + sp<VendorTagDescriptor> desc; + status_t res; + if ((res = VendorTagDescriptor::createDescriptorFromOps(&vOps, /*out*/desc)) + != OK) { + ALOGE("%s: Could not generate descriptor from vendor tag operations," + "received error %s (%d). 
Camera clients will not be able to use" + "vendor tags", __FUNCTION__, strerror(res), res); + return false; + } + + // Set the global descriptor to use with camera metadata + VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc); + return true; +} + status_t CameraService::validateConnect(int cameraId, /*inout*/ int& clientUid) const { @@ -455,6 +521,8 @@ status_t CameraService::connect( case CAMERA_DEVICE_API_VERSION_2_0: case CAMERA_DEVICE_API_VERSION_2_1: case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + case CAMERA_DEVICE_API_VERSION_3_2: client = new Camera2Client(this, cameraClient, clientPackageName, cameraId, facing, callingPid, clientUid, getpid(), @@ -541,6 +609,8 @@ status_t CameraService::connectPro( case CAMERA_DEVICE_API_VERSION_2_0: case CAMERA_DEVICE_API_VERSION_2_1: case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + case CAMERA_DEVICE_API_VERSION_3_2: client = new ProCamera2Client(this, cameraCb, String16(), cameraId, facing, callingPid, USE_CALLING_UID, getpid()); break; @@ -619,6 +689,8 @@ status_t CameraService::connectDevice( case CAMERA_DEVICE_API_VERSION_2_0: case CAMERA_DEVICE_API_VERSION_2_1: case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + case CAMERA_DEVICE_API_VERSION_3_2: client = new CameraDeviceClient(this, cameraCb, String16(), cameraId, facing, callingPid, USE_CALLING_UID, getpid()); break; @@ -876,7 +948,7 @@ void CameraService::setCameraFree(int cameraId) { MediaPlayer* CameraService::newMediaPlayer(const char *file) { MediaPlayer* mp = new MediaPlayer(); - if (mp->setDataSource(file, NULL) == NO_ERROR) { + if (mp->setDataSource(NULL /* httpService */, file, NULL) == NO_ERROR) { mp->setAudioStreamType(AUDIO_STREAM_ENFORCED_AUDIBLE); mp->prepare(); } else { @@ -1044,7 +1116,8 @@ void CameraService::BasicClient::opChanged(int32_t op, const String16& packageNa // Reset the client PID to allow server-initiated disconnect, // and to prevent further calls 
by client. mClientPid = getCallingPid(); - notifyError(); + CaptureResultExtras resultExtras; // a dummy result (invalid) + notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, resultExtras); disconnect(); } } @@ -1073,7 +1146,8 @@ CameraService::Client* CameraService::Client::getClientFromCookie(void* user) { return client; } -void CameraService::Client::notifyError() { +void CameraService::Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras) { mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0); } @@ -1127,7 +1201,8 @@ CameraService::ProClient::ProClient(const sp<CameraService>& cameraService, CameraService::ProClient::~ProClient() { } -void CameraService::ProClient::notifyError() { +void CameraService::ProClient::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras) { mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0); } diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h index ad6a582..76ea7be 100644 --- a/services/camera/libcameraservice/CameraService.h +++ b/services/camera/libcameraservice/CameraService.h @@ -1,19 +1,18 @@ /* -** -** Copyright (C) 2008, The Android Open Source Project -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. 
-*/ + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ #ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H #define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H @@ -31,6 +30,8 @@ #include <camera/IProCameraCallbacks.h> #include <camera/camera2/ICameraDeviceUser.h> #include <camera/camera2/ICameraDeviceCallbacks.h> +#include <camera/VendorTagDescriptor.h> +#include <camera/CaptureResult.h> #include <camera/ICameraServiceListener.h> @@ -73,6 +74,7 @@ public: struct CameraInfo* cameraInfo); virtual status_t getCameraCharacteristics(int cameraId, CameraMetadata* cameraInfo); + virtual status_t getCameraVendorTagDescriptor(/*out*/ sp<VendorTagDescriptor>& desc); virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId, const String16& clientPackageName, int clientUid, @@ -181,7 +183,9 @@ public: status_t finishCameraOps(); // Notify client about a fatal error - virtual void notifyError() = 0; + virtual void notifyError( + ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras) = 0; private: AppOpsManager mAppOpsManager; @@ -258,7 +262,8 @@ public: // convert client from cookie. Client lock should be acquired before getting Client. 
static Client* getClientFromCookie(void* user); - virtual void notifyError(); + virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras); // Initialized in constructor @@ -306,7 +311,8 @@ public: virtual void onExclusiveLockStolen() = 0; protected: - virtual void notifyError(); + virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras); sp<IProCameraCallbacks> mRemoteCallback; }; // class ProClient @@ -387,6 +393,8 @@ private: // Helpers bool isValidCameraId(int cameraId); + + bool setUpVendorTags(); }; } // namespace android diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp index af23557..0447979 100644 --- a/services/camera/libcameraservice/api1/Camera2Client.cpp +++ b/services/camera/libcameraservice/api1/Camera2Client.cpp @@ -118,7 +118,9 @@ status_t Camera2Client::initialize(camera_module_t *module) mZslProcessorThread = zslProc; break; } - case CAMERA_DEVICE_API_VERSION_3_0:{ + case CAMERA_DEVICE_API_VERSION_3_0: + case CAMERA_DEVICE_API_VERSION_3_1: + case CAMERA_DEVICE_API_VERSION_3_2: { sp<ZslProcessor3> zslProc = new ZslProcessor3(this, mCaptureSequencer); mZslProcessor = zslProc; @@ -238,7 +240,7 @@ status_t Camera2Client::dump(int fd, const Vector<String16>& args) { result.append(" Scene mode: "); switch (p.sceneMode) { - case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED: + case ANDROID_CONTROL_SCENE_MODE_DISABLED: result.append("AUTO\n"); break; CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT) @@ -816,6 +818,8 @@ status_t Camera2Client::startPreviewL(Parameters ¶ms, bool restart) { return res; } outputStreams.push(getZslStreamId()); + } else { + mZslProcessor->deleteStream(); } outputStreams.push(getPreviewStreamId()); @@ -1162,7 +1166,7 @@ status_t Camera2Client::autoFocus() { * Handle quirk mode for AF 
in scene modes */ if (l.mParameters.quirks.triggerAfWithAuto && - l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED && + l.mParameters.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED && l.mParameters.focusMode != Parameters::FOCUS_MODE_AUTO && !l.mParameters.focusingAreas[0].isEmpty()) { ALOGV("%s: Quirk: Switching from focusMode %d to AUTO", diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp index d2ac79c..c266213 100644 --- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp @@ -110,11 +110,13 @@ status_t CallbackProcessor::updateStream(const Parameters ¶ms) { if (!mCallbackToApp && mCallbackConsumer == 0) { // Create CPU buffer queue endpoint, since app hasn't given us one // Make it async to avoid disconnect deadlocks - sp<BufferQueue> bq = new BufferQueue(); - mCallbackConsumer = new CpuConsumer(bq, kCallbackHeapCount); + sp<IGraphicBufferProducer> producer; + sp<IGraphicBufferConsumer> consumer; + BufferQueue::createBufferQueue(&producer, &consumer); + mCallbackConsumer = new CpuConsumer(consumer, kCallbackHeapCount); mCallbackConsumer->setFrameAvailableListener(this); mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer")); - mCallbackWindow = new Surface(bq); + mCallbackWindow = new Surface(producer); } if (mCallbackStreamId != NO_STREAM) { diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp index f5c28ed..8268f65 100644 --- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp +++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp @@ -106,13 +106,12 @@ void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) { } } -void CaptureSequencer::onFrameAvailable(int32_t requestId, - const 
CameraMetadata &frame) { - ALOGV("%s: Listener found new frame", __FUNCTION__); +void CaptureSequencer::onResultAvailable(const CaptureResult &result) { ATRACE_CALL(); + ALOGV("%s: New result available.", __FUNCTION__); Mutex::Autolock l(mInputMutex); - mNewFrameId = requestId; - mNewFrame = frame; + mNewFrameId = result.mResultExtras.requestId; + mNewFrame = result.mMetadata; if (!mNewFrameReceived) { mNewFrameReceived = true; mNewFrameSignal.signal(); @@ -585,12 +584,15 @@ CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait( entry = mNewFrame.find(ANDROID_SENSOR_TIMESTAMP); if (entry.count == 0) { ALOGE("No timestamp field in capture frame!"); - } - if (entry.data.i64[0] != mCaptureTimestamp) { - ALOGW("Mismatched capture timestamps: Metadata frame %" PRId64 "," - " captured buffer %" PRId64, - entry.data.i64[0], - mCaptureTimestamp); + } else if (entry.count == 1) { + if (entry.data.i64[0] != mCaptureTimestamp) { + ALOGW("Mismatched capture timestamps: Metadata frame %" PRId64 "," + " captured buffer %" PRId64, + entry.data.i64[0], + mCaptureTimestamp); + } + } else { + ALOGE("Timestamp metadata is malformed!"); } client->removeFrameListener(mCaptureId, mCaptureId + 1, this); diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h index 9fb4ee7..d42ab13 100644 --- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h +++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h @@ -24,6 +24,7 @@ #include <utils/Mutex.h> #include <utils/Condition.h> #include "camera/CameraMetadata.h" +#include "camera/CaptureResult.h" #include "Parameters.h" #include "FrameProcessor.h" @@ -61,8 +62,8 @@ class CaptureSequencer: // Notifications about AE state changes void notifyAutoExposure(uint8_t newState, int triggerId); - // Notifications from the frame processor - virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame); + 
// Notification from the frame processor + virtual void onResultAvailable(const CaptureResult &result); // Notifications from the JPEG processor void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer); diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp index dd5b27c..69bea24 100644 --- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp @@ -55,7 +55,7 @@ FrameProcessor::FrameProcessor(wp<CameraDeviceBase> device, FrameProcessor::~FrameProcessor() { } -bool FrameProcessor::processSingleFrame(CameraMetadata &frame, +bool FrameProcessor::processSingleFrame(CaptureResult &frame, const sp<CameraDeviceBase> &device) { sp<Camera2Client> client = mClient.promote(); @@ -66,19 +66,19 @@ bool FrameProcessor::processSingleFrame(CameraMetadata &frame, bool partialResult = false; if (mUsePartialQuirk) { camera_metadata_entry_t entry; - entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT); + entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT); if (entry.count > 0 && entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) { partialResult = true; } } - if (!partialResult && processFaceDetect(frame, client) != OK) { + if (!partialResult && processFaceDetect(frame.mMetadata, client) != OK) { return false; } if (mSynthesize3ANotify) { - process3aState(frame, client); + process3aState(frame.mMetadata, client); } return FrameProcessorBase::processSingleFrame(frame, device); diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h index 856ad32..514bd1a 100644 --- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h +++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h @@ -51,7 +51,7 @@ class FrameProcessor : public FrameProcessorBase { void processNewFrames(const 
sp<Camera2Client> &client); - virtual bool processSingleFrame(CameraMetadata &frame, + virtual bool processSingleFrame(CaptureResult &frame, const sp<CameraDeviceBase> &device); status_t processFaceDetect(const CameraMetadata &frame, diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp index 2de7a2b..964d278 100644 --- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp @@ -83,11 +83,13 @@ status_t JpegProcessor::updateStream(const Parameters ¶ms) { if (mCaptureConsumer == 0) { // Create CPU buffer queue endpoint - sp<BufferQueue> bq = new BufferQueue(); - mCaptureConsumer = new CpuConsumer(bq, 1); + sp<IGraphicBufferProducer> producer; + sp<IGraphicBufferConsumer> consumer; + BufferQueue::createBufferQueue(&producer, &consumer); + mCaptureConsumer = new CpuConsumer(consumer, 1); mCaptureConsumer->setFrameAvailableListener(this); mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer")); - mCaptureWindow = new Surface(bq); + mCaptureWindow = new Surface(producer); // Create memory for API consumption mCaptureHeap = new MemoryHeapBase(maxJpegSize.data.i32[0], 0, "Camera2Client::CaptureHeap"); diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp index 07654c0..5bfb969 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.cpp +++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp @@ -16,7 +16,7 @@ #define LOG_TAG "Camera2-Parameters" #define ATRACE_TAG ATRACE_TAG_CAMERA -// #define LOG_NDEBUG 0 +//#define LOG_NDEBUG 0 #include <utils/Log.h> #include <utils/Trace.h> @@ -92,6 +92,26 @@ status_t Parameters::initialize(const CameraMetadata *info) { staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2); if (!availableFpsRanges.count) return NO_INIT; + previewFpsRange[0] = 
availableFpsRanges.data.i32[0]; + previewFpsRange[1] = availableFpsRanges.data.i32[1]; + + params.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, + String8::format("%d,%d", + previewFpsRange[0] * kFpsToApiScale, + previewFpsRange[1] * kFpsToApiScale)); + + { + String8 supportedPreviewFpsRange; + for (size_t i=0; i < availableFpsRanges.count; i += 2) { + if (i != 0) supportedPreviewFpsRange += ","; + supportedPreviewFpsRange += String8::format("(%d,%d)", + availableFpsRanges.data.i32[i] * kFpsToApiScale, + availableFpsRanges.data.i32[i+1] * kFpsToApiScale); + } + params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE, + supportedPreviewFpsRange); + } + previewFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; params.set(CameraParameters::KEY_PREVIEW_FORMAT, formatEnumToString(previewFormat)); // NV21 @@ -159,9 +179,6 @@ status_t Parameters::initialize(const CameraMetadata *info) { supportedPreviewFormats); } - previewFpsRange[0] = availableFpsRanges.data.i32[0]; - previewFpsRange[1] = availableFpsRanges.data.i32[1]; - // PREVIEW_FRAME_RATE / SUPPORTED_PREVIEW_FRAME_RATES are deprecated, but // still have to do something sane for them @@ -170,27 +187,6 @@ status_t Parameters::initialize(const CameraMetadata *info) { params.set(CameraParameters::KEY_PREVIEW_FRAME_RATE, previewFps); - // PREVIEW_FPS_RANGE - // -- Order matters. Set range after single value to so that a roundtrip - // of setParameters(getParameters()) would keep the FPS range in higher - // order. 
- params.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, - String8::format("%d,%d", - previewFpsRange[0] * kFpsToApiScale, - previewFpsRange[1] * kFpsToApiScale)); - - { - String8 supportedPreviewFpsRange; - for (size_t i=0; i < availableFpsRanges.count; i += 2) { - if (i != 0) supportedPreviewFpsRange += ","; - supportedPreviewFpsRange += String8::format("(%d,%d)", - availableFpsRanges.data.i32[i] * kFpsToApiScale, - availableFpsRanges.data.i32[i+1] * kFpsToApiScale); - } - params.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE, - supportedPreviewFpsRange); - } - { SortedVector<int32_t> sortedPreviewFrameRates; @@ -470,7 +466,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { supportedAntibanding); } - sceneMode = ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + sceneMode = ANDROID_CONTROL_SCENE_MODE_DISABLED; params.set(CameraParameters::KEY_SCENE_MODE, CameraParameters::SCENE_MODE_AUTO); @@ -486,7 +482,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { if (addComma) supportedSceneModes += ","; addComma = true; switch (availableSceneModes.data.u8[i]) { - case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED: + case ANDROID_CONTROL_SCENE_MODE_DISABLED: noSceneModes = true; break; case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY: @@ -668,13 +664,13 @@ status_t Parameters::initialize(const CameraMetadata *info) { focusState = ANDROID_CONTROL_AF_STATE_INACTIVE; shadowFocusMode = FOCUS_MODE_INVALID; - camera_metadata_ro_entry_t max3aRegions = - staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1); - if (!max3aRegions.count) return NO_INIT; + camera_metadata_ro_entry_t max3aRegions = staticInfo(ANDROID_CONTROL_MAX_REGIONS, + Parameters::NUM_REGION, Parameters::NUM_REGION); + if (max3aRegions.count != Parameters::NUM_REGION) return NO_INIT; int32_t maxNumFocusAreas = 0; if (focusMode != Parameters::FOCUS_MODE_FIXED) { - maxNumFocusAreas = max3aRegions.data.i32[0]; + maxNumFocusAreas = max3aRegions.data.i32[Parameters::REGION_AF]; } 
params.set(CameraParameters::KEY_MAX_NUM_FOCUS_AREAS, maxNumFocusAreas); params.set(CameraParameters::KEY_FOCUS_AREAS, @@ -734,7 +730,7 @@ status_t Parameters::initialize(const CameraMetadata *info) { meteringAreas.add(Parameters::Area(0, 0, 0, 0, 0)); params.set(CameraParameters::KEY_MAX_NUM_METERING_AREAS, - max3aRegions.data.i32[0]); + max3aRegions.data.i32[Parameters::REGION_AE]); params.set(CameraParameters::KEY_METERING_AREAS, "(0,0,0,0,0)"); @@ -1088,7 +1084,7 @@ camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag, status_t Parameters::set(const String8& paramString) { status_t res; - CameraParameters2 newParams(paramString); + CameraParameters newParams(paramString); // TODO: Currently ignoring any changes to supposedly read-only parameters // such as supported preview sizes, etc. Should probably produce an error if @@ -1131,73 +1127,29 @@ status_t Parameters::set(const String8& paramString) { // RECORDING_HINT (always supported) validatedParams.recordingHint = boolFromString( newParams.get(CameraParameters::KEY_RECORDING_HINT) ); - IF_ALOGV() { // Avoid unused variable warning - bool recordingHintChanged = - validatedParams.recordingHint != recordingHint; - if (recordingHintChanged) { - ALOGV("%s: Recording hint changed to %d", - __FUNCTION__, validatedParams.recordingHint); - } - } + bool recordingHintChanged = validatedParams.recordingHint != recordingHint; + ALOGV_IF(recordingHintChanged, "%s: Recording hint changed to %d", + __FUNCTION__, recordingHintChanged); // PREVIEW_FPS_RANGE + bool fpsRangeChanged = false; + int32_t lastSetFpsRange[2]; - /** - * Use the single FPS value if it was set later than the range. - * Otherwise, use the range value. - */ - bool fpsUseSingleValue; - { - const char *fpsRange, *fpsSingle; - - fpsRange = newParams.get(CameraParameters::KEY_PREVIEW_FRAME_RATE); - fpsSingle = newParams.get(CameraParameters::KEY_PREVIEW_FPS_RANGE); - - /** - * Pick either the range or the single key if only one was set. 
- * - * If both are set, pick the one that has greater set order. - */ - if (fpsRange == NULL && fpsSingle == NULL) { - ALOGE("%s: FPS was not set. One of %s or %s must be set.", - __FUNCTION__, CameraParameters::KEY_PREVIEW_FRAME_RATE, - CameraParameters::KEY_PREVIEW_FPS_RANGE); - return BAD_VALUE; - } else if (fpsRange == NULL) { - fpsUseSingleValue = true; - ALOGV("%s: FPS range not set, using FPS single value", - __FUNCTION__); - } else if (fpsSingle == NULL) { - fpsUseSingleValue = false; - ALOGV("%s: FPS single not set, using FPS range value", - __FUNCTION__); - } else { - int fpsKeyOrder; - res = newParams.compareSetOrder( - CameraParameters::KEY_PREVIEW_FRAME_RATE, - CameraParameters::KEY_PREVIEW_FPS_RANGE, - &fpsKeyOrder); - LOG_ALWAYS_FATAL_IF(res != OK, "Impossibly bad FPS keys"); - - fpsUseSingleValue = (fpsKeyOrder > 0); + params.getPreviewFpsRange(&lastSetFpsRange[0], &lastSetFpsRange[1]); + lastSetFpsRange[0] /= kFpsToApiScale; + lastSetFpsRange[1] /= kFpsToApiScale; - } - - ALOGV("%s: Preview FPS value is used from '%s'", - __FUNCTION__, fpsUseSingleValue ? 
"single" : "range"); - } newParams.getPreviewFpsRange(&validatedParams.previewFpsRange[0], &validatedParams.previewFpsRange[1]); - validatedParams.previewFpsRange[0] /= kFpsToApiScale; validatedParams.previewFpsRange[1] /= kFpsToApiScale; - // Ignore the FPS range if the FPS single has higher precedence - if (!fpsUseSingleValue) { - ALOGV("%s: Preview FPS range (%d, %d)", __FUNCTION__, - validatedParams.previewFpsRange[0], - validatedParams.previewFpsRange[1]); + // Compare the FPS range value from the last set() to the current set() + // to determine if the client has changed it + if (validatedParams.previewFpsRange[0] != lastSetFpsRange[0] || + validatedParams.previewFpsRange[1] != lastSetFpsRange[1]) { + fpsRangeChanged = true; camera_metadata_ro_entry_t availablePreviewFpsRanges = staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2); for (i = 0; i < availablePreviewFpsRanges.count; i += 2) { @@ -1248,13 +1200,14 @@ status_t Parameters::set(const String8& paramString) { } } - // PREVIEW_FRAME_RATE Deprecated - // - Use only if the single FPS value was set later than the FPS range - if (fpsUseSingleValue) { + // PREVIEW_FRAME_RATE Deprecated, only use if the preview fps range is + // unchanged this time. The single-value FPS is the same as the minimum of + // the range. To detect whether the application has changed the value of + // previewFps, compare against their last-set preview FPS. + if (!fpsRangeChanged) { int previewFps = newParams.getPreviewFrameRate(); - ALOGV("%s: Preview FPS single value requested: %d", - __FUNCTION__, previewFps); - { + int lastSetPreviewFps = params.getPreviewFrameRate(); + if (previewFps != lastSetPreviewFps || recordingHintChanged) { camera_metadata_ro_entry_t availableFrameRates = staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES); /** @@ -1323,35 +1276,6 @@ status_t Parameters::set(const String8& paramString) { } } - /** - * Update Preview FPS and Preview FPS ranges based on - * what we actually set. 
- * - * This updates the API-visible (Camera.Parameters#getParameters) values of - * the FPS fields, not only the internal versions. - * - * Order matters: The value that was set last takes precedence. - * - If the client does a setParameters(getParameters()) we retain - * the same order for preview FPS. - */ - if (!fpsUseSingleValue) { - // Set fps single, then fps range (range wins) - newParams.setPreviewFrameRate( - fpsFromRange(/*min*/validatedParams.previewFpsRange[0], - /*max*/validatedParams.previewFpsRange[1])); - newParams.setPreviewFpsRange( - validatedParams.previewFpsRange[0] * kFpsToApiScale, - validatedParams.previewFpsRange[1] * kFpsToApiScale); - } else { - // Set fps range, then fps single (single wins) - newParams.setPreviewFpsRange( - validatedParams.previewFpsRange[0] * kFpsToApiScale, - validatedParams.previewFpsRange[1] * kFpsToApiScale); - // Set this to the same value, but with higher priority - newParams.setPreviewFrameRate( - newParams.getPreviewFrameRate()); - } - // PICTURE_SIZE newParams.getPictureSize(&validatedParams.pictureWidth, &validatedParams.pictureHeight); @@ -1522,7 +1446,7 @@ status_t Parameters::set(const String8& paramString) { newParams.get(CameraParameters::KEY_SCENE_MODE) ); if (validatedParams.sceneMode != sceneMode && validatedParams.sceneMode != - ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) { + ANDROID_CONTROL_SCENE_MODE_DISABLED) { camera_metadata_ro_entry_t availableSceneModes = staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES); for (i = 0; i < availableSceneModes.count; i++) { @@ -1537,7 +1461,7 @@ status_t Parameters::set(const String8& paramString) { } } bool sceneModeSet = - validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + validatedParams.sceneMode != ANDROID_CONTROL_SCENE_MODE_DISABLED; // FLASH_MODE if (sceneModeSet) { @@ -1667,10 +1591,11 @@ status_t Parameters::set(const String8& paramString) { // FOCUS_AREAS res = parseAreas(newParams.get(CameraParameters::KEY_FOCUS_AREAS), 
&validatedParams.focusingAreas); - size_t max3aRegions = - (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS, 1, 1).data.i32[0]; + size_t maxAfRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS, + Parameters::NUM_REGION, Parameters::NUM_REGION). + data.i32[Parameters::REGION_AF]; if (res == OK) res = validateAreas(validatedParams.focusingAreas, - max3aRegions, AREA_KIND_FOCUS); + maxAfRegions, AREA_KIND_FOCUS); if (res != OK) { ALOGE("%s: Requested focus areas are malformed: %s", __FUNCTION__, newParams.get(CameraParameters::KEY_FOCUS_AREAS)); @@ -1700,10 +1625,13 @@ status_t Parameters::set(const String8& paramString) { newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK)); // METERING_AREAS + size_t maxAeRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS, + Parameters::NUM_REGION, Parameters::NUM_REGION). + data.i32[Parameters::REGION_AE]; res = parseAreas(newParams.get(CameraParameters::KEY_METERING_AREAS), &validatedParams.meteringAreas); if (res == OK) { - res = validateAreas(validatedParams.meteringAreas, max3aRegions, + res = validateAreas(validatedParams.meteringAreas, maxAeRegions, AREA_KIND_METERING); } if (res != OK) { @@ -1852,7 +1780,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const { // (face detection statistics and face priority scene mode). Map from other // to the other. bool sceneModeActive = - sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + sceneMode != (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED; uint8_t reqControlMode = ANDROID_CONTROL_MODE_AUTO; if (enableFaceDetect || sceneModeActive) { reqControlMode = ANDROID_CONTROL_MODE_USE_SCENE_MODE; @@ -1864,7 +1792,7 @@ status_t Parameters::updateRequest(CameraMetadata *request) const { uint8_t reqSceneMode = sceneModeActive ? sceneMode : enableFaceDetect ? 
(uint8_t)ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY : - (uint8_t)ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED; + (uint8_t)ANDROID_CONTROL_SCENE_MODE_DISABLED; res = request->update(ANDROID_CONTROL_SCENE_MODE, &reqSceneMode, 1); if (res != OK) return res; @@ -1985,6 +1913,23 @@ status_t Parameters::updateRequest(CameraMetadata *request) const { reqMeteringAreas, reqMeteringAreasSize); if (res != OK) return res; + // Set awb regions to be the same as the metering regions if allowed + size_t maxAwbRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS, + Parameters::NUM_REGION, Parameters::NUM_REGION). + data.i32[Parameters::REGION_AWB]; + if (maxAwbRegions > 0) { + if (maxAwbRegions >= meteringAreas.size()) { + res = request->update(ANDROID_CONTROL_AWB_REGIONS, + reqMeteringAreas, reqMeteringAreasSize); + } else { + // Ensure the awb regions are zeroed if the region count is too high. + int32_t zeroedAwbAreas[5] = {0, 0, 0, 0, 0}; + res = request->update(ANDROID_CONTROL_AWB_REGIONS, + zeroedAwbAreas, sizeof(zeroedAwbAreas)/sizeof(int32_t)); + } + if (res != OK) return res; + } + delete[] reqMeteringAreas; /* don't include jpeg thumbnail size - it's valid for @@ -2225,9 +2170,9 @@ int Parameters::abModeStringToEnum(const char *abMode) { int Parameters::sceneModeStringToEnum(const char *sceneMode) { return !sceneMode ? - ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED : + ANDROID_CONTROL_SCENE_MODE_DISABLED : !strcmp(sceneMode, CameraParameters::SCENE_MODE_AUTO) ? - ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED : + ANDROID_CONTROL_SCENE_MODE_DISABLED : !strcmp(sceneMode, CameraParameters::SCENE_MODE_ACTION) ? ANDROID_CONTROL_SCENE_MODE_ACTION : !strcmp(sceneMode, CameraParameters::SCENE_MODE_PORTRAIT) ? 
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h index da07ccf..60c4687 100644 --- a/services/camera/libcameraservice/api1/client2/Parameters.h +++ b/services/camera/libcameraservice/api1/client2/Parameters.h @@ -25,7 +25,6 @@ #include <utils/Vector.h> #include <utils/KeyedVector.h> #include <camera/CameraParameters.h> -#include <camera/CameraParameters2.h> #include <camera/CameraMetadata.h> namespace android { @@ -33,7 +32,7 @@ namespace camera2 { /** * Current camera state; this is the full state of the Camera under the old - * camera API (contents of the CameraParameters2 object in a more-efficient + * camera API (contents of the CameraParameters object in a more-efficient * format, plus other state). The enum values are mostly based off the * corresponding camera2 enums, not the camera1 strings. A few are defined here * if they don't cleanly map to camera2 values. @@ -114,6 +113,14 @@ struct Parameters { bool autoExposureLock; bool autoWhiteBalanceLock; + // 3A region types, for use with ANDROID_CONTROL_MAX_REGIONS + enum region_t { + REGION_AE = 0, + REGION_AWB, + REGION_AF, + NUM_REGION // Number of region types + } region; + Vector<Area> meteringAreas; int zoom; @@ -129,7 +136,7 @@ struct Parameters { LIGHTFX_HDR } lightFx; - CameraParameters2 params; + CameraParameters params; String8 paramsFlattened; // These parameters are also part of the camera API-visible state, but not diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp index 77ae7ec..2064e2c 100644 --- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp @@ -319,13 +319,15 @@ status_t StreamingProcessor::updateRecordingStream(const Parameters ¶ms) { // Create CPU buffer queue endpoint. 
We need one more buffer here so that we can // always acquire and free a buffer when the heap is full; otherwise the consumer // will have buffers in flight we'll never clear out. - sp<BufferQueue> bq = new BufferQueue(); - mRecordingConsumer = new BufferItemConsumer(bq, + sp<IGraphicBufferProducer> producer; + sp<IGraphicBufferConsumer> consumer; + BufferQueue::createBufferQueue(&producer, &consumer); + mRecordingConsumer = new BufferItemConsumer(consumer, GRALLOC_USAGE_HW_VIDEO_ENCODER, mRecordingHeapCount + 1); mRecordingConsumer->setFrameAvailableListener(this); mRecordingConsumer->setName(String8("Camera2-RecordingConsumer")); - mRecordingWindow = new Surface(bq); + mRecordingWindow = new Surface(producer); newConsumer = true; // Allocate memory later, since we don't know buffer size until receipt } diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp index 130f81a..2a2a5af 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp @@ -73,18 +73,19 @@ void ZslProcessor::onFrameAvailable() { } } -void ZslProcessor::onFrameAvailable(int32_t /*requestId*/, - const CameraMetadata &frame) { +void ZslProcessor::onResultAvailable(const CaptureResult &result) { + ATRACE_CALL(); + ALOGV("%s:", __FUNCTION__); Mutex::Autolock l(mInputMutex); camera_metadata_ro_entry_t entry; - entry = frame.find(ANDROID_SENSOR_TIMESTAMP); + entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP); nsecs_t timestamp = entry.data.i64[0]; (void)timestamp; ALOGVV("Got preview frame for timestamp %" PRId64, timestamp); if (mState != RUNNING) return; - mFrameList.editItemAt(mFrameListHead) = frame; + mFrameList.editItemAt(mFrameListHead) = result.mMetadata; mFrameListHead = (mFrameListHead + 1) % kFrameListDepth; findMatchesLocked(); @@ -130,13 +131,15 @@ status_t ZslProcessor::updateStream(const Parameters ¶ms) { if 
(mZslConsumer == 0) { // Create CPU buffer queue endpoint - sp<BufferQueue> bq = new BufferQueue(); - mZslConsumer = new BufferItemConsumer(bq, + sp<IGraphicBufferProducer> producer; + sp<IGraphicBufferConsumer> consumer; + BufferQueue::createBufferQueue(&producer, &consumer); + mZslConsumer = new BufferItemConsumer(consumer, GRALLOC_USAGE_HW_CAMERA_ZSL, kZslBufferDepth); mZslConsumer->setFrameAvailableListener(this); mZslConsumer->setName(String8("Camera2Client::ZslConsumer")); - mZslWindow = new Surface(bq); + mZslWindow = new Surface(producer); } if (mZslStreamId != NO_STREAM) { diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h index 6d3cb85..f4cf0c8 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h @@ -24,6 +24,7 @@ #include <utils/Condition.h> #include <gui/BufferItemConsumer.h> #include <camera/CameraMetadata.h> +#include <camera/CaptureResult.h> #include "common/CameraDeviceBase.h" #include "api1/client2/ZslProcessorInterface.h" @@ -54,7 +55,7 @@ class ZslProcessor: // From mZslConsumer virtual void onFrameAvailable(); // From FrameProcessor - virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame); + virtual void onResultAvailable(const CaptureResult &result); virtual void onBufferReleased(buffer_handle_t *handle); diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp index 2fce2b6..1dcb718 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp @@ -63,18 +63,19 @@ ZslProcessor3::~ZslProcessor3() { deleteStream(); } -void ZslProcessor3::onFrameAvailable(int32_t /*requestId*/, - const CameraMetadata &frame) { +void ZslProcessor3::onResultAvailable(const CaptureResult &result) { + 
ATRACE_CALL(); + ALOGV("%s:", __FUNCTION__); Mutex::Autolock l(mInputMutex); camera_metadata_ro_entry_t entry; - entry = frame.find(ANDROID_SENSOR_TIMESTAMP); + entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP); nsecs_t timestamp = entry.data.i64[0]; (void)timestamp; ALOGVV("Got preview metadata for timestamp %" PRId64, timestamp); if (mState != RUNNING) return; - mFrameList.editItemAt(mFrameListHead) = frame; + mFrameList.editItemAt(mFrameListHead) = result.mMetadata; mFrameListHead = (mFrameListHead + 1) % kFrameListDepth; } @@ -275,6 +276,15 @@ status_t ZslProcessor3::pushToReprocess(int32_t requestId) { return INVALID_OPERATION; } + // Flush device to clear out all in-flight requests pending in HAL. + res = client->getCameraDevice()->flush(); + if (res != OK) { + ALOGE("%s: Camera %d: Failed to flush device: " + "%s (%d)", + __FUNCTION__, client->getCameraId(), strerror(-res), res); + return res; + } + // Update JPEG settings { SharedParameters::Lock l(client->getParameters()); diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h index d2f8322..4c52a64 100644 --- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h +++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h @@ -50,8 +50,8 @@ class ZslProcessor3 : ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer); ~ZslProcessor3(); - // From FrameProcessor - virtual void onFrameAvailable(int32_t requestId, const CameraMetadata &frame); + // From FrameProcessor::FilteredListener + virtual void onResultAvailable(const CaptureResult &result); /** **************************************** diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp index 142da9e..3d85e90 100644 --- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp @@ 
-16,7 +16,7 @@ #define LOG_TAG "CameraDeviceClient" #define ATRACE_TAG ATRACE_TAG_CAMERA -// #define LOG_NDEBUG 0 +//#define LOG_NDEBUG 0 #include <cutils/properties.h> #include <utils/Log.h> @@ -91,79 +91,101 @@ CameraDeviceClient::~CameraDeviceClient() { } status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request, - bool streaming) { + bool streaming, + /*out*/ + int64_t* lastFrameNumber) { + List<sp<CaptureRequest> > requestList; + requestList.push_back(request); + return submitRequestList(requestList, streaming, lastFrameNumber); +} + +status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > requests, + bool streaming, int64_t* lastFrameNumber) { ATRACE_CALL(); - ALOGV("%s", __FUNCTION__); + ALOGV("%s-start of function. Request list size %d", __FUNCTION__, requests.size()); status_t res; - if ( (res = checkPid(__FUNCTION__) ) != OK) return res; Mutex::Autolock icl(mBinderSerializationLock); if (!mDevice.get()) return DEAD_OBJECT; - if (request == 0) { + if (requests.empty()) { ALOGE("%s: Camera %d: Sent null request. Rejecting request.", __FUNCTION__, mCameraId); return BAD_VALUE; } - CameraMetadata metadata(request->mMetadata); - - if (metadata.isEmpty()) { - ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.", - __FUNCTION__, mCameraId); - return BAD_VALUE; - } else if (request->mSurfaceList.size() == 0) { - ALOGE("%s: Camera %d: Requests must have at least one surface target. 
" - "Rejecting request.", __FUNCTION__, mCameraId); - return BAD_VALUE; - } + List<const CameraMetadata> metadataRequestList; + int32_t requestId = mRequestIdCounter; + uint32_t loopCounter = 0; - if (!enforceRequestPermissions(metadata)) { - // Callee logs - return PERMISSION_DENIED; - } + for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) { + sp<CaptureRequest> request = *it; + if (request == 0) { + ALOGE("%s: Camera %d: Sent null request.", + __FUNCTION__, mCameraId); + return BAD_VALUE; + } - /** - * Write in the output stream IDs which we calculate from - * the capture request's list of surface targets - */ - Vector<int32_t> outputStreamIds; - outputStreamIds.setCapacity(request->mSurfaceList.size()); - for (size_t i = 0; i < request->mSurfaceList.size(); ++i) { - sp<Surface> surface = request->mSurfaceList[i]; + CameraMetadata metadata(request->mMetadata); + if (metadata.isEmpty()) { + ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.", + __FUNCTION__, mCameraId); + return BAD_VALUE; + } else if (request->mSurfaceList.isEmpty()) { + ALOGE("%s: Camera %d: Requests must have at least one surface target. 
" + "Rejecting request.", __FUNCTION__, mCameraId); + return BAD_VALUE; + } - if (surface == 0) continue; + if (!enforceRequestPermissions(metadata)) { + // Callee logs + return PERMISSION_DENIED; + } - sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer(); - int idx = mStreamMap.indexOfKey(gbp->asBinder()); + /** + * Write in the output stream IDs which we calculate from + * the capture request's list of surface targets + */ + Vector<int32_t> outputStreamIds; + outputStreamIds.setCapacity(request->mSurfaceList.size()); + for (size_t i = 0; i < request->mSurfaceList.size(); ++i) { + sp<Surface> surface = request->mSurfaceList[i]; + if (surface == 0) continue; + + sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer(); + int idx = mStreamMap.indexOfKey(gbp->asBinder()); + + // Trying to submit request with surface that wasn't created + if (idx == NAME_NOT_FOUND) { + ALOGE("%s: Camera %d: Tried to submit a request with a surface that" + " we have not called createStream on", + __FUNCTION__, mCameraId); + return BAD_VALUE; + } - // Trying to submit request with surface that wasn't created - if (idx == NAME_NOT_FOUND) { - ALOGE("%s: Camera %d: Tried to submit a request with a surface that" - " we have not called createStream on", - __FUNCTION__, mCameraId); - return BAD_VALUE; + int streamId = mStreamMap.valueAt(idx); + outputStreamIds.push_back(streamId); + ALOGV("%s: Camera %d: Appending output stream %d to request", + __FUNCTION__, mCameraId, streamId); } - int streamId = mStreamMap.valueAt(idx); - outputStreamIds.push_back(streamId); - ALOGV("%s: Camera %d: Appending output stream %d to request", - __FUNCTION__, mCameraId, streamId); - } + metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0], + outputStreamIds.size()); - metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0], - outputStreamIds.size()); + metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1); + loopCounter++; // loopCounter starts from 
1 + ALOGV("%s: Camera %d: Creating request with ID %d (%d of %d)", + __FUNCTION__, mCameraId, requestId, loopCounter, requests.size()); - int32_t requestId = mRequestIdCounter++; - metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1); - ALOGV("%s: Camera %d: Submitting request with ID %d", - __FUNCTION__, mCameraId, requestId); + metadataRequestList.push_back(metadata); + } + mRequestIdCounter++; if (streaming) { - res = mDevice->setStreamingRequest(metadata); + res = mDevice->setStreamingRequestList(metadataRequestList, lastFrameNumber); if (res != OK) { ALOGE("%s: Camera %d: Got error %d after trying to set streaming " "request", __FUNCTION__, mCameraId, res); @@ -171,11 +193,12 @@ status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request, mStreamingRequestList.push_back(requestId); } } else { - res = mDevice->capture(metadata); + res = mDevice->captureList(metadataRequestList, lastFrameNumber); if (res != OK) { ALOGE("%s: Camera %d: Got error %d after trying to set capture", - __FUNCTION__, mCameraId, res); + __FUNCTION__, mCameraId, res); } + ALOGV("%s: requestId = %d ", __FUNCTION__, requestId); } ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId); @@ -186,7 +209,7 @@ status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request, return res; } -status_t CameraDeviceClient::cancelRequest(int requestId) { +status_t CameraDeviceClient::cancelRequest(int requestId, int64_t* lastFrameNumber) { ATRACE_CALL(); ALOGV("%s, requestId = %d", __FUNCTION__, requestId); @@ -212,7 +235,7 @@ status_t CameraDeviceClient::cancelRequest(int requestId) { return BAD_VALUE; } - res = mDevice->clearStreamingRequest(); + res = mDevice->clearStreamingRequest(lastFrameNumber); if (res == OK) { ALOGV("%s: Camera %d: Successfully cleared streaming request", @@ -259,8 +282,6 @@ status_t CameraDeviceClient::deleteStream(int streamId) { } else if (res == OK) { mStreamMap.removeItemsAt(index); - ALOGV("%s: Camera %d: Successfully deleted stream ID 
(%d)", - __FUNCTION__, mCameraId, streamId); } return res; @@ -465,7 +486,7 @@ status_t CameraDeviceClient::waitUntilIdle() return res; } -status_t CameraDeviceClient::flush() { +status_t CameraDeviceClient::flush(int64_t* lastFrameNumber) { ATRACE_CALL(); ALOGV("%s", __FUNCTION__); @@ -476,7 +497,7 @@ status_t CameraDeviceClient::flush() { if (!mDevice.get()) return DEAD_OBJECT; - return mDevice->flush(); + return mDevice->flush(lastFrameNumber); } status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) { @@ -493,13 +514,13 @@ status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) { return dumpDevice(fd, args); } - -void CameraDeviceClient::notifyError() { +void CameraDeviceClient::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras) { // Thread safe. Don't bother locking. sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback(); if (remoteCb != 0) { - remoteCb->onDeviceError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE); + remoteCb->onDeviceError(errorCode, resultExtras); } } @@ -512,12 +533,12 @@ void CameraDeviceClient::notifyIdle() { } } -void CameraDeviceClient::notifyShutter(int requestId, +void CameraDeviceClient::notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) { // Thread safe. Don't bother locking. sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback(); if (remoteCb != 0) { - remoteCb->onCaptureStarted(requestId, timestamp); + remoteCb->onCaptureStarted(resultExtras, timestamp); } } @@ -552,16 +573,14 @@ void CameraDeviceClient::detachDevice() { } /** Device-related methods */ -void CameraDeviceClient::onFrameAvailable(int32_t requestId, - const CameraMetadata& frame) { +void CameraDeviceClient::onResultAvailable(const CaptureResult& result) { ATRACE_CALL(); ALOGV("%s", __FUNCTION__); // Thread-safe. No lock necessary. 
sp<ICameraDeviceCallbacks> remoteCb = mRemoteCallback; if (remoteCb != NULL) { - ALOGV("%s: frame = %p ", __FUNCTION__, &frame); - remoteCb->onResultReceived(requestId, frame); + remoteCb->onResultReceived(result.mMetadata, result.mResultExtras); } } @@ -635,26 +654,56 @@ status_t CameraDeviceClient::getRotationTransformLocked(int32_t* transform) { return INVALID_OPERATION; } + camera_metadata_ro_entry_t entryFacing = staticInfo.find(ANDROID_LENS_FACING); + if (entry.count == 0) { + ALOGE("%s: Camera %d: Can't find android.lens.facing in " + "static metadata!", __FUNCTION__, mCameraId); + return INVALID_OPERATION; + } + int32_t& flags = *transform; + bool mirror = (entryFacing.data.u8[0] == ANDROID_LENS_FACING_FRONT); int orientation = entry.data.i32[0]; - switch (orientation) { - case 0: - flags = 0; - break; - case 90: - flags = NATIVE_WINDOW_TRANSFORM_ROT_90; - break; - case 180: - flags = NATIVE_WINDOW_TRANSFORM_ROT_180; - break; - case 270: - flags = NATIVE_WINDOW_TRANSFORM_ROT_270; - break; - default: - ALOGE("%s: Invalid HAL android.sensor.orientation value: %d", - __FUNCTION__, orientation); - return INVALID_OPERATION; + if (!mirror) { + switch (orientation) { + case 0: + flags = 0; + break; + case 90: + flags = NATIVE_WINDOW_TRANSFORM_ROT_90; + break; + case 180: + flags = NATIVE_WINDOW_TRANSFORM_ROT_180; + break; + case 270: + flags = NATIVE_WINDOW_TRANSFORM_ROT_270; + break; + default: + ALOGE("%s: Invalid HAL android.sensor.orientation value: %d", + __FUNCTION__, orientation); + return INVALID_OPERATION; + } + } else { + switch (orientation) { + case 0: + flags = HAL_TRANSFORM_FLIP_H; + break; + case 90: + flags = HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90; + break; + case 180: + flags = HAL_TRANSFORM_FLIP_V; + break; + case 270: + flags = HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90; + break; + default: + ALOGE("%s: Invalid HAL android.sensor.orientation value: %d", + __FUNCTION__, orientation); + return INVALID_OPERATION; + } + } /** diff --git 
a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h index b9c16aa..0b37784 100644 --- a/services/camera/libcameraservice/api2/CameraDeviceClient.h +++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h @@ -63,9 +63,18 @@ public: */ // Note that the callee gets a copy of the metadata. - virtual int submitRequest(sp<CaptureRequest> request, - bool streaming = false); - virtual status_t cancelRequest(int requestId); + virtual status_t submitRequest(sp<CaptureRequest> request, + bool streaming = false, + /*out*/ + int64_t* lastFrameNumber = NULL); + // List of requests are copied. + virtual status_t submitRequestList(List<sp<CaptureRequest> > requests, + bool streaming = false, + /*out*/ + int64_t* lastFrameNumber = NULL); + virtual status_t cancelRequest(int requestId, + /*out*/ + int64_t* lastFrameNumber = NULL); // Returns -EBUSY if device is not idle virtual status_t deleteStream(int streamId); @@ -89,7 +98,8 @@ public: virtual status_t waitUntilIdle(); // Flush all active and pending requests as fast as possible - virtual status_t flush(); + virtual status_t flush(/*out*/ + int64_t* lastFrameNumber = NULL); /** * Interface used by CameraService @@ -114,16 +124,16 @@ public: */ virtual void notifyIdle(); - virtual void notifyError(); - virtual void notifyShutter(int requestId, nsecs_t timestamp); + virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras); + virtual void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp); /** * Interface used by independent components of CameraDeviceClient. 
*/ protected: /** FilteredListener implementation **/ - virtual void onFrameAvailable(int32_t requestId, - const CameraMetadata& frame); + virtual void onResultAvailable(const CaptureResult& result); virtual void detachDevice(); // Calculate the ANativeWindow transform from android.sensor.orientation diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp index 1a7a7a7..0f6d278 100644 --- a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp +++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp @@ -373,9 +373,7 @@ void ProCamera2Client::detachDevice() { Camera2ClientBase::detachDevice(); } -/** Device-related methods */ -void ProCamera2Client::onFrameAvailable(int32_t requestId, - const CameraMetadata& frame) { +void ProCamera2Client::onResultAvailable(const CaptureResult& result) { ATRACE_CALL(); ALOGV("%s", __FUNCTION__); @@ -383,13 +381,12 @@ void ProCamera2Client::onFrameAvailable(int32_t requestId, SharedCameraCallbacks::Lock l(mSharedCameraCallbacks); if (mRemoteCallback != NULL) { - CameraMetadata tmp(frame); + CameraMetadata tmp(result.mMetadata); camera_metadata_t* meta = tmp.release(); ALOGV("%s: meta = %p ", __FUNCTION__, meta); - mRemoteCallback->onResultReceived(requestId, meta); + mRemoteCallback->onResultReceived(result.mResultExtras.requestId, meta); tmp.acquire(meta); } - } bool ProCamera2Client::enforceRequestPermissions(CameraMetadata& metadata) { diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.h b/services/camera/libcameraservice/api_pro/ProCamera2Client.h index 8a0f547..9d83122 100644 --- a/services/camera/libcameraservice/api_pro/ProCamera2Client.h +++ b/services/camera/libcameraservice/api_pro/ProCamera2Client.h @@ -21,6 +21,7 @@ #include "common/FrameProcessorBase.h" #include "common/Camera2ClientBase.h" #include "device2/Camera2Device.h" +#include "camera/CaptureResult.h" namespace android { @@ -97,8 +98,8 @@ 
public: protected: /** FilteredListener implementation **/ - virtual void onFrameAvailable(int32_t requestId, - const CameraMetadata& frame); + virtual void onResultAvailable(const CaptureResult& result); + virtual void detachDevice(); private: diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp index 6a88c87..19efd30 100644 --- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp +++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp @@ -221,10 +221,11 @@ status_t Camera2ClientBase<TClientBase>::connect( /** Device-related methods */ template <typename TClientBase> -void Camera2ClientBase<TClientBase>::notifyError(int errorCode, int arg1, - int arg2) { - ALOGE("Error condition %d reported by HAL, arguments %d, %d", errorCode, - arg1, arg2); +void Camera2ClientBase<TClientBase>::notifyError( + ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras) { + ALOGE("Error condition %d reported by HAL, requestId %" PRId32, errorCode, + resultExtras.requestId); } template <typename TClientBase> @@ -233,13 +234,13 @@ void Camera2ClientBase<TClientBase>::notifyIdle() { } template <typename TClientBase> -void Camera2ClientBase<TClientBase>::notifyShutter(int requestId, +void Camera2ClientBase<TClientBase>::notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) { - (void)requestId; + (void)resultExtras; (void)timestamp; - ALOGV("%s: Shutter notification for request id %d at time %" PRId64, - __FUNCTION__, requestId, timestamp); + ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64, + __FUNCTION__, resultExtras.requestId, timestamp); } template <typename TClientBase> diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h index 61e44f0..9feca93 100644 --- 
a/services/camera/libcameraservice/common/Camera2ClientBase.h +++ b/services/camera/libcameraservice/common/Camera2ClientBase.h @@ -18,6 +18,7 @@ #define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H #include "common/CameraDeviceBase.h" +#include "camera/CaptureResult.h" namespace android { @@ -61,9 +62,11 @@ public: * CameraDeviceBase::NotificationListener implementation */ - virtual void notifyError(int errorCode, int arg1, int arg2); + virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras& resultExtras); virtual void notifyIdle(); - virtual void notifyShutter(int requestId, nsecs_t timestamp); + virtual void notifyShutter(const CaptureResultExtras& resultExtras, + nsecs_t timestamp); virtual void notifyAutoFocus(uint8_t newState, int triggerId); virtual void notifyAutoExposure(uint8_t newState, int triggerId); virtual void notifyAutoWhitebalance(uint8_t newState, diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h index e80abf1..7597b10 100644 --- a/services/camera/libcameraservice/common/CameraDeviceBase.h +++ b/services/camera/libcameraservice/common/CameraDeviceBase.h @@ -22,9 +22,12 @@ #include <utils/String16.h> #include <utils/Vector.h> #include <utils/Timers.h> +#include <utils/List.h> +#include <camera/camera2/ICameraDeviceCallbacks.h> #include "hardware/camera2.h" #include "camera/CameraMetadata.h" +#include "camera/CaptureResult.h" namespace android { @@ -44,7 +47,7 @@ class CameraDeviceBase : public virtual RefBase { virtual status_t initialize(camera_module_t *module) = 0; virtual status_t disconnect() = 0; - virtual status_t dump(int fd, const Vector<String16>& args) = 0; + virtual status_t dump(int fd, const Vector<String16> &args) = 0; /** * The device's static characteristics metadata buffer @@ -54,19 +57,37 @@ class CameraDeviceBase : public virtual RefBase { /** * Submit request for capture. 
The CameraDevice takes ownership of the * passed-in buffer. + * Output lastFrameNumber is the expected frame number of this request. */ - virtual status_t capture(CameraMetadata &request) = 0; + virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) = 0; + + /** + * Submit a list of requests. + * Output lastFrameNumber is the expected last frame number of the list of requests. + */ + virtual status_t captureList(const List<const CameraMetadata> &requests, + int64_t *lastFrameNumber = NULL) = 0; /** * Submit request for streaming. The CameraDevice makes a copy of the * passed-in buffer and the caller retains ownership. + * Output lastFrameNumber is the last frame number of the previous streaming request. + */ + virtual status_t setStreamingRequest(const CameraMetadata &request, + int64_t *lastFrameNumber = NULL) = 0; + + /** + * Submit a list of requests for streaming. + * Output lastFrameNumber is the last frame number of the previous streaming request. */ - virtual status_t setStreamingRequest(const CameraMetadata &request) = 0; + virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests, + int64_t *lastFrameNumber = NULL) = 0; /** * Clear the streaming request slot. + * Output lastFrameNumber is the last frame number of the previous streaming request. */ - virtual status_t clearStreamingRequest() = 0; + virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) = 0; /** * Wait until a request with the given ID has been dequeued by the @@ -142,11 +163,12 @@ class CameraDeviceBase : public virtual RefBase { // API1 and API2. 
// Required for API 1 and 2 - virtual void notifyError(int errorCode, int arg1, int arg2) = 0; + virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode, + const CaptureResultExtras &resultExtras) = 0; // Required only for API2 virtual void notifyIdle() = 0; - virtual void notifyShutter(int requestId, + virtual void notifyShutter(const CaptureResultExtras &resultExtras, nsecs_t timestamp) = 0; // Required only for API1 @@ -179,11 +201,12 @@ class CameraDeviceBase : public virtual RefBase { virtual status_t waitForNextFrame(nsecs_t timeout) = 0; /** - * Get next metadata frame from the frame queue. Returns NULL if the queue - * is empty; caller takes ownership of the metadata buffer. - * May be called concurrently to most methods, except for waitForNextFrame + * Get next capture result frame from the result queue. Returns NOT_ENOUGH_DATA + * if the queue is empty; caller takes ownership of the metadata buffer inside + * the capture result object's metadata field. + * May be called concurrently to most methods, except for waitForNextFrame. */ - virtual status_t getNextFrame(CameraMetadata *frame) = 0; + virtual status_t getNextResult(CaptureResult *frame) = 0; /** * Trigger auto-focus. The latest ID used in a trigger autofocus or cancel @@ -224,8 +247,9 @@ class CameraDeviceBase : public virtual RefBase { /** * Flush all pending and in-flight requests. Blocks until flush is * complete. + * Output lastFrameNumber is the last frame number of the previous streaming request. 
*/ - virtual status_t flush() = 0; + virtual status_t flush(int64_t *lastFrameNumber = NULL) = 0; }; diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp index 4d31667..f6a971a 100644 --- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp +++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp @@ -99,15 +99,17 @@ bool FrameProcessorBase::threadLoop() { void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) { status_t res; ATRACE_CALL(); - CameraMetadata frame; + CaptureResult result; ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId()); - while ( (res = device->getNextFrame(&frame)) == OK) { + while ( (res = device->getNextResult(&result)) == OK) { + // TODO: instead of getting frame number from metadata, we should read + // this from result.mResultExtras when CameraDeviceBase interface is fixed. camera_metadata_entry_t entry; - entry = frame.find(ANDROID_REQUEST_FRAME_COUNT); + entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT); if (entry.count == 0) { ALOGE("%s: Camera %d: Error reading frame number", __FUNCTION__, device->getId()); @@ -115,13 +117,13 @@ void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) { } ATRACE_INT("cam2_frame", entry.data.i32[0]); - if (!processSingleFrame(frame, device)) { + if (!processSingleFrame(result, device)) { break; } - if (!frame.isEmpty()) { + if (!result.mMetadata.isEmpty()) { Mutex::Autolock al(mLastFrameMutex); - mLastFrame.acquire(frame); + mLastFrame.acquire(result.mMetadata); } } if (res != NOT_ENOUGH_DATA) { @@ -133,21 +135,22 @@ void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) { return; } -bool FrameProcessorBase::processSingleFrame(CameraMetadata &frame, - const sp<CameraDeviceBase> &device) { +bool FrameProcessorBase::processSingleFrame(CaptureResult &result, + const sp<CameraDeviceBase> &device) { 
ALOGV("%s: Camera %d: Process single frame (is empty? %d)", - __FUNCTION__, device->getId(), frame.isEmpty()); - return processListeners(frame, device) == OK; + __FUNCTION__, device->getId(), result.mMetadata.isEmpty()); + return processListeners(result, device) == OK; } -status_t FrameProcessorBase::processListeners(const CameraMetadata &frame, +status_t FrameProcessorBase::processListeners(const CaptureResult &result, const sp<CameraDeviceBase> &device) { ATRACE_CALL(); + camera_metadata_ro_entry_t entry; // Quirks: Don't deliver partial results to listeners that don't want them bool quirkIsPartial = false; - entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT); + entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT); if (entry.count != 0 && entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) { ALOGV("%s: Camera %d: Not forwarding partial result to listeners", @@ -155,10 +158,13 @@ status_t FrameProcessorBase::processListeners(const CameraMetadata &frame, quirkIsPartial = true; } - entry = frame.find(ANDROID_REQUEST_ID); + // TODO: instead of getting requestID from CameraMetadata, we should get it + // from CaptureResultExtras. This will require changing Camera2Device. + // Currently Camera2Device uses MetadataQueue to store results, which does not + // include CaptureResultExtras. 
+ entry = result.mMetadata.find(ANDROID_REQUEST_ID); if (entry.count == 0) { - ALOGE("%s: Camera %d: Error reading frame id", - __FUNCTION__, device->getId()); + ALOGE("%s: Camera %d: Error reading frame id", __FUNCTION__, device->getId()); return BAD_VALUE; } int32_t requestId = entry.data.i32[0]; @@ -169,9 +175,8 @@ status_t FrameProcessorBase::processListeners(const CameraMetadata &frame, List<RangeListener>::iterator item = mRangeListeners.begin(); while (item != mRangeListeners.end()) { - if (requestId >= item->minId && - requestId < item->maxId && - (!quirkIsPartial || item->quirkSendPartials) ) { + if (requestId >= item->minId && requestId < item->maxId && + (!quirkIsPartial || item->quirkSendPartials)) { sp<FilteredListener> listener = item->listener.promote(); if (listener == 0) { item = mRangeListeners.erase(item); @@ -183,10 +188,12 @@ status_t FrameProcessorBase::processListeners(const CameraMetadata &frame, item++; } } - ALOGV("Got %zu range listeners out of %zu", listeners.size(), mRangeListeners.size()); + ALOGV("%s: Camera %d: Got %zu range listeners out of %zu", __FUNCTION__, + device->getId(), listeners.size(), mRangeListeners.size()); + List<sp<FilteredListener> >::iterator item = listeners.begin(); for (; item != listeners.end(); item++) { - (*item)->onFrameAvailable(requestId, frame); + (*item)->onResultAvailable(result); } return OK; } diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h index 89b608a..15a014e 100644 --- a/services/camera/libcameraservice/common/FrameProcessorBase.h +++ b/services/camera/libcameraservice/common/FrameProcessorBase.h @@ -23,6 +23,7 @@ #include <utils/KeyedVector.h> #include <utils/List.h> #include <camera/CameraMetadata.h> +#include <camera/CaptureResult.h> namespace android { @@ -39,8 +40,7 @@ class FrameProcessorBase: public Thread { virtual ~FrameProcessorBase(); struct FilteredListener: virtual public RefBase { - virtual void 
onFrameAvailable(int32_t requestId, - const CameraMetadata &frame) = 0; + virtual void onResultAvailable(const CaptureResult &result) = 0; }; // Register a listener for a range of IDs [minId, maxId). Multiple listeners @@ -72,10 +72,10 @@ class FrameProcessorBase: public Thread { void processNewFrames(const sp<CameraDeviceBase> &device); - virtual bool processSingleFrame(CameraMetadata &frame, + virtual bool processSingleFrame(CaptureResult &result, const sp<CameraDeviceBase> &device); - status_t processListeners(const CameraMetadata &frame, + status_t processListeners(const CaptureResult &result, const sp<CameraDeviceBase> &device); CameraMetadata mLastFrame; diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp index 2966d82..c33c166 100644 --- a/services/camera/libcameraservice/device2/Camera2Device.cpp +++ b/services/camera/libcameraservice/device2/Camera2Device.cpp @@ -112,20 +112,6 @@ status_t Camera2Device::initialize(camera_module_t *module) return res; } - res = device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps); - if (res != OK ) { - ALOGE("%s: Camera %d: Unable to retrieve tag ops from device: %s (%d)", - __FUNCTION__, mId, strerror(-res), res); - device->common.close(&device->common); - return res; - } - res = set_camera_metadata_vendor_tag_ops(mVendorTagOps); - if (res != OK) { - ALOGE("%s: Camera %d: Unable to set tag ops: %s (%d)", - __FUNCTION__, mId, strerror(-res), res); - device->common.close(&device->common); - return res; - } res = device->ops->set_notify_callback(device, notificationCallback, NULL); if (res != OK) { @@ -213,7 +199,7 @@ const CameraMetadata& Camera2Device::info() const { return mDeviceInfo; } -status_t Camera2Device::capture(CameraMetadata &request) { +status_t Camera2Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); @@ -221,15 +207,29 @@ status_t 
Camera2Device::capture(CameraMetadata &request) { return OK; } +status_t Camera2Device::captureList(const List<const CameraMetadata> &requests, + int64_t* /*lastFrameNumber*/) { + ATRACE_CALL(); + ALOGE("%s: Camera2Device burst capture not implemented", __FUNCTION__); + return INVALID_OPERATION; +} -status_t Camera2Device::setStreamingRequest(const CameraMetadata &request) { +status_t Camera2Device::setStreamingRequest(const CameraMetadata &request, + int64_t* /*lastFrameNumber*/) { ATRACE_CALL(); ALOGV("%s: E", __FUNCTION__); CameraMetadata streamRequest(request); return mRequestQueue.setStreamSlot(streamRequest.release()); } -status_t Camera2Device::clearStreamingRequest() { +status_t Camera2Device::setStreamingRequestList(const List<const CameraMetadata> &requests, + int64_t* /*lastFrameNumber*/) { + ATRACE_CALL(); + ALOGE("%s, Camera2Device streaming burst not implemented", __FUNCTION__); + return INVALID_OPERATION; +} + +status_t Camera2Device::clearStreamingRequest(int64_t* /*lastFrameNumber*/) { ATRACE_CALL(); return mRequestQueue.setStreamSlot(NULL); } @@ -462,7 +462,13 @@ void Camera2Device::notificationCallback(int32_t msg_type, if (listener != NULL) { switch (msg_type) { case CAMERA2_MSG_ERROR: - listener->notifyError(ext1, ext2, ext3); + // TODO: This needs to be fixed. ext2 and ext3 need to be considered. + listener->notifyError( + ((ext1 == CAMERA2_MSG_ERROR_DEVICE) + || (ext1 == CAMERA2_MSG_ERROR_HARDWARE)) ? 
+ ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE : + ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, + CaptureResultExtras()); break; case CAMERA2_MSG_SHUTTER: { // TODO: Only needed for camera2 API, which is unsupported @@ -491,16 +497,22 @@ status_t Camera2Device::waitForNextFrame(nsecs_t timeout) { return mFrameQueue.waitForBuffer(timeout); } -status_t Camera2Device::getNextFrame(CameraMetadata *frame) { +status_t Camera2Device::getNextResult(CaptureResult *result) { ATRACE_CALL(); + ALOGV("%s: get CaptureResult", __FUNCTION__); + if (result == NULL) { + ALOGE("%s: result pointer is NULL", __FUNCTION__); + return BAD_VALUE; + } status_t res; camera_metadata_t *rawFrame; res = mFrameQueue.dequeue(&rawFrame); - if (rawFrame == NULL) { + if (rawFrame == NULL) { return NOT_ENOUGH_DATA; } else if (res == OK) { - frame->acquire(rawFrame); + result->mMetadata.acquire(rawFrame); } + return res; } @@ -570,7 +582,7 @@ status_t Camera2Device::pushReprocessBuffer(int reprocessStreamId, return res; } -status_t Camera2Device::flush() { +status_t Camera2Device::flush(int64_t* /*lastFrameNumber*/) { ATRACE_CALL(); mRequestQueue.clear(); diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h index 1f53c56..22a13ac 100644 --- a/services/camera/libcameraservice/device2/Camera2Device.h +++ b/services/camera/libcameraservice/device2/Camera2Device.h @@ -47,9 +47,14 @@ class Camera2Device: public CameraDeviceBase { virtual status_t disconnect(); virtual status_t dump(int fd, const Vector<String16>& args); virtual const CameraMetadata& info() const; - virtual status_t capture(CameraMetadata &request); - virtual status_t setStreamingRequest(const CameraMetadata &request); - virtual status_t clearStreamingRequest(); + virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL); + virtual status_t captureList(const List<const CameraMetadata> &requests, + int64_t *lastFrameNumber = NULL); + virtual 
status_t setStreamingRequest(const CameraMetadata &request, + int64_t *lastFrameNumber = NULL); + virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests, + int64_t *lastFrameNumber = NULL); + virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL); virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout); virtual status_t createStream(sp<ANativeWindow> consumer, uint32_t width, uint32_t height, int format, size_t size, @@ -65,20 +70,19 @@ class Camera2Device: public CameraDeviceBase { virtual status_t setNotifyCallback(NotificationListener *listener); virtual bool willNotify3A(); virtual status_t waitForNextFrame(nsecs_t timeout); - virtual status_t getNextFrame(CameraMetadata *frame); + virtual status_t getNextResult(CaptureResult *frame); virtual status_t triggerAutofocus(uint32_t id); virtual status_t triggerCancelAutofocus(uint32_t id); virtual status_t triggerPrecaptureMetering(uint32_t id); virtual status_t pushReprocessBuffer(int reprocessStreamId, buffer_handle_t *buffer, wp<BufferReleasedListener> listener); // Flush implemented as just a wait - virtual status_t flush(); + virtual status_t flush(int64_t *lastFrameNumber = NULL); private: const int mId; camera2_device_t *mHal2Device; CameraMetadata mDeviceInfo; - vendor_tag_query_ops_t *mVendorTagOps; /** * Queue class for both sending requests to a camera2 device, and for diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp index 7e11a3b..f965136 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.cpp +++ b/services/camera/libcameraservice/device3/Camera3Device.cpp @@ -102,8 +102,10 @@ status_t Camera3Device::initialize(camera_module_t *module) camera3_device_t *device; + ATRACE_BEGIN("camera3->open"); res = module->common.methods->open(&module->common, deviceName.string(), reinterpret_cast<hw_device_t**>(&device)); + ATRACE_END(); if (res != OK) 
{ SET_ERR_L("Could not open camera: %s (%d)", strerror(-res), res); @@ -112,9 +114,9 @@ status_t Camera3Device::initialize(camera_module_t *module) /** Cross-check device version */ - if (device->common.version != CAMERA_DEVICE_API_VERSION_3_0) { + if (device->common.version < CAMERA_DEVICE_API_VERSION_3_0) { SET_ERR_L("Could not open camera: " - "Camera device is not version %x, reports %x instead", + "Camera device should be at least %x, reports %x instead", CAMERA_DEVICE_API_VERSION_3_0, device->common.version); device->common.close(&device->common); @@ -128,7 +130,7 @@ status_t Camera3Device::initialize(camera_module_t *module) if (info.device_version != device->common.version) { SET_ERR_L("HAL reporting mismatched camera_info version (%x)" " and device version (%x).", - device->common.version, info.device_version); + info.device_version, device->common.version); device->common.close(&device->common); return BAD_VALUE; } @@ -146,24 +148,6 @@ status_t Camera3Device::initialize(camera_module_t *module) return BAD_VALUE; } - /** Get vendor metadata tags */ - - mVendorTagOps.get_camera_vendor_section_name = NULL; - - ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops"); - device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps); - ATRACE_END(); - - if (mVendorTagOps.get_camera_vendor_section_name != NULL) { - res = set_camera_metadata_vendor_tag_ops(&mVendorTagOps); - if (res != OK) { - SET_ERR_L("Unable to set tag ops: %s (%d)", - strerror(-res), res); - device->common.close(&device->common); - return res; - } - } - /** Start up status tracker thread */ mStatusTracker = new StatusTracker(this); res = mStatusTracker->run(String8::format("C3Dev-%d-Status", mId).string()); @@ -271,7 +255,9 @@ status_t Camera3Device::disconnect() { mStatusTracker.clear(); if (mHal3Device != NULL) { + ATRACE_BEGIN("camera3->close"); mHal3Device->common.close(&mHal3Device->common); + ATRACE_END(); mHal3Device = NULL; } @@ -386,14 +372,7 @@ const CameraMetadata& 
Camera3Device::info() const { return mDeviceInfo; } -status_t Camera3Device::capture(CameraMetadata &request) { - ATRACE_CALL(); - status_t res; - Mutex::Autolock il(mInterfaceLock); - Mutex::Autolock l(mLock); - - // TODO: take ownership of the request - +status_t Camera3Device::checkStatusOkToCaptureLocked() { switch (mStatus) { case STATUS_ERROR: CLOGE("Device has encountered a serious error"); @@ -402,7 +381,6 @@ status_t Camera3Device::capture(CameraMetadata &request) { CLOGE("Device not initialized"); return INVALID_OPERATION; case STATUS_UNCONFIGURED: - // May be lazily configuring streams, will check during setup case STATUS_CONFIGURED: case STATUS_ACTIVE: // OK @@ -411,71 +389,119 @@ status_t Camera3Device::capture(CameraMetadata &request) { SET_ERR_L("Unexpected status: %d", mStatus); return INVALID_OPERATION; } + return OK; +} - sp<CaptureRequest> newRequest = setUpRequestLocked(request); - if (newRequest == NULL) { - CLOGE("Can't create capture request"); +status_t Camera3Device::convertMetadataListToRequestListLocked( + const List<const CameraMetadata> &metadataList, RequestList *requestList) { + if (requestList == NULL) { + CLOGE("requestList cannot be NULL."); return BAD_VALUE; } - res = mRequestThread->queueRequest(newRequest); - if (res == OK) { - waitUntilStateThenRelock(/*active*/ true, kActiveTimeout); - if (res != OK) { - SET_ERR_L("Can't transition to active in %f seconds!", - kActiveTimeout/1e9); + int32_t burstId = 0; + for (List<const CameraMetadata>::const_iterator it = metadataList.begin(); + it != metadataList.end(); ++it) { + sp<CaptureRequest> newRequest = setUpRequestLocked(*it); + if (newRequest == 0) { + CLOGE("Can't create capture request"); + return BAD_VALUE; + } + + // Setup burst Id and request Id + newRequest->mResultExtras.burstId = burstId++; + if (it->exists(ANDROID_REQUEST_ID)) { + if (it->find(ANDROID_REQUEST_ID).count == 0) { + CLOGE("RequestID entry exists; but must not be empty in metadata"); + return BAD_VALUE; + } + 
newRequest->mResultExtras.requestId = it->find(ANDROID_REQUEST_ID).data.i32[0]; + } else { + CLOGE("RequestID does not exist in metadata"); + return BAD_VALUE; } - ALOGV("Camera %d: Capture request enqueued", mId); + + requestList->push_back(newRequest); + + ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId); } - return res; + return OK; } +status_t Camera3Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) { + ATRACE_CALL(); + + List<const CameraMetadata> requests; + requests.push_back(request); + return captureList(requests, /*lastFrameNumber*/NULL); +} -status_t Camera3Device::setStreamingRequest(const CameraMetadata &request) { +status_t Camera3Device::submitRequestsHelper( + const List<const CameraMetadata> &requests, bool repeating, + /*out*/ + int64_t *lastFrameNumber) { ATRACE_CALL(); - status_t res; Mutex::Autolock il(mInterfaceLock); Mutex::Autolock l(mLock); - switch (mStatus) { - case STATUS_ERROR: - CLOGE("Device has encountered a serious error"); - return INVALID_OPERATION; - case STATUS_UNINITIALIZED: - CLOGE("Device not initialized"); - return INVALID_OPERATION; - case STATUS_UNCONFIGURED: - // May be lazily configuring streams, will check during setup - case STATUS_CONFIGURED: - case STATUS_ACTIVE: - // OK - break; - default: - SET_ERR_L("Unexpected status: %d", mStatus); - return INVALID_OPERATION; + status_t res = checkStatusOkToCaptureLocked(); + if (res != OK) { + // error logged by previous call + return res; } - sp<CaptureRequest> newRepeatingRequest = setUpRequestLocked(request); - if (newRepeatingRequest == NULL) { - CLOGE("Can't create repeating request"); - return BAD_VALUE; + RequestList requestList; + + res = convertMetadataListToRequestListLocked(requests, /*out*/&requestList); + if (res != OK) { + // error logged by previous call + return res; } - RequestList newRepeatingRequests; - newRepeatingRequests.push_back(newRepeatingRequest); + if (repeating) { + res = 
mRequestThread->setRepeatingRequests(requestList, lastFrameNumber); + } else { + res = mRequestThread->queueRequestList(requestList, lastFrameNumber); + } - res = mRequestThread->setRepeatingRequests(newRepeatingRequests); if (res == OK) { - waitUntilStateThenRelock(/*active*/ true, kActiveTimeout); + waitUntilStateThenRelock(/*active*/true, kActiveTimeout); if (res != OK) { SET_ERR_L("Can't transition to active in %f seconds!", kActiveTimeout/1e9); } - ALOGV("Camera %d: Repeating request set", mId); + ALOGV("Camera %d: Capture request %" PRId32 " enqueued", mId, + (*(requestList.begin()))->mResultExtras.requestId); + } else { + CLOGE("Cannot queue request. Impossible."); + return BAD_VALUE; } + return res; } +status_t Camera3Device::captureList(const List<const CameraMetadata> &requests, + int64_t *lastFrameNumber) { + ATRACE_CALL(); + + return submitRequestsHelper(requests, /*repeating*/false, lastFrameNumber); +} + +status_t Camera3Device::setStreamingRequest(const CameraMetadata &request, + int64_t* /*lastFrameNumber*/) { + ATRACE_CALL(); + + List<const CameraMetadata> requests; + requests.push_back(request); + return setStreamingRequestList(requests, /*lastFrameNumber*/NULL); +} + +status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests, + int64_t *lastFrameNumber) { + ATRACE_CALL(); + + return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber); +} sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked( const CameraMetadata &request) { @@ -497,7 +523,7 @@ sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked( return newRequest; } -status_t Camera3Device::clearStreamingRequest() { +status_t Camera3Device::clearStreamingRequest(int64_t *lastFrameNumber) { ATRACE_CALL(); Mutex::Autolock il(mInterfaceLock); Mutex::Autolock l(mLock); @@ -519,7 +545,8 @@ status_t Camera3Device::clearStreamingRequest() { return INVALID_OPERATION; } ALOGV("Camera %d: Clearing repeating request", mId); - 
return mRequestThread->clearRepeatingRequests(); + + return mRequestThread->clearRepeatingRequests(lastFrameNumber); } status_t Camera3Device::waitUntilRequestReceived(int32_t requestId, nsecs_t timeout) { @@ -838,16 +865,20 @@ status_t Camera3Device::deleteStream(int id) { } sp<Camera3StreamInterface> deletedStream; + ssize_t outputStreamIdx = mOutputStreams.indexOfKey(id); if (mInputStream != NULL && id == mInputStream->getId()) { deletedStream = mInputStream; mInputStream.clear(); } else { - ssize_t idx = mOutputStreams.indexOfKey(id); - if (idx == NAME_NOT_FOUND) { + if (outputStreamIdx == NAME_NOT_FOUND) { CLOGE("Stream %d does not exist", id); return BAD_VALUE; } - deletedStream = mOutputStreams.editValueAt(idx); + } + + // Delete output stream or the output part of a bi-directional stream. + if (outputStreamIdx != NAME_NOT_FOUND) { + deletedStream = mOutputStreams.editValueAt(outputStreamIdx); mOutputStreams.removeItem(id); } @@ -916,6 +947,10 @@ status_t Camera3Device::waitUntilDrained() { Mutex::Autolock il(mInterfaceLock); Mutex::Autolock l(mLock); + return waitUntilDrainedLocked(); +} + +status_t Camera3Device::waitUntilDrainedLocked() { switch (mStatus) { case STATUS_UNINITIALIZED: case STATUS_UNCONFIGURED: @@ -1028,7 +1063,7 @@ status_t Camera3Device::waitForNextFrame(nsecs_t timeout) { return OK; } -status_t Camera3Device::getNextFrame(CameraMetadata *frame) { +status_t Camera3Device::getNextResult(CaptureResult *frame) { ATRACE_CALL(); Mutex::Autolock l(mOutputLock); @@ -1036,8 +1071,14 @@ status_t Camera3Device::getNextFrame(CameraMetadata *frame) { return NOT_ENOUGH_DATA; } - CameraMetadata &result = *(mResultQueue.begin()); - frame->acquire(result); + if (frame == NULL) { + ALOGE("%s: argument cannot be NULL", __FUNCTION__); + return BAD_VALUE; + } + + CaptureResult &result = *(mResultQueue.begin()); + frame->mResultExtras = result.mResultExtras; + frame->mMetadata.acquire(result.mMetadata); mResultQueue.erase(mResultQueue.begin()); return OK; @@ 
-1115,14 +1156,21 @@ status_t Camera3Device::pushReprocessBuffer(int reprocessStreamId, return INVALID_OPERATION; } -status_t Camera3Device::flush() { +status_t Camera3Device::flush(int64_t *frameNumber) { ATRACE_CALL(); ALOGV("%s: Camera %d: Flushing all requests", __FUNCTION__, mId); Mutex::Autolock il(mInterfaceLock); Mutex::Autolock l(mLock); - mRequestThread->clear(); - return mHal3Device->ops->flush(mHal3Device); + mRequestThread->clear(/*out*/frameNumber); + status_t res; + if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) { + res = mHal3Device->ops->flush(mHal3Device); + } else { + res = waitUntilDrainedLocked(); + } + + return res; } /** @@ -1390,13 +1438,13 @@ void Camera3Device::setErrorStateLockedV(const char *fmt, va_list args) { * In-flight request management */ -status_t Camera3Device::registerInFlight(int32_t frameNumber, - int32_t requestId, int32_t numBuffers) { +status_t Camera3Device::registerInFlight(uint32_t frameNumber, + int32_t numBuffers, CaptureResultExtras resultExtras) { ATRACE_CALL(); Mutex::Autolock l(mInFlightLock); ssize_t res; - res = mInFlightMap.add(frameNumber, InFlightRequest(requestId, numBuffers)); + res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras)); if (res < 0) return res; return OK; @@ -1408,8 +1456,8 @@ status_t Camera3Device::registerInFlight(int32_t frameNumber, * to the output frame queue */ bool Camera3Device::processPartial3AQuirk( - int32_t frameNumber, int32_t requestId, - const CameraMetadata& partial) { + uint32_t frameNumber, + const CameraMetadata& partial, const CaptureResultExtras& resultExtras) { // Check if all 3A states are present // The full list of fields is @@ -1458,7 +1506,7 @@ bool Camera3Device::processPartial3AQuirk( ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, " "AF state %d, AE state %d, AWB state %d, " "AF trigger %d, AE precapture trigger %d", - __FUNCTION__, mId, frameNumber, requestId, + __FUNCTION__, mId, frameNumber, 
resultExtras.requestId, afMode, awbMode, afState, aeState, awbState, afTriggerId, aeTriggerId); @@ -1473,58 +1521,63 @@ bool Camera3Device::processPartial3AQuirk( Mutex::Autolock l(mOutputLock); - CameraMetadata& min3AResult = - *mResultQueue.insert( - mResultQueue.end(), - CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0)); - - if (!insert3AResult(min3AResult, ANDROID_REQUEST_FRAME_COUNT, - &frameNumber, frameNumber)) { + CaptureResult captureResult; + captureResult.mResultExtras = resultExtras; + captureResult.mMetadata = CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0); + // TODO: change this to sp<CaptureResult>. This will need other changes, including, + // but not limited to CameraDeviceBase::getNextResult + CaptureResult& min3AResult = + *mResultQueue.insert(mResultQueue.end(), captureResult); + + if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_FRAME_COUNT, + // TODO: This is problematic casting. Need to fix CameraMetadata. + reinterpret_cast<int32_t*>(&frameNumber), frameNumber)) { return false; } - if (!insert3AResult(min3AResult, ANDROID_REQUEST_ID, + int32_t requestId = resultExtras.requestId; + if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_ID, &requestId, frameNumber)) { return false; } static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL; - if (!insert3AResult(min3AResult, ANDROID_QUIRKS_PARTIAL_RESULT, + if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT, &partialResult, frameNumber)) { return false; } - if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_MODE, + if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_MODE, &afMode, frameNumber)) { return false; } - if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_MODE, + if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_MODE, &awbMode, frameNumber)) { return false; } - if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_STATE, + if (!insert3AResult(min3AResult.mMetadata, 
ANDROID_CONTROL_AE_STATE, &aeState, frameNumber)) { return false; } - if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_STATE, + if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_STATE, &afState, frameNumber)) { return false; } - if (!insert3AResult(min3AResult, ANDROID_CONTROL_AWB_STATE, + if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_STATE, &awbState, frameNumber)) { return false; } - if (!insert3AResult(min3AResult, ANDROID_CONTROL_AF_TRIGGER_ID, + if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_TRIGGER_ID, &afTriggerId, frameNumber)) { return false; } - if (!insert3AResult(min3AResult, ANDROID_CONTROL_AE_PRECAPTURE_ID, + if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_PRECAPTURE_ID, &aeTriggerId, frameNumber)) { return false; } @@ -1536,7 +1589,7 @@ bool Camera3Device::processPartial3AQuirk( template<typename T> bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag, - T* value, int32_t frameNumber) { + T* value, uint32_t frameNumber) { (void) frameNumber; camera_metadata_ro_entry_t entry; @@ -1561,7 +1614,7 @@ bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag, template<typename T> bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag, - const T* value, int32_t frameNumber) { + const T* value, uint32_t frameNumber) { if (result.update(tag, value, 1) != NO_ERROR) { mResultQueue.erase(--mResultQueue.end(), mResultQueue.end()); SET_ERR("Frame %d: Failed to set %s in partial metadata", @@ -1588,11 +1641,12 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { } bool partialResultQuirk = false; CameraMetadata collectedQuirkResult; + CaptureResultExtras resultExtras; - // Get capture timestamp from list of in-flight requests, where it was added - // by the shutter notification for this frame. Then update the in-flight - // status and remove the in-flight entry if all result data has been - // received. 
+ // Get capture timestamp and resultExtras from list of in-flight requests, + // where it was added by the shutter notification for this frame. + // Then update the in-flight status and remove the in-flight entry if + // all result data has been received. nsecs_t timestamp = 0; { Mutex::Autolock l(mInFlightLock); @@ -1603,6 +1657,10 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { return; } InFlightRequest &request = mInFlightMap.editValueAt(idx); + ALOGVV("%s: got InFlightRequest requestId = %" PRId32 ", frameNumber = %" PRId64 + ", burstId = %" PRId32, + __FUNCTION__, request.resultExtras.requestId, request.resultExtras.frameNumber, + request.resultExtras.burstId); // Check if this result carries only partial metadata if (mUsePartialResultQuirk && result->result != NULL) { @@ -1624,13 +1682,15 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { if (!request.partialResultQuirk.haveSent3A) { request.partialResultQuirk.haveSent3A = processPartial3AQuirk(frameNumber, - request.requestId, - request.partialResultQuirk.collectedResult); + request.partialResultQuirk.collectedResult, + request.resultExtras); } } } timestamp = request.captureTimestamp; + resultExtras = request.resultExtras; + /** * One of the following must happen before it's legal to call process_capture_result, * unless partial metadata is being provided: @@ -1666,8 +1726,10 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { return; } - // Check if everything has arrived for this result (buffers and metadata) - if (request.haveResultMetadata && request.numBuffersLeft == 0) { + // Check if everything has arrived for this result (buffers and metadata), remove it from + // InFlightMap if both arrived or HAL reports error for this request (i.e. during flush). 
+ if ((request.requestStatus != OK) || + (request.haveResultMetadata && request.numBuffersLeft == 0)) { ATRACE_ASYNC_END("frame capture", frameNumber); mInFlightMap.removeItemsAt(idx, 1); } @@ -1695,11 +1757,12 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { } mNextResultFrameNumber++; - CameraMetadata captureResult; - captureResult = result->result; + CaptureResult captureResult; + captureResult.mResultExtras = resultExtras; + captureResult.mMetadata = result->result; - if (captureResult.update(ANDROID_REQUEST_FRAME_COUNT, - (int32_t*)&frameNumber, 1) != OK) { + if (captureResult.mMetadata.update(ANDROID_REQUEST_FRAME_COUNT, + (int32_t*)&frameNumber, 1) != OK) { SET_ERR("Failed to set frame# in metadata (%d)", frameNumber); gotResult = false; @@ -1710,15 +1773,15 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { // Append any previous partials to form a complete result if (mUsePartialResultQuirk && !collectedQuirkResult.isEmpty()) { - captureResult.append(collectedQuirkResult); + captureResult.mMetadata.append(collectedQuirkResult); } - captureResult.sort(); + captureResult.mMetadata.sort(); // Check that there's a timestamp in the result metadata camera_metadata_entry entry = - captureResult.find(ANDROID_SENSOR_TIMESTAMP); + captureResult.mMetadata.find(ANDROID_SENSOR_TIMESTAMP); if (entry.count == 0) { SET_ERR("No timestamp provided by HAL for frame %d!", frameNumber); @@ -1732,9 +1795,13 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { if (gotResult) { // Valid result, insert into queue - CameraMetadata& queuedResult = - *mResultQueue.insert(mResultQueue.end(), CameraMetadata()); - queuedResult.swap(captureResult); + List<CaptureResult>::iterator queuedResult = + mResultQueue.insert(mResultQueue.end(), CaptureResult(captureResult)); + ALOGVV("%s: result requestId = %" PRId32 ", frameNumber = %" PRId64 + ", burstId = %" PRId32, __FUNCTION__, + 
queuedResult->mResultExtras.requestId, + queuedResult->mResultExtras.frameNumber, + queuedResult->mResultExtras.burstId); } } // scope for mOutputLock @@ -1760,8 +1827,6 @@ void Camera3Device::processCaptureResult(const camera3_capture_result *result) { } - - void Camera3Device::notify(const camera3_notify_msg *msg) { ATRACE_CALL(); NotificationListener *listener; @@ -1788,18 +1853,32 @@ void Camera3Device::notify(const camera3_notify_msg *msg) { mId, __FUNCTION__, msg->message.error.frame_number, streamId, msg->message.error.error_code); + CaptureResultExtras resultExtras; // Set request error status for the request in the in-flight tracking { Mutex::Autolock l(mInFlightLock); ssize_t idx = mInFlightMap.indexOfKey(msg->message.error.frame_number); if (idx >= 0) { - mInFlightMap.editValueAt(idx).requestStatus = msg->message.error.error_code; + InFlightRequest &r = mInFlightMap.editValueAt(idx); + r.requestStatus = msg->message.error.error_code; + resultExtras = r.resultExtras; + } else { + resultExtras.frameNumber = msg->message.error.frame_number; + ALOGE("Camera %d: %s: cannot find in-flight request on frame %" PRId64 + " error", mId, __FUNCTION__, resultExtras.frameNumber); } } if (listener != NULL) { - listener->notifyError(msg->message.error.error_code, - msg->message.error.frame_number, streamId); + if (msg->message.error.error_code == CAMERA3_MSG_ERROR_DEVICE) { + listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE, + resultExtras); + } else { + listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, + resultExtras); + } + } else { + ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__); } break; } @@ -1819,7 +1898,7 @@ void Camera3Device::notify(const camera3_notify_msg *msg) { mNextShutterFrameNumber++; } - int32_t requestId = -1; + CaptureResultExtras resultExtras; // Set timestamp for the request in the in-flight tracking // and get the request ID to send upstream @@ -1829,7 +1908,7 @@ void Camera3Device::notify(const 
camera3_notify_msg *msg) { if (idx >= 0) { InFlightRequest &r = mInFlightMap.editValueAt(idx); r.captureTimestamp = timestamp; - requestId = r.requestId; + resultExtras = r.resultExtras; } } if (idx < 0) { @@ -1838,10 +1917,10 @@ void Camera3Device::notify(const camera3_notify_msg *msg) { break; } ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64, - mId, __FUNCTION__, frameNumber, requestId, timestamp); + mId, __FUNCTION__, frameNumber, resultExtras.requestId, timestamp); // Call listener, if any if (listener != NULL) { - listener->notifyShutter(requestId, timestamp); + listener->notifyShutter(resultExtras, timestamp); } break; } @@ -1863,6 +1942,7 @@ CameraMetadata Camera3Device::getLatestRequestLocked() { return retVal; } + /** * RequestThread inner class methods */ @@ -1879,7 +1959,8 @@ Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent, mDoPause(false), mPaused(true), mFrameNumber(0), - mLatestRequestId(NAME_NOT_FOUND) { + mLatestRequestId(NAME_NOT_FOUND), + mRepeatingLastFrameNumber(NO_IN_FLIGHT_REPEATING_FRAMES) { mStatusId = statusTracker->addComponent(); } @@ -1888,10 +1969,22 @@ void Camera3Device::RequestThread::configurationComplete() { mReconfigured = true; } -status_t Camera3Device::RequestThread::queueRequest( - sp<CaptureRequest> request) { +status_t Camera3Device::RequestThread::queueRequestList( + List<sp<CaptureRequest> > &requests, + /*out*/ + int64_t *lastFrameNumber) { Mutex::Autolock l(mRequestLock); - mRequestQueue.push_back(request); + for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); + ++it) { + mRequestQueue.push_back(*it); + } + + if (lastFrameNumber != NULL) { + *lastFrameNumber = mFrameNumber + mRequestQueue.size() - 1; + ALOGV("%s: requestId %d, mFrameNumber %" PRId32 ", lastFrameNumber %" PRId64 ".", + __FUNCTION__, (*(requests.begin()))->mResultExtras.requestId, mFrameNumber, + *lastFrameNumber); + } unpauseForNewRequests(); @@ -1955,28 +2048,43 @@ status_t 
Camera3Device::RequestThread::queueTriggerLocked( } status_t Camera3Device::RequestThread::setRepeatingRequests( - const RequestList &requests) { + const RequestList &requests, + /*out*/ + int64_t *lastFrameNumber) { Mutex::Autolock l(mRequestLock); + if (lastFrameNumber != NULL) { + *lastFrameNumber = mRepeatingLastFrameNumber; + } mRepeatingRequests.clear(); mRepeatingRequests.insert(mRepeatingRequests.begin(), requests.begin(), requests.end()); unpauseForNewRequests(); + mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES; return OK; } -status_t Camera3Device::RequestThread::clearRepeatingRequests() { +status_t Camera3Device::RequestThread::clearRepeatingRequests(/*out*/int64_t *lastFrameNumber) { Mutex::Autolock l(mRequestLock); mRepeatingRequests.clear(); + if (lastFrameNumber != NULL) { + *lastFrameNumber = mRepeatingLastFrameNumber; + } + mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES; return OK; } -status_t Camera3Device::RequestThread::clear() { +status_t Camera3Device::RequestThread::clear(/*out*/int64_t *lastFrameNumber) { Mutex::Autolock l(mRequestLock); + ALOGV("RequestThread::%s:", __FUNCTION__); mRepeatingRequests.clear(); mRequestQueue.clear(); mTriggerMap.clear(); + if (lastFrameNumber != NULL) { + *lastFrameNumber = mRepeatingLastFrameNumber; + } + mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES; return OK; } @@ -2028,6 +2136,7 @@ bool Camera3Device::RequestThread::threadLoop() { // Create request to HAL camera3_capture_request_t request = camera3_capture_request_t(); + request.frame_number = nextRequest->mResultExtras.frameNumber; Vector<camera3_stream_buffer_t> outputBuffers; // Get the request ID, if any @@ -2048,7 +2157,7 @@ bool Camera3Device::RequestThread::threadLoop() { if (res < 0) { SET_ERR("RequestThread: Unable to insert triggers " "(capture request %d, HAL device: %s (%d)", - (mFrameNumber+1), strerror(-res), res); + request.frame_number, strerror(-res), res); cleanUpFailedRequest(request, nextRequest, 
outputBuffers); return false; } @@ -2066,7 +2175,7 @@ bool Camera3Device::RequestThread::threadLoop() { if (res != OK) { SET_ERR("RequestThread: Unable to insert dummy trigger IDs " "(capture request %d, HAL device: %s (%d)", - (mFrameNumber+1), strerror(-res), res); + request.frame_number, strerror(-res), res); cleanUpFailedRequest(request, nextRequest, outputBuffers); return false; } @@ -2090,7 +2199,7 @@ bool Camera3Device::RequestThread::threadLoop() { if (e.count > 0) { ALOGV("%s: Request (frame num %d) had AF trigger 0x%x", __FUNCTION__, - mFrameNumber+1, + request.frame_number, e.data.u8[0]); } } @@ -2132,8 +2241,6 @@ bool Camera3Device::RequestThread::threadLoop() { request.num_output_buffers++; } - request.frame_number = mFrameNumber++; - // Log request in the in-flight queue sp<Camera3Device> parent = mParent.promote(); if (parent == NULL) { @@ -2142,8 +2249,13 @@ bool Camera3Device::RequestThread::threadLoop() { return false; } - res = parent->registerInFlight(request.frame_number, requestId, - request.num_output_buffers); + res = parent->registerInFlight(request.frame_number, + request.num_output_buffers, nextRequest->mResultExtras); + ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64 + ", burstId = %" PRId32 ".", + __FUNCTION__, + nextRequest->mResultExtras.requestId, nextRequest->mResultExtras.frameNumber, + nextRequest->mResultExtras.burstId); if (res != OK) { SET_ERR("RequestThread: Unable to register new in-flight request:" " %s (%d)", strerror(-res), res); @@ -2220,6 +2332,7 @@ CameraMetadata Camera3Device::RequestThread::getLatestRequest() const { return mLatestRequest; } + void Camera3Device::RequestThread::cleanUpFailedRequest( camera3_capture_request_t &request, sp<CaptureRequest> &nextRequest, @@ -2261,6 +2374,9 @@ sp<Camera3Device::CaptureRequest> ++firstRequest, requests.end()); // No need to wait any longer + + mRepeatingLastFrameNumber = mFrameNumber + requests.size() - 1; + break; } @@ -2312,6 +2428,9 @@ 
sp<Camera3Device::CaptureRequest> mReconfigured = false; } + if (nextRequest != NULL) { + nextRequest->mResultExtras.frameNumber = mFrameNumber++; + } return nextRequest; } diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h index 468f641..3ef39f3 100644 --- a/services/camera/libcameraservice/device3/Camera3Device.h +++ b/services/camera/libcameraservice/device3/Camera3Device.h @@ -24,6 +24,8 @@ #include <utils/Thread.h> #include <utils/KeyedVector.h> #include <hardware/camera3.h> +#include <camera/CaptureResult.h> +#include <camera/camera2/ICameraDeviceUser.h> #include "common/CameraDeviceBase.h" #include "device3/StatusTracker.h" @@ -54,7 +56,7 @@ class Camera3StreamInterface; } /** - * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 + * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 or higher. */ class Camera3Device : public CameraDeviceBase, @@ -78,9 +80,14 @@ class Camera3Device : // Capture and setStreamingRequest will configure streams if currently in // idle state - virtual status_t capture(CameraMetadata &request); - virtual status_t setStreamingRequest(const CameraMetadata &request); - virtual status_t clearStreamingRequest(); + virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL); + virtual status_t captureList(const List<const CameraMetadata> &requests, + int64_t *lastFrameNumber = NULL); + virtual status_t setStreamingRequest(const CameraMetadata &request, + int64_t *lastFrameNumber = NULL); + virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests, + int64_t *lastFrameNumber = NULL); + virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL); virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout); @@ -116,7 +123,7 @@ class Camera3Device : virtual status_t setNotifyCallback(NotificationListener *listener); virtual bool willNotify3A(); 
virtual status_t waitForNextFrame(nsecs_t timeout); - virtual status_t getNextFrame(CameraMetadata *frame); + virtual status_t getNextResult(CaptureResult *frame); virtual status_t triggerAutofocus(uint32_t id); virtual status_t triggerCancelAutofocus(uint32_t id); @@ -125,7 +132,7 @@ class Camera3Device : virtual status_t pushReprocessBuffer(int reprocessStreamId, buffer_handle_t *buffer, wp<BufferReleasedListener> listener); - virtual status_t flush(); + virtual status_t flush(int64_t *lastFrameNumber = NULL); // Methods called by subclasses void notifyStatus(bool idle); // updates from StatusTracker @@ -157,7 +164,6 @@ class Camera3Device : camera3_device_t *mHal3Device; CameraMetadata mDeviceInfo; - vendor_tag_query_ops_t mVendorTagOps; enum Status { STATUS_ERROR, @@ -199,9 +205,20 @@ class Camera3Device : sp<camera3::Camera3Stream> mInputStream; Vector<sp<camera3::Camera3OutputStreamInterface> > mOutputStreams; + CaptureResultExtras mResultExtras; }; typedef List<sp<CaptureRequest> > RequestList; + status_t checkStatusOkToCaptureLocked(); + + status_t convertMetadataListToRequestListLocked( + const List<const CameraMetadata> &metadataList, + /*out*/ + RequestList *requestList); + + status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating, + int64_t *lastFrameNumber = NULL); + /** * Get the last request submitted to the hal by the request thread. * @@ -237,6 +254,13 @@ class Camera3Device : status_t waitUntilStateThenRelock(bool active, nsecs_t timeout); /** + * Implementation of waitUntilDrained. On success, will transition to IDLE state. + * + * Need to be called with mLock and mInterfaceLock held. + */ + status_t waitUntilDrainedLocked(); + + /** * Do common work for setting up a streaming or single capture request. * On success, will transition to ACTIVE if in IDLE. */ @@ -308,15 +332,21 @@ class Camera3Device : * on either. Use waitUntilPaused to wait until request queue * has emptied out. 
*/ - status_t setRepeatingRequests(const RequestList& requests); - status_t clearRepeatingRequests(); + status_t setRepeatingRequests(const RequestList& requests, + /*out*/ + int64_t *lastFrameNumber = NULL); + status_t clearRepeatingRequests(/*out*/ + int64_t *lastFrameNumber = NULL); - status_t queueRequest(sp<CaptureRequest> request); + status_t queueRequestList(List<sp<CaptureRequest> > &requests, + /*out*/ + int64_t *lastFrameNumber = NULL); /** * Remove all queued and repeating requests, and pending triggers */ - status_t clear(); + status_t clear(/*out*/ + int64_t *lastFrameNumber = NULL); /** * Queue a trigger to be dispatched with the next outgoing @@ -429,6 +459,8 @@ class Camera3Device : TriggerMap mTriggerMap; TriggerMap mTriggerRemovedMap; TriggerMap mTriggerReplacedMap; + + int64_t mRepeatingLastFrameNumber; }; sp<RequestThread> mRequestThread; @@ -437,8 +469,6 @@ class Camera3Device : */ struct InFlightRequest { - // android.request.id for the request - int requestId; // Set by notify() SHUTTER call. 
nsecs_t captureTimestamp; int requestStatus; @@ -447,6 +477,7 @@ class Camera3Device : // Decremented by calls to process_capture_result with valid output // buffers int numBuffersLeft; + CaptureResultExtras resultExtras; // Fields used by the partial result quirk only struct PartialResultQuirkInFlight { @@ -462,20 +493,26 @@ class Camera3Device : // Default constructor needed by KeyedVector InFlightRequest() : - requestId(0), captureTimestamp(0), requestStatus(OK), haveResultMetadata(false), numBuffersLeft(0) { } - InFlightRequest(int id, int numBuffers) : - requestId(id), + InFlightRequest(int numBuffers) : captureTimestamp(0), requestStatus(OK), haveResultMetadata(false), numBuffersLeft(numBuffers) { } + + InFlightRequest(int numBuffers, CaptureResultExtras extras) : + captureTimestamp(0), + requestStatus(OK), + haveResultMetadata(false), + numBuffersLeft(numBuffers), + resultExtras(extras) { + } }; // Map from frame number to the in-flight request state typedef KeyedVector<uint32_t, InFlightRequest> InFlightMap; @@ -483,25 +520,25 @@ class Camera3Device : Mutex mInFlightLock; // Protects mInFlightMap InFlightMap mInFlightMap; - status_t registerInFlight(int32_t frameNumber, int32_t requestId, - int32_t numBuffers); + status_t registerInFlight(uint32_t frameNumber, + int32_t numBuffers, CaptureResultExtras resultExtras); /** * For the partial result quirk, check if all 3A state fields are available * and if so, queue up 3A-only result to the client. Returns true if 3A * is sent. 
*/ - bool processPartial3AQuirk(int32_t frameNumber, int32_t requestId, - const CameraMetadata& partial); + bool processPartial3AQuirk(uint32_t frameNumber, + const CameraMetadata& partial, const CaptureResultExtras& resultExtras); // Helpers for reading and writing 3A metadata into to/from partial results template<typename T> bool get3AResult(const CameraMetadata& result, int32_t tag, - T* value, int32_t frameNumber); + T* value, uint32_t frameNumber); template<typename T> bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value, - int32_t frameNumber); + uint32_t frameNumber); /** * Tracking for idle detection */ @@ -518,7 +555,7 @@ class Camera3Device : uint32_t mNextResultFrameNumber; uint32_t mNextShutterFrameNumber; - List<CameraMetadata> mResultQueue; + List<CaptureResult> mResultQueue; Condition mResultSignal; NotificationListener *mListener; diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp index d662cc2..2257682 100644 --- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp +++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp @@ -146,6 +146,13 @@ void Camera3IOStreamBase::handoutBufferLocked(camera3_stream_buffer &buffer, // Inform tracker about becoming busy if (mDequeuedBufferCount == 0 && mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) { + /** + * Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers + * before/after register_stream_buffers during initial configuration + * or re-configuration. 
+ * + * TODO: IN_CONFIG and IN_RECONFIG checks only make sense for <HAL3.2 + */ sp<StatusTracker> statusTracker = mStatusTracker.promote(); if (statusTracker != 0) { statusTracker->markComponentActive(mStatusId); @@ -224,6 +231,13 @@ status_t Camera3IOStreamBase::returnAnyBufferLocked( mDequeuedBufferCount--; if (mDequeuedBufferCount == 0 && mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) { + /** + * Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers + * before/after register_stream_buffers during initial configuration + * or re-configuration. + * + * TODO: IN_CONFIG and IN_RECONFIG checks only make sense for <HAL3.2 + */ ALOGV("%s: Stream %d: All buffers returned; now idle", __FUNCTION__, mId); sp<StatusTracker> statusTracker = mStatusTracker.promote(); diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp index 5aa9a3e..dd7fb6c 100644 --- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp @@ -199,14 +199,36 @@ status_t Camera3InputStream::configureQueueLocked() { assert(mMaxSize == 0); assert(camera3_stream::format != HAL_PIXEL_FORMAT_BLOB); - mTotalBufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS + - camera3_stream::max_buffers; mDequeuedBufferCount = 0; mFrameCount = 0; if (mConsumer.get() == 0) { - sp<BufferQueue> bq = new BufferQueue(); - mConsumer = new BufferItemConsumer(bq, camera3_stream::usage, + sp<IGraphicBufferProducer> producer; + sp<IGraphicBufferConsumer> consumer; + BufferQueue::createBufferQueue(&producer, &consumer); + + int minUndequeuedBuffers = 0; + res = producer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers); + if (res != OK || minUndequeuedBuffers < 0) { + ALOGE("%s: Stream %d: Could not query min undequeued buffers (error %d, bufCount %d)", + __FUNCTION__, mId, res, minUndequeuedBuffers); + return res; + } + size_t 
minBufs = static_cast<size_t>(minUndequeuedBuffers); + /* + * We promise never to 'acquire' more than camera3_stream::max_buffers + * at any one time. + * + * Boost the number up to meet the minimum required buffer count. + * + * (Note that this sets consumer-side buffer count only, + * and not the sum of producer+consumer side as in other camera streams). + */ + mTotalBufferCount = camera3_stream::max_buffers > minBufs ? + camera3_stream::max_buffers : minBufs; + // TODO: somehow set the total buffer count when producer connects? + + mConsumer = new BufferItemConsumer(consumer, camera3_stream::usage, mTotalBufferCount); mConsumer->setName(String8::format("Camera3-InputStream-%d", mId)); } diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h index 681d684..ae49467 100644 --- a/services/camera/libcameraservice/device3/Camera3InputStream.h +++ b/services/camera/libcameraservice/device3/Camera3InputStream.h @@ -44,6 +44,8 @@ class Camera3InputStream : public Camera3IOStreamBase { virtual void dump(int fd, const Vector<String16> &args) const; + // TODO: expose an interface to get the IGraphicBufferProducer + private: typedef BufferItemConsumer::BufferItem BufferItem; diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp index 70406f1..646f286 100644 --- a/services/camera/libcameraservice/device3/Camera3Stream.cpp +++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp @@ -23,6 +23,8 @@ #include "device3/Camera3Stream.h" #include "device3/StatusTracker.h" +#include <cutils/properties.h> + namespace android { namespace camera3 { @@ -137,6 +139,7 @@ camera3_stream* Camera3Stream::startConfiguration() { if (mState == STATE_CONSTRUCTED) { mState = STATE_IN_CONFIG; } else { // mState == STATE_CONFIGURED + LOG_ALWAYS_FATAL_IF(mState != STATE_CONFIGURED, "Invalid state: 0x%x", mState); mState = 
STATE_IN_RECONFIG; } @@ -223,6 +226,14 @@ status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer, ATRACE_CALL(); Mutex::Autolock l(mLock); + /** + * TODO: Check that the state is valid first. + * + * <HAL3.2 IN_CONFIG and IN_RECONFIG in addition to CONFIGURED. + * >= HAL3.2 CONFIGURED only + * + * Do this for getBuffer as well. + */ status_t res = returnBufferLocked(buffer, timestamp); if (res == OK) { fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/true); @@ -314,12 +325,46 @@ status_t Camera3Stream::disconnect() { status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) { ATRACE_CALL(); + + /** + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * camera3_device_t->ops->register_stream_buffers() is not called and must + * be NULL. + */ + if (hal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_2) { + ALOGV("%s: register_stream_buffers unused as of HAL3.2", __FUNCTION__); + + /** + * Skip the NULL check if camera.dev.register_stream is 1. + * + * For development-validation purposes only. + * + * TODO: Remove the property check before shipping L (b/13914251). 
+ */ + char value[PROPERTY_VALUE_MAX] = { '\0', }; + property_get("camera.dev.register_stream", value, "0"); + int propInt = atoi(value); + + if (propInt == 0 && hal3Device->ops->register_stream_buffers != NULL) { + ALOGE("%s: register_stream_buffers is deprecated in HAL3.2; " + "must be set to NULL in camera3_device::ops", __FUNCTION__); + return INVALID_OPERATION; + } else { + ALOGD("%s: Skipping NULL check for deprecated register_stream_buffers", __FUNCTION__); + } + + return OK; + } else { + ALOGV("%s: register_stream_buffers using deprecated code path", __FUNCTION__); + } + status_t res; size_t bufferCount = getBufferCountLocked(); Vector<buffer_handle_t*> buffers; - buffers.insertAt(NULL, 0, bufferCount); + buffers.insertAt(/*prototype_item*/NULL, /*index*/0, bufferCount); camera3_stream_buffer_set bufferSet = camera3_stream_buffer_set(); bufferSet.stream = this; @@ -327,7 +372,7 @@ status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) { bufferSet.buffers = buffers.editArray(); Vector<camera3_stream_buffer_t> streamBuffers; - streamBuffers.insertAt(camera3_stream_buffer_t(), 0, bufferCount); + streamBuffers.insertAt(camera3_stream_buffer_t(), /*index*/0, bufferCount); // Register all buffers with the HAL. This means getting all the buffers // from the stream, providing them to the HAL with the diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h index 6eeb721..766b772 100644 --- a/services/camera/libcameraservice/device3/Camera3Stream.h +++ b/services/camera/libcameraservice/device3/Camera3Stream.h @@ -82,6 +82,23 @@ namespace camera3 { * STATE_CONFIGURED => STATE_CONSTRUCTED: * When disconnect() is called after making sure stream is idle with * waitUntilIdle(). + * + * Status Tracking: + * Each stream is tracked by StatusTracker as a separate component, + * depending on the handed out buffer count. The state must be STATE_CONFIGURED + * in order for the component to be marked. 
+ * + * It's marked in one of two ways: + * + * - ACTIVE: One or more buffers have been handed out (with #getBuffer). + * - IDLE: All buffers have been returned (with #returnBuffer), and their + * respective release_fence(s) have been signaled. + * + * A typical use case is output streams. When the HAL has any buffers + * dequeued, the stream is marked ACTIVE. When the HAL returns all buffers + * (e.g. if no capture requests are active), the stream is marked IDLE. + * In this use case, the app consumer does not affect the component status. + * */ class Camera3Stream : protected camera3_stream, diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp index 44d8188..09e14c5 100644 --- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp +++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp @@ -111,15 +111,17 @@ struct TimestampFinder : public RingBufferConsumer::RingBufferComparator { } // namespace anonymous Camera3ZslStream::Camera3ZslStream(int id, uint32_t width, uint32_t height, - int depth) : + int bufferCount) : Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL, width, height, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED), - mDepth(depth) { + mDepth(bufferCount) { - sp<BufferQueue> bq = new BufferQueue(); - mProducer = new RingBufferConsumer(bq, GRALLOC_USAGE_HW_CAMERA_ZSL, depth); - mConsumer = new Surface(bq); + sp<IGraphicBufferProducer> producer; + sp<IGraphicBufferConsumer> consumer; + BufferQueue::createBufferQueue(&producer, &consumer); + mProducer = new RingBufferConsumer(consumer, GRALLOC_USAGE_HW_CAMERA_ZSL, bufferCount); + mConsumer = new Surface(producer); } Camera3ZslStream::~Camera3ZslStream() { diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h index c7f4490..6721832 100644 --- a/services/camera/libcameraservice/device3/Camera3ZslStream.h +++ 
b/services/camera/libcameraservice/device3/Camera3ZslStream.h @@ -37,10 +37,10 @@ class Camera3ZslStream : public Camera3OutputStream { public: /** - * Set up a ZSL stream of a given resolution. Depth is the number of buffers + * Set up a ZSL stream of a given resolution. bufferCount is the number of buffers * cached within the stream that can be retrieved for input. */ - Camera3ZslStream(int id, uint32_t width, uint32_t height, int depth); + Camera3ZslStream(int id, uint32_t width, uint32_t height, int bufferCount); ~Camera3ZslStream(); virtual void dump(int fd, const Vector<String16> &args) const; diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h index b4ad824..a03736d 100644 --- a/services/camera/libcameraservice/gui/RingBufferConsumer.h +++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h @@ -64,7 +64,7 @@ class RingBufferConsumer : public ConsumerBase, // bufferCount parameter specifies how many buffers can be pinned for user // access at the same time. RingBufferConsumer(const sp<IGraphicBufferConsumer>& consumer, uint32_t consumerUsage, - int bufferCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS); + int bufferCount); virtual ~RingBufferConsumer(); diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp index 683fdf3..0c7fbbd 100644 --- a/services/medialog/MediaLogService.cpp +++ b/services/medialog/MediaLogService.cpp @@ -54,7 +54,7 @@ void MediaLogService::unregisterWriter(const sp<IMemory>& shared) } } -status_t MediaLogService::dump(int fd, const Vector<String16>& args) +status_t MediaLogService::dump(int fd, const Vector<String16>& args __unused) { // FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp static const String16 sDump("android.permission.DUMP"); |